text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
import ast
from jaspyx.visitor import BaseVisitor
class AugAssign(BaseVisitor):
    """AST visitor mix-in that renders augmented assignments (+=, -=, ...)."""

    def visit_AugAssign(self, node):
        # Look up the operator-specific renderer generated below
        # (e.g. AugAssign_Add for ast.Add).
        attr = getattr(self, 'AugAssign_%s' % node.op.__class__.__name__, None)
        if attr is None:
            # Rewrite the expression as an assignment using a BinOp
            # (i.e. "x op= y" becomes "x = x op y").
            # NOTE(review): node.target.id assumes the target is an
            # ast.Name -- an Attribute or Subscript target would raise
            # AttributeError here.  TODO confirm against callers.
            self.visit(ast.Assign(
                [node.target],
                ast.BinOp(
                    ast.Name(node.target.id, ast.Load()),
                    node.op,
                    node.value
                )
            ))
        else:
            attr(node.target, node.value)

    # Generate one AugAssign_<Op> renderer per supported operator.
    # This loop runs in the class body at class-definition time
    # (Python 2 exec statement).
    for key, value in {
        'Add': ' += ',
        'Sub': ' -= ',
        'Mult': ' *= ',
        'Div': ' /= ',
        'Mod': ' %= ',
        'BitAnd': ' &= ',
        'BitOr': ' |= ',
        'BitXor': ' ^= ',
    }.items():
        def gen_op(op):
            # Build a renderer emitting "<target><op><value>" as one line.
            def f_op(self, target, value):
                self.indent()
                self.group(
                    [target, value],
                    prefix='',
                    infix=op,
                    suffix='',
                )
                self.finish()
            return f_op

        # exec binds the operator string into the generated method's name.
        exec 'AugAssign_%s = gen_op("%s")' % (key, value)
| iksteen/jaspyx | jaspyx/visitor/augassign.py | Python | mit | 1,209 | [
"VisIt"
] | d4888a47755b2e87a3b068c8dab897b8be7af40612fee78b01661ab2e70de54d |
#!/usr/bin/python
#Audio Tools, a module and set of tools for manipulating audio data
#Copyright (C) 2007-2012 Brian Langenberger
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import unittest
import audiotools
import subprocess
import cStringIO
import unicodedata
import tempfile
import os
import os.path
import shutil
import time
import test_streams
from hashlib import md5
from test import (parser, BLANK_PCM_Reader, Combinations, Possibilities,
EXACT_BLANK_PCM_Reader, RANDOM_PCM_Reader,
TEST_COVER1, TEST_COVER2, TEST_COVER3, TEST_COVER4,
HUGE_BMP)
class InvalidTemporaryFile:
    """Stand-in for a NamedTemporaryFile whose path is deliberately bad.

    Only mimics the .name attribute that the utilities under test read.
    """

    def __init__(self, bad_path):
        # expose the broken path exactly as a real temp file would
        self.name = bad_path
def do_nothing(self):
    """No-op stand-in for a test method whose config section is disabled."""
    return None
#add a bunch of decorator metafunctions like LIB_CORE
#which can be wrapped around individual tests as needed
#
#each config section/option pair becomes a module-level name such as
#UTIL_CD2TRACK; when the option is enabled the decorator is a pass-through,
#otherwise it replaces the test method with do_nothing, disabling it
for section in parser.sections():
    for option in parser.options(section):
        if (parser.getboolean(section, option)):
            vars()["%s_%s" % (section.upper(),
                              option.upper())] = lambda function: function
        else:
            vars()["%s_%s" % (section.upper(),
                              option.upper())] = lambda function: do_nothing
class UtilTest(unittest.TestCase):
    """Base class for command-line utility tests.

    Runs an executable in a subprocess and checks its stdout/stderr
    output line-by-line against queued expected lines.
    """

    def __init__(self, *args, **kwargs):
        unittest.TestCase.__init__(self, *args, **kwargs)
        # queued (stream name, expected unicode line) pairs
        self.line_checks = []

    #takes a list of argument strings
    #returns a returnval integer
    #self.stdout and self.stderr are set to file-like cStringIO objects
    def __run_app__(self, arguments):
        sub = subprocess.Popen(arguments,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
        # NOTE(review): reading stdout to completion before stderr can
        # deadlock if the child fills the stderr pipe buffer first;
        # presumably acceptable since these utilities emit little output
        self.stdout = cStringIO.StringIO(sub.stdout.read())
        self.stderr = cStringIO.StringIO(sub.stderr.read())
        sub.stdout.close()
        sub.stderr.close()
        returnval = sub.wait()
        return returnval

    def __add_check__(self, stream, unicode_string):
        # queue a check that the next line of 'stream' equals the string
        self.line_checks.append((stream, unicode_string))

    def __run_checks__(self):
        """consume all queued checks against the captured output"""

        for (stream, expected_output) in self.line_checks:
            # normalize both sides to NFC so composed/decomposed
            # Unicode forms compare equal
            stream_line = unicodedata.normalize(
                'NFC',
                getattr(self,
                        stream).readline().decode(audiotools.IO_ENCODING))
            expected_line = unicodedata.normalize(
                'NFC',
                expected_output) + unicode(os.linesep)
            self.assertEqual(stream_line, expected_line,
                             "%s != %s" % (
                                 repr(stream_line),
                                 repr(expected_line)))
        self.line_checks = []

    def __clear_checks__(self):
        # drop any queued checks without running them
        self.line_checks = []

    def filename(self, s):
        # decode a raw filesystem path to unicode for display
        return s.decode(audiotools.FS_ENCODING, 'replace')

    def __queue_output__(self, s):
        self.__add_check__("stdout", s)

    def __check_output__(self, s):
        self.__queue_output__(s)
        self.__run_checks__()

    def __queue_info__(self, s):
        self.__add_check__("stderr", s)

    def __check_info__(self, s):
        self.__queue_info__(s)
        self.__run_checks__()

    def __queue_error__(self, s):
        # error lines carry the utilities' "*** Error: " prefix
        self.__add_check__("stderr", u"*** Error: " + s)

    def __check_error__(self, s):
        self.__queue_error__(s)
        self.__run_checks__()

    def __queue_warning__(self, s):
        self.__add_check__("stderr", u"*** Warning: " + s)

    def __check_warning__(self, s):
        self.__queue_warning__(s)
        self.__run_checks__()

    def __check_usage__(self, executable, s):
        # NOTE(review): 'executable' is accepted but unused here
        self.__add_check__("stderr", u"*** Usage: " + s)
        self.__run_checks__()
class cd2track(UtilTest):
    """tests for the cd2track CD-image extraction utility"""

    @UTIL_CD2TRACK
    def setUp(self):
        """build a 3 track CD image (cue + bin) and scratch directories"""

        self.type = audiotools.FlacAudio
        self.quality = "1"
        self.input_dir = tempfile.mkdtemp()
        # 793800 PCM frames of stereo sine wave, split into
        # 3 tracks by the cuesheet below
        self.stream = test_streams.Sine16_Stereo(793800, 44100,
                                                 8820.0, 0.70,
                                                 4410.0, 0.29, 1.0)
        self.cue_file = os.path.join(self.input_dir, "CDImage.cue")
        self.bin_file = os.path.join(self.input_dir, "CDImage.bin")
        f = open(self.cue_file, "w")
        f.write('FILE "CDImage.wav" WAVE\r\n TRACK 01 AUDIO\r\n ISRC JPPI00652340\r\n INDEX 01 00:00:00\r\n TRACK 02 AUDIO\r\n ISRC JPPI00652349\r\n INDEX 00 00:03:00\r\n INDEX 01 00:05:00\r\n TRACK 03 AUDIO\r\n ISRC JPPI00652341\r\n INDEX 00 00:9:00\r\n INDEX 01 00:11:00\r\n')
        f.close()
        # NOTE(review): opened "w" rather than "wb" -- harmless on POSIX
        # but would corrupt the raw PCM dump on Windows
        f = open(self.bin_file, "w")
        audiotools.transfer_framelist_data(self.stream, f.write)
        f.close()
        self.output_dir = tempfile.mkdtemp()
        self.format = "%(track_number)2.2d.%(suffix)s"
        self.cwd_dir = tempfile.mkdtemp()
        self.original_dir = os.getcwd()
        os.chdir(self.cwd_dir)
        # a directory with all permissions removed, for error-path tests
        self.unwritable_dir = tempfile.mkdtemp()
        os.chmod(self.unwritable_dir, 0)

    @UTIL_CD2TRACK
    def tearDown(self):
        """restore cwd and remove all scratch files/directories"""

        os.chdir(self.original_dir)
        for f in os.listdir(self.input_dir):
            os.unlink(os.path.join(self.input_dir, f))
        os.rmdir(self.input_dir)
        for f in os.listdir(self.output_dir):
            os.unlink(os.path.join(self.output_dir, f))
        os.rmdir(self.output_dir)
        for f in os.listdir(self.cwd_dir):
            os.unlink(os.path.join(self.cwd_dir, f))
        os.rmdir(self.cwd_dir)
        # restore permissions so the directory can be removed
        os.chmod(self.unwritable_dir, 0700)
        os.rmdir(self.unwritable_dir)

    def populate_options(self, options):
        """expand a list of option flags into full argument pairs
        using valid fixture values"""

        populated = ["--no-musicbrainz", "--no-freedb"]
        for option in options:
            if (option == '-t'):
                populated.append(option)
                populated.append(self.type.NAME)
            elif (option == '-q'):
                populated.append(option)
                populated.append(self.quality)
            elif (option == '-d'):
                populated.append(option)
                populated.append(self.output_dir)
            elif (option == '--format'):
                populated.append(option)
                populated.append(self.format)
            elif (option == '--album-number'):
                populated.append(option)
                populated.append(str(8))
            elif (option == '--album-total'):
                populated.append(option)
                populated.append(str(9))
            else:
                populated.append(option)
        return populated

    def clean_output_dirs(self):
        # remove files generated by a previous option combination
        for f in os.listdir(self.output_dir):
            os.unlink(os.path.join(self.output_dir, f))
        for f in os.listdir(self.cwd_dir):
            os.unlink(os.path.join(self.cwd_dir, f))

    @UTIL_CD2TRACK
    def test_options(self):
        """exercise every combination of valid cd2track options"""

        from audiotools.text import (ERR_UNSUPPORTED_COMPRESSION_MODE,
                                     LAB_CD2TRACK_PROGRESS)
        all_options = ["-t", "-q", "-d", "--format",
                       "--album-number", "--album-total"]
        for count in xrange(1, len(all_options) + 1):
            for options in Combinations(all_options, count):
                self.clean_output_dirs()
                options = self.populate_options(options)
                if ("-t" in options):
                    output_type = self.type
                else:
                    output_type = audiotools.TYPE_MAP[audiotools.DEFAULT_TYPE]
                # quality "1" is only valid for some formats;
                # expect an error when the output type rejects it
                if (("-q" in options) and
                    ("1" not in output_type.COMPRESSION_MODES)):
                    self.assertEqual(
                        self.__run_app__(["cd2track", "-V", "normal",
                                          "-c", self.cue_file] +
                                         options), 1)
                    self.__check_error__(
                        ERR_UNSUPPORTED_COMPRESSION_MODE %
                        {"quality": "1",
                         "type": output_type.NAME})
                    continue
                self.assertEqual(
                    self.__run_app__(["cd2track", "-V", "normal",
                                      "-c", self.cue_file] +
                                     options), 0)
                if ("--format" in options):
                    output_format = self.format
                else:
                    output_format = None
                if ("-d" in options):
                    output_dir = self.output_dir
                else:
                    output_dir = "."
                base_metadata = audiotools.MetaData(track_total=3)
                if ("--album-number" in options):
                    base_metadata.album_number = 8
                if ("--album-total" in options):
                    base_metadata.album_total = 9
                # predict the filenames the utility should have produced
                output_filenames = []
                for i in xrange(3):
                    base_metadata.track_number = i + 1
                    output_filenames.append(
                        output_type.track_name(
                            "",
                            base_metadata,
                            output_format))
                #check that the output is being generated correctly
                for (i, path) in enumerate(output_filenames):
                    self.__check_info__(
                        audiotools.output_progress(
                            LAB_CD2TRACK_PROGRESS %
                            {"track_number": i + 1,
                             "filename": audiotools.Filename(
                                 os.path.join(output_dir, path))},
                            i + 1, len(output_filenames)))
                #rip log is generated afterward as a table
                #FIXME - check table of rip log?
                #make sure no track data has been lost
                output_tracks = [
                    audiotools.open(os.path.join(output_dir, filename))
                    for filename in output_filenames]
                self.assertEqual(len(output_tracks), 3)
                self.stream.reset()
                # concatenation of the ripped tracks must equal the source
                self.assert_(
                    audiotools.pcm_frame_cmp(
                        audiotools.PCMCat([t.to_pcm() for t in output_tracks]),
                        self.stream) is None)
                #make sure metadata fits our expectations
                for i in xrange(len(output_tracks)):
                    metadata = output_tracks[i].get_metadata()
                    if (metadata is not None):
                        self.assertEqual(metadata.track_name, None)
                        self.assertEqual(metadata.album_name, None)
                        self.assertEqual(metadata.artist_name, None)
                        self.assertEqual(metadata.track_number, i + 1)
                        self.assertEqual(metadata.track_total, 3)
                        if ("--album-number" in options):
                            self.assertEqual(metadata.album_number, 8)
                        else:
                            self.assertEqual(metadata.album_number, None)
                        if ("--album-total" in options):
                            self.assertEqual(metadata.album_total, 9)
                        else:
                            self.assertEqual(metadata.album_total, None)

    @UTIL_CD2TRACK
    def test_unicode(self):
        """check non-ASCII output directories and format strings"""

        from shutil import rmtree
        for (output_directory,
             format_string) in Possibilities(
            ["testdir",
             u'abc\xe0\xe7\xe8\u3041\u3044\u3046'.encode('utf-8')],
            ["%(track_number)d.%(suffix)s",
             u'%(track_number)d - abc\xe0\xe7\xe8\u3041\u3044\u3046.%(suffix)s'.encode('utf-8')]):
            if (os.path.isdir(output_directory)):
                rmtree(output_directory)
            self.assertEqual(
                self.__run_app__(
                    ["cd2track", "-c", self.cue_file,
                     "--type", "flac",
                     "--format", format_string,
                     "--dir", output_directory]), 0)
            tracks = [audiotools.open(
                os.path.join(output_directory,
                             format_string % {"track_number": i,
                                              "suffix": "flac"}))
                      for i in range(1, 4)]
            # total length must match the source CD image exactly
            self.assertEqual(sum([t.total_frames() for t in tracks]),
                             793800)
            if (os.path.isdir(output_directory)):
                rmtree(output_directory)

    def populate_bad_options(self, options):
        """expand a list of option flags into argument pairs
        using deliberately invalid values"""

        populated = ["--no-musicbrainz", "--no-freedb"]
        for option in sorted(options):
            if (option == '-t'):
                populated.append(option)
                populated.append("foo")
            elif (option == '-q'):
                populated.append(option)
                populated.append("bar")
            elif (option == '-d'):
                populated.append(option)
                populated.append(self.unwritable_dir)
            elif (option == '--format'):
                populated.append(option)
                populated.append("%(foo)s.%(suffix)s")
            elif (option == '--album-number'):
                populated.append(option)
                populated.append("foo")
            elif (option == '--album-total'):
                populated.append(option)
                populated.append("bar")
            else:
                populated.append(option)
        return populated

    @UTIL_CD2TRACK
    def test_errors(self):
        """exercise cd2track's error paths with invalid option values"""

        from audiotools.text import (ERR_DUPLICATE_OUTPUT_FILE,
                                     ERR_UNSUPPORTED_COMPRESSION_MODE,
                                     ERR_UNKNOWN_FIELD,
                                     LAB_SUPPORTED_FIELDS,
                                     ERR_ENCODING_ERROR,
                                     )
        # a format with no track_number produces colliding filenames
        self.assertEqual(
            self.__run_app__(["cd2track", "-c", self.cue_file,
                              "--format=foo"]), 1)
        self.__check_error__(ERR_DUPLICATE_OUTPUT_FILE %
                             (audiotools.Filename("foo"),))
        all_options = ["-t", "-q", "-d", "--format",
                       "--album-number", "--album-total"]
        for count in xrange(1, len(all_options) + 1):
            for options in Combinations(all_options, count):
                options = self.populate_bad_options(options)
                # unknown type "foo" is rejected by option parsing (exit 2)
                if ("-t" in options):
                    self.assertEqual(
                        self.__run_app__(["cd2track", "-c", self.cue_file] +
                                         options),
                        2)
                    continue
                else:
                    output_type = audiotools.TYPE_MAP[audiotools.DEFAULT_TYPE]
                # non-integer album number/total also fail option parsing
                if (("--album-number" in options) or
                    ("--album-total" in options)):
                    self.assertEqual(
                        self.__run_app__(["cd2track", "-c", self.cue_file] +
                                         options),
                        2)
                    continue
                self.assertEqual(
                    self.__run_app__(["cd2track", "-c", self.cue_file] +
                                     options),
                    1)
                if ("-q" in options):
                    self.__check_error__(
                        ERR_UNSUPPORTED_COMPRESSION_MODE %
                        {"quality": "bar",
                         "type": audiotools.DEFAULT_TYPE})
                    continue
                if ("--format" in options):
                    # unknown %(foo)s field triggers the supported-field list
                    self.__check_error__(
                        ERR_UNKNOWN_FIELD % ("foo"))
                    self.__check_info__(LAB_SUPPORTED_FIELDS)
                    for field in sorted(audiotools.MetaData.FIELDS + \
                                        ("album_track_number", "suffix")):
                        if (field == 'track_number'):
                            self.__check_info__(u"%(track_number)2.2d")
                        else:
                            self.__check_info__(u"%%(%s)s" % (field))
                    self.__check_info__(u"%(basename)s")
                    continue
                if ("-d" in options):
                    # unwritable output directory fails at encode time
                    output_path = os.path.join(
                        self.unwritable_dir,
                        output_type.track_name(
                            "",
                            audiotools.MetaData(track_number=1,
                                                track_total=3)))
                    self.__check_error__(
                        ERR_ENCODING_ERROR %
                        (audiotools.Filename(output_path),))
                    continue
class coverdump(UtilTest):
    """tests for the coverdump embedded-image extraction utility"""

    @UTIL_COVERDUMP
    def setUp(self):
        """build two FLAC tracks with embedded PNG cover images"""

        self.type = audiotools.FlacAudio
        self.input_file1 = tempfile.NamedTemporaryFile(
            suffix="." + self.type.SUFFIX)
        self.track1 = self.type.from_pcm(self.input_file1.name,
                                         BLANK_PCM_Reader(1))
        self.input_file2 = tempfile.NamedTemporaryFile(
            suffix="." + self.type.SUFFIX)
        self.track2 = self.type.from_pcm(self.input_file2.name,
                                         BLANK_PCM_Reader(1))
        self.prefix = "PREFIX_"
        self.output_dir = tempfile.mkdtemp()
        self.cwd_dir = tempfile.mkdtemp()
        self.original_dir = os.getcwd()
        os.chdir(self.cwd_dir)
        metadata = audiotools.MetaData(track_name=u"Track")
        # track1: 10 images, 2 per image type (i / 2 maps to the type code)
        self.images1 = []
        for i in xrange(10):
            import Image  # PIL, imported lazily
            img = Image.new("RGB", (100, 100), "#%2.2X%2.2X%2.2X" % (i, i, i))
            data = cStringIO.StringIO()
            img.save(data, "PNG")
            img = audiotools.Image.new(data.getvalue(), u"", i / 2)
            self.images1.append(img)
            metadata.add_image(img)
        self.track1.set_metadata(metadata)
        metadata = audiotools.MetaData(track_name=u"Track")
        # track2: 5 images, one per image type
        self.images2 = []
        for i in xrange(5):
            import Image  # PIL, imported lazily
            img = Image.new("RGB", (100, 100), "#%2.2X%2.2X%2.2X" %
                            (100 + i, 100 + i, 100 + i))
            data = cStringIO.StringIO()
            img.save(data, "PNG")
            img = audiotools.Image.new(data.getvalue(), u"", i)
            self.images2.append(img)
            metadata.add_image(img)
        self.track2.set_metadata(metadata)
        # base output filename per audiotools image type code 0-4
        self.filename_types = ("front_cover", "back_cover",
                               "leaflet", "media", "other")

    @UTIL_COVERDUMP
    def tearDown(self):
        """restore cwd and remove all scratch files/directories"""

        os.chdir(self.original_dir)
        for f in os.listdir(self.output_dir):
            os.unlink(os.path.join(self.output_dir, f))
        os.rmdir(self.output_dir)
        for f in os.listdir(self.cwd_dir):
            os.unlink(os.path.join(self.cwd_dir, f))
        os.rmdir(self.cwd_dir)
        self.input_file1.close()
        self.input_file2.close()

    def clean_output_dir(self):
        # remove files generated by a previous option combination
        for f in os.listdir(self.output_dir):
            os.unlink(os.path.join(self.output_dir, f))

    def populate_options(self, options):
        """expand a list of option flags into full argument pairs"""

        populated = []
        for option in options:
            if (option == "-d"):
                populated.append(option)
                populated.append(self.output_dir)
            elif (option == "-p"):
                populated.append(option)
                populated.append(self.prefix)
            else:
                populated.append(option)
        return populated

    @UTIL_COVERDUMP
    def test_options(self):
        """exercise every combination of -d/-p options on both tracks"""

        from audiotools.text import (LAB_ENCODE,
                                     )
        all_options = ["-d", "-p"]
        for count in xrange(len(all_options) + 1):
            for options in Combinations(all_options, count):
                options = self.populate_options(options)
                self.clean_output_dir()
                self.assertEqual(
                    self.__run_app__(["coverdump", "-V", "normal",
                                      self.track1.filename] + options),
                    0)
                if ("-d" in options):
                    output_directory = self.output_dir
                else:
                    output_directory = "."
                # track1 has 2 images per type, so filenames are numbered
                template = "%(prefix)s%(filename)s%(filenum)2.2d.%(suffix)s"
                for (i, image) in enumerate(self.images1):
                    if ("-p" in options):
                        output_filename = template % {
                            "prefix": "PREFIX_",
                            "filename": self.filename_types[image.type],
                            "filenum": (i % 2) + 1,
                            "suffix": "png"}
                    else:
                        output_filename = template % {
                            "prefix": "",
                            "filename": self.filename_types[image.type],
                            "filenum": (i % 2) + 1,
                            "suffix": "png"}
                    if ("-d" in options):
                        output_path = os.path.join(self.output_dir,
                                                   output_filename)
                    else:
                        output_path = os.path.join(".", output_filename)
                    self.__check_info__(
                        LAB_ENCODE %
                        {"source": audiotools.Filename(self.track1.filename),
                         "destination": audiotools.Filename(output_path)})
                    output_image = audiotools.Image.new(
                        open(output_path, "rb").read(),
                        u"",
                        i / 2)
                    self.assertEqual(output_image, image)
                self.clean_output_dir()
                self.assertEqual(
                    self.__run_app__(["coverdump", "-V", "normal",
                                      self.track2.filename] + options),
                    0)
                if ("-d" in options):
                    output_directory = self.output_dir
                else:
                    output_directory = "."
                # track2 has 1 image per type, so no numbering in filenames
                template = "%(prefix)s%(filename)s.%(suffix)s"
                for (i, image) in enumerate(self.images2):
                    if ("-p" in options):
                        output_filename = template % {
                            "prefix": "PREFIX_",
                            "filename": self.filename_types[image.type],
                            "suffix": "png"}
                    else:
                        output_filename = template % {
                            "prefix": "",
                            "filename": self.filename_types[image.type],
                            "suffix": "png"}
                    if ("-d" in options):
                        output_path = os.path.join(self.output_dir,
                                                   output_filename)
                    else:
                        output_path = os.path.join(".", output_filename)
                    self.__check_info__(
                        LAB_ENCODE %
                        {"source": audiotools.Filename(self.track2.filename),
                         "destination": audiotools.Filename(output_path)})
                    output_image = audiotools.Image.new(
                        open(output_path, "rb").read(),
                        u"",
                        i)
                    self.assertEqual(output_image, image)

    @UTIL_COVERDUMP
    def test_unicode(self):
        """check non-ASCII directories, filenames and prefixes"""

        from shutil import rmtree
        for (output_directory,
             file_path,
             prefix) in Possibilities(
            ["testdir",  #check --dir
             u'abc\xe0\xe7\xe8\u3041\u3044\u3046'.encode('utf-8')],
            ["test.flac",  #check filename arguments
             u'abc\xe0\xe7\xe8\u3041\u3044\u3046.flac'.encode('utf-8')],
            [None,  #check --prefix
             "prefix_",
             u'abc\xe0\xe7\xe8\u3041\u3044\u3046_'.encode('utf-8')]):
            if (os.path.isdir(output_directory)):
                rmtree(output_directory)
            if (os.path.isfile(file_path)):
                os.unlink(file_path)
            track = audiotools.FlacAudio.from_pcm(
                file_path,
                BLANK_PCM_Reader(1))
            metadata = track.get_metadata()
            metadata.add_image(audiotools.Image.new(TEST_COVER1,
                                                    u"",
                                                    0))
            track.update_metadata(metadata)
            self.assertEqual(
                self.__run_app__(
                    ["coverdump",
                     "--dir", output_directory] +
                    (["--prefix", prefix] if prefix is not None else []) +
                    [file_path]), 0)
            # TEST_COVER1 is a JPEG, so the dumped file gets a .jpg suffix
            self.assertEqual(
                os.path.isfile(
                    os.path.join(output_directory,
                                 (prefix if prefix is not None else "") +
                                 "front_cover.jpg")), True)
            if (os.path.isdir(output_directory)):
                rmtree(output_directory)
            if (os.path.isfile(file_path)):
                os.unlink(file_path)

    @UTIL_COVERDUMP
    def test_errors(self):
        """exercise coverdump's error paths"""

        from audiotools.text import (ERR_1_FILE_REQUIRED,
                                     ERR_ENCODING_ERROR,
                                     ERR_OUTPUT_IS_INPUT)
        #check no input files
        self.assertEqual(self.__run_app__(
            ["coverdump", "-V", "normal"]), 1)
        self.__check_error__(ERR_1_FILE_REQUIRED)
        #check multiple input files
        self.assertEqual(self.__run_app__(
            ["coverdump", "-V", "normal",
             self.track1.filename, self.track2.filename]), 1)
        self.__check_error__(ERR_1_FILE_REQUIRED)
        #check unwritable output dir
        old_mode = os.stat(self.output_dir).st_mode
        try:
            os.chmod(self.output_dir, 0)
            self.assertEqual(self.__run_app__(
                ["coverdump", "-V", "normal", "-d", self.output_dir,
                 self.track1.filename]), 1)
            self.__check_error__(
                ERR_ENCODING_ERROR %
                (audiotools.Filename(os.path.join(self.output_dir,
                                                  "front_cover01.png")),))
        finally:
            os.chmod(self.output_dir, old_mode)
        #check unwritable cwd
        old_mode = os.stat(self.cwd_dir).st_mode
        try:
            os.chmod(self.cwd_dir, 0)
            self.assertEqual(self.__run_app__(
                ["coverdump", "-V", "normal",
                 self.track1.filename]), 1)
            self.__check_error__(
                ERR_ENCODING_ERROR %
                (audiotools.Filename("front_cover01.png"),))
        finally:
            os.chmod(self.cwd_dir, old_mode)
        #check input file same as output file
        track = audiotools.FlacAudio.from_pcm(
            os.path.join(self.output_dir, "front_cover.jpg"),
            BLANK_PCM_Reader(1))
        metadata = track.get_metadata()
        metadata.add_image(audiotools.Image.new(TEST_COVER1, u"", 0))
        track.update_metadata(metadata)
        self.assertEqual(self.__run_app__(
            ["coverdump", "-V", "normal",
             "-d", self.output_dir, track.filename]), 1)
        self.__check_error__(
            ERR_OUTPUT_IS_INPUT %
            (audiotools.Filename(track.filename),))
class dvdainfo(UtilTest):
    """tests for the dvdainfo DVD-Audio inspection utility"""

    @UTIL_DVDAINFO
    def setUp(self):
        # a directory containing no AUDIO_TS.IFO file at all
        self.invalid_dir1 = tempfile.mkdtemp()
        # a directory whose AUDIO_TS.IFO is 1000 bytes of random garbage
        self.invalid_dir2 = tempfile.mkdtemp()
        ifo = open(os.path.join(self.invalid_dir2, "AUDIO_TS.IFO"), "wb")
        try:
            ifo.write(os.urandom(1000))
        finally:
            ifo.close()

    @UTIL_DVDAINFO
    def tearDown(self):
        os.rmdir(self.invalid_dir1)
        os.unlink(os.path.join(self.invalid_dir2, "AUDIO_TS.IFO"))
        os.rmdir(self.invalid_dir2)

    @UTIL_DVDAINFO
    def test_errors(self):
        """dvdainfo should reject missing or invalid AUDIO_TS input"""

        from audiotools.text import (ERR_NO_AUDIO_TS,
                                     ERR_DVDA_IOERROR_AUDIO_TS,
                                     ERR_DVDA_INVALID_AUDIO_TS)

        #test with no -A option
        self.assertEqual(self.__run_app__(["dvdainfo"]), 1)
        self.__check_error__(ERR_NO_AUDIO_TS)

        #test with an invalid AUDIO_TS dir
        self.assertEqual(
            self.__run_app__(["dvdainfo", "-A", self.invalid_dir1]), 1)
        self.__check_error__(ERR_DVDA_IOERROR_AUDIO_TS)

        #test with an invalid AUDIO_TS/AUDIO_TS.IFO file
        self.assertEqual(
            self.__run_app__(["dvdainfo", "-A", self.invalid_dir2]), 1)
        self.__check_error__(ERR_DVDA_INVALID_AUDIO_TS)
class dvda2track(UtilTest):
    """tests for the dvda2track DVD-Audio extraction utility"""

    @UTIL_DVDA2TRACK
    def setUp(self):
        # a directory containing no AUDIO_TS.IFO file at all
        self.invalid_dir1 = tempfile.mkdtemp()
        # a directory whose AUDIO_TS.IFO is 1000 bytes of random garbage
        self.invalid_dir2 = tempfile.mkdtemp()
        ifo = open(os.path.join(self.invalid_dir2, "AUDIO_TS.IFO"), "wb")
        try:
            ifo.write(os.urandom(1000))
        finally:
            ifo.close()

    @UTIL_DVDA2TRACK
    def tearDown(self):
        os.rmdir(self.invalid_dir1)
        os.unlink(os.path.join(self.invalid_dir2, "AUDIO_TS.IFO"))
        os.rmdir(self.invalid_dir2)

    @UTIL_DVDA2TRACK
    def test_errors(self):
        """dvda2track should reject missing or invalid AUDIO_TS input"""

        from audiotools.text import (ERR_NO_AUDIO_TS,
                                     ERR_DVDA_IOERROR_AUDIO_TS,
                                     ERR_DVDA_INVALID_AUDIO_TS)

        for (arguments, message) in [
            # no -A option given at all
            (["dvda2track"],
             ERR_NO_AUDIO_TS),
            # -A pointed at a directory with no AUDIO_TS.IFO
            (["dvda2track", "-A", self.invalid_dir1],
             ERR_DVDA_IOERROR_AUDIO_TS),
            # -A pointed at a directory with a corrupt AUDIO_TS.IFO
            (["dvda2track", "-A", self.invalid_dir2],
             ERR_DVDA_INVALID_AUDIO_TS)]:
            self.assertEqual(self.__run_app__(arguments), 1)
            self.__check_error__(message)

        #FIXME
        #It's difficult to test an invalid --title or invalid --xmcd
        #without a valid AUDIO_TS.IFO file,
        #and a set of present IFO files and AOB files.
        #I'll need a way to generate synthetic ones.
class track2track(UtilTest):
    @UTIL_TRACK2TRACK
    def setUp(self):
        """pick distinct input/output formats and build test fixtures"""

        #input format should be something other than the user's default
        #and should support embedded metadata
        for self.input_format in [audiotools.ALACAudio,
                                  audiotools.AiffAudio]:
            if (self.input_format is not audiotools.DEFAULT_TYPE):
                break
        #output format shouldn't be the user's default, the input format
        #and should support embedded images and ReplayGain tags
        # NOTE(review): this loop re-tests input_format rather than
        # output_format -- looks like a copy/paste slip; left as-is
        for self.output_format in [audiotools.FlacAudio,
                                   audiotools.WavPackAudio]:
            if (self.input_format is not audiotools.DEFAULT_TYPE):
                break
        self.input_dir = tempfile.mkdtemp()
        self.track1 = self.input_format.from_pcm(
            os.path.join(self.input_dir, "01.%s" % (self.input_format.SUFFIX)),
            BLANK_PCM_Reader(1))
        self.track_metadata = audiotools.MetaData(track_name=u"Track 1",
                                                  track_number=1,
                                                  album_name=u"Album",
                                                  artist_name=u"Artist")
        self.cover = audiotools.Image.new(TEST_COVER1, u"", 0)
        self.track_metadata.add_image(self.cover)
        # audiotools.Image.new(open("bigpng.png", "rb").read(), u"", 0))
        self.track1.set_metadata(self.track_metadata)
        self.output_dir = tempfile.mkdtemp()
        self.output_file = tempfile.NamedTemporaryFile(
            suffix="." + self.output_format.SUFFIX)
        self.format = "%(track_number)2.2d - %(track_name)s.%(suffix)s"
        self.type = self.output_format.NAME
        self.quality = self.output_format.COMPRESSION_MODES[0]
        self.cwd_dir = tempfile.mkdtemp()
        self.original_dir = os.getcwd()
        os.chdir(self.cwd_dir)
        # fixtures for the error-path tests: an unwritable directory,
        # an impossible output path and a truncated (corrupt) input track
        self.unwritable_dir = tempfile.mkdtemp()
        os.chmod(self.unwritable_dir, 0)
        self.unwritable_file = "/dev/null/foo.%s" % (self.output_format.SUFFIX)
        f = open(os.path.join(self.input_dir,
                              "broken.%s" % (self.input_format.SUFFIX)), "wb")
        f.write(open(self.track1.filename, "rb").read()[0:-10])
        f.close()
        self.broken_track1 = audiotools.open(
            os.path.join(self.input_dir,
                         "broken.%s" % (self.input_format.SUFFIX)))

    #Why a static set of input/output arguments for each set of options?
    #Since track2track uses the standard interface for everything,
    #we're only testing that the options work.
    #The interface itself is tested at a lower level
    #in the test_core.py or test_formats.py modules.
    @UTIL_TRACK2TRACK
    def tearDown(self):
        """restore cwd and remove all scratch files/directories"""

        os.chdir(self.original_dir)
        for f in os.listdir(self.input_dir):
            os.unlink(os.path.join(self.input_dir, f))
        os.rmdir(self.input_dir)
        for f in os.listdir(self.output_dir):
            os.unlink(os.path.join(self.output_dir, f))
        os.rmdir(self.output_dir)
        for f in os.listdir(self.cwd_dir):
            os.unlink(os.path.join(self.cwd_dir, f))
        os.rmdir(self.cwd_dir)
        self.output_file.close()
        # restore permissions so the directory can be removed
        os.chmod(self.unwritable_dir, 0700)
        os.rmdir(self.unwritable_dir)
def clean_output_dirs(self):
for f in os.listdir(self.output_dir):
os.unlink(os.path.join(self.output_dir, f))
for f in os.listdir(self.cwd_dir):
os.unlink(os.path.join(self.cwd_dir, f))
f = open(self.output_file.name, "wb")
f.close()
def populate_options(self, options):
populated = []
for option in sorted(options):
if (option == '-t'):
populated.append(option)
populated.append(self.type)
elif (option == '-q'):
populated.append(option)
populated.append(self.quality)
elif (option == '-d'):
populated.append(option)
populated.append(self.output_dir)
elif (option == '--format'):
populated.append(option)
populated.append(self.format)
elif (option == '-o'):
populated.append(option)
populated.append(self.output_file.name)
else:
populated.append(option)
return populated
def populate_bad_options(self, options):
populated = []
for option in sorted(options):
if (option == '-t'):
populated.append(option)
populated.append("foo")
elif (option == '-q'):
populated.append(option)
populated.append("bar")
elif (option == '-d'):
populated.append(option)
populated.append(self.unwritable_dir)
elif (option == '--format'):
populated.append(option)
populated.append("%(foo)s.%(suffix)s")
elif (option == '-o'):
populated.append(option)
populated.append(self.unwritable_file)
elif (option == '-x'):
populated.append(option)
populated.append(os.devnull)
elif (option == '-j'):
populated.append(option)
populated.append(str(0))
else:
populated.append(option)
return populated
    @UTIL_TRACK2TRACK
    def test_options(self):
        """exercise every combination of valid track2track options"""

        from audiotools.text import (ERR_TRACK2TRACK_O_AND_D,
                                     ERR_TRACK2TRACK_O_AND_D_SUGGESTION,
                                     ERR_TRACK2TRACK_O_AND_FORMAT,
                                     ERR_UNSUPPORTED_COMPRESSION_MODE,
                                     LAB_ENCODE,
                                     RG_REPLAYGAIN_ADDED,
                                     RG_REPLAYGAIN_APPLIED)
        messenger = audiotools.Messenger("track2track", None)
        all_options = ["-t", "-q", "-d", "--format", "-o",
                       "--replay-gain", "--no-replay-gain"]
        for count in xrange(1, len(all_options) + 1):
            for options in Combinations(all_options, count):
                self.clean_output_dirs()
                self.__clear_checks__()
                options = self.populate_options(options) + \
                    ["-V", "normal", "-j", "1", self.track1.filename]
                if (("-d" in options) and ("-o" in options)):
                    #-d and -o trigger an error
                    self.assertEqual(
                        self.__run_app__(["track2track"] + options), 1)
                    self.__check_error__(ERR_TRACK2TRACK_O_AND_D)
                    self.__check_info__(ERR_TRACK2TRACK_O_AND_D_SUGGESTION)
                    continue
                if (("--format" in options) and ("-o" in options)):
                    # --format is ignored (with a warning) when -o is given
                    self.__queue_warning__(ERR_TRACK2TRACK_O_AND_FORMAT)
                # determine the expected output format class
                if ('-t' in options):
                    output_class = audiotools.TYPE_MAP[
                        options[options.index('-t') + 1]]
                elif ("-o" in options):
                    output_class = self.output_format
                else:
                    output_class = audiotools.TYPE_MAP[
                        audiotools.DEFAULT_TYPE]
                if (("-q" in options) and
                    (options[options.index("-q") + 1] not in
                     output_class.COMPRESSION_MODES)):
                    self.assertEqual(
                        self.__run_app__(["track2track"] + options), 1)
                    self.__check_error__(
                        ERR_UNSUPPORTED_COMPRESSION_MODE %
                        {"quality": options[options.index("-q") + 1],
                         "type": output_class.NAME})
                    continue
                if ('--format' in options):
                    output_format = options[options.index('--format') + 1]
                else:
                    output_format = None
                metadata = self.track1.get_metadata()
                # predict where the converted track should land
                if ("-o" in options):
                    output_path = self.output_file.name
                elif ("-d" in options):
                    output_path = os.path.join(
                        self.output_dir,
                        output_class.track_name("", metadata, output_format))
                else:
                    output_path = os.path.join(
                        ".",
                        output_class.track_name("", metadata, output_format))
                self.assertEqual(
                    self.__run_app__(["track2track"] + options), 0)
                self.assert_(os.path.isfile(output_path))
                if ("-o" not in options):
                    self.__check_info__(
                        LAB_ENCODE %
                        {"source":
                         audiotools.Filename(self.track1.filename),
                         "destination":
                         audiotools.Filename(output_path)})
                track2 = audiotools.open(output_path)
                self.assertEqual(track2.NAME, output_class.NAME)
                # lossless-to-lossless conversion must be bit-exact
                # (unless ReplayGain application altered the samples)
                if (self.track1.lossless() and
                    track2.lossless() and not
                    (output_class.supports_replay_gain() and
                     "--replay-gain" in options)):
                    self.assert_(
                        audiotools.pcm_frame_cmp(self.track1.to_pcm(),
                                                 track2.to_pcm()) is None)
                if (track2.get_metadata() is not None):
                    self.assertEqual(track2.get_metadata(), metadata)
                    image = track2.get_metadata().images()[0]
                    self.assertEqual(image.width, self.cover.width)
                    self.assertEqual(image.height, self.cover.height)
                if (output_class.supports_replay_gain()):
                    if (output_class.lossless_replay_gain()):
                        if (("-o" not in options) and
                            audiotools.ADD_REPLAYGAIN and
                            ("--no-replay-gain" not in options)):
                            self.__check_info__(RG_REPLAYGAIN_ADDED)
                            self.assert_(track2.replay_gain() is not None)
                    else:
                        if ("--replay-gain" in options):
                            self.__check_info__(RG_REPLAYGAIN_APPLIED)
    @UTIL_TRACK2TRACK
    def test_unicode(self):
        """check non-ASCII directories, format strings and filenames"""

        from shutil import rmtree
        # first pass: --dir/--format combinations
        for (output_directory,
             format_string,
             file_path) in Possibilities(
            ["testdir",  #check --dir
             u'abc\xe0\xe7\xe8\u3041\u3044\u3046'.encode('utf-8')],
            ["new_file.flac",  #check --format]
             u'abc\xe0\xe7\xe8\u3041\u3044\u3046-2.flac'.encode('utf-8')],
            ["file.flac",  #check filename arguments
             u'abc\xe0\xe7\xe8\u3041\u3044\u3046.flac'.encode('utf-8')]):
            if (os.path.isdir(output_directory)):
                rmtree(output_directory)
            if (os.path.isfile(file_path)):
                os.unlink(file_path)
            track = audiotools.FlacAudio.from_pcm(
                file_path,
                BLANK_PCM_Reader(1))
            self.assertEqual(
                self.__run_app__(
                    ["track2track",
                     "--dir", output_directory,
                     "--format", format_string,
                     file_path]), 0)
            # converted output must be PCM-identical to the source
            self.assertEqual(
                audiotools.pcm_frame_cmp(
                    track.to_pcm(),
                    audiotools.open(os.path.join(output_directory,
                                                 format_string)).to_pcm()),
                None)
            if (os.path.isdir(output_directory)):
                rmtree(output_directory)
            if (os.path.isfile(file_path)):
                os.unlink(file_path)
        # second pass: explicit -o output paths
        for (file_path,
             output_path) in Possibilities(
            ["file.flac",  #check filename arguments
             u'abc\xe0\xe7\xe8\u3041\u3044\u3046.flac'.encode('utf-8')],
            ["output_file.flac",  #check --output argument
             u'abc\xe0\xe7\xe8\u3041\u3044\u3046-2.flac'.encode('utf-8')]):
            if (os.path.isfile(output_path)):
                os.unlink(output_path)
            if (os.path.isfile(file_path)):
                os.unlink(file_path)
            track = audiotools.FlacAudio.from_pcm(
                file_path,
                BLANK_PCM_Reader(1))
            self.assertEqual(
                self.__run_app__(
                    ["track2track", "-o", output_path, file_path]), 0)
            self.assertEqual(
                audiotools.pcm_frame_cmp(
                    track.to_pcm(),
                    audiotools.open(output_path).to_pcm()),
                None)
            if (os.path.isfile(output_path)):
                os.unlink(output_path)
            if (os.path.isfile(file_path)):
                os.unlink(file_path)
@UTIL_TRACK2TRACK
def test_errors(self):
    """checks track2track's error paths: invalid option values,
    unwritable outputs, missing/duplicate/identical files and
    conversions the target format cannot support"""

    from audiotools.text import (ERR_TRACK2TRACK_O_AND_D,
                                 ERR_TRACK2TRACK_O_AND_D_SUGGESTION,
                                 ERR_TRACK2TRACK_O_AND_FORMAT,
                                 ERR_UNSUPPORTED_COMPRESSION_MODE,
                                 ERR_INVALID_JOINT,
                                 ERR_UNKNOWN_FIELD,
                                 LAB_SUPPORTED_FIELDS,
                                 ERR_FILES_REQUIRED,
                                 ERR_TRACK2TRACK_O_AND_MULTIPLE,
                                 ERR_DUPLICATE_FILE,
                                 ERR_OUTPUT_IS_INPUT,
                                 ERR_DUPLICATE_OUTPUT_FILE,
                                 ERR_UNSUPPORTED_CHANNEL_COUNT,
                                 ERR_UNSUPPORTED_CHANNEL_MASK,
                                 ERR_UNSUPPORTED_BITS_PER_SAMPLE,
                                 )

    all_options = ["-t", "-q", "-d", "--format", "-o", "-j",
                   "--replay-gain", "--no-replay-gain"]

    # run track2track with every combination of options,
    # each populated with a deliberately bad value
    for count in xrange(0, len(all_options) + 1):
        for options in Combinations(all_options, count):
            self.clean_output_dirs()
            self.__clear_checks__()
            options = self.populate_bad_options(options) + \
                [self.broken_track1.filename]

            if ("-t" in options):
                # an unknown output type is a usage error (exit code 2)
                self.assertEqual(
                    self.__run_app__(["track2track"] + options),
                    2)
                continue
            elif ("-o" in options):
                output_class = self.output_format
            else:
                output_class = audiotools.TYPE_MAP[
                    audiotools.DEFAULT_TYPE]

            # all remaining bad combinations exit with code 1
            self.assertEqual(
                self.__run_app__(["track2track"] + options),
                1)

            # the checks below mirror the order in which
            # track2track itself validates its arguments
            if (("-o" in options) and
                ("-d" in options)):
                self.__check_error__(ERR_TRACK2TRACK_O_AND_D)
                self.__check_info__(ERR_TRACK2TRACK_O_AND_D_SUGGESTION)
                continue
            if (("--format" in options) and ("-o" in options)):
                # -o with --format is only a warning, not fatal
                self.__queue_warning__(ERR_TRACK2TRACK_O_AND_FORMAT)
            if ("-q" in options):
                self.__check_error__(
                    ERR_UNSUPPORTED_COMPRESSION_MODE %
                    {"quality": "bar",
                     "type": output_class.NAME})
                continue
            if ("-j" in options):
                self.__check_error__(
                    ERR_INVALID_JOINT)
                continue
            if ("-o" in options):
                # output path routed through a non-directory
                self.__check_error__(
                    u"[Errno 20] Not a directory: '%s'" %
                    (self.unwritable_file))
                continue
            if ("--format" in options):
                # an unknown format field lists the supported ones
                self.__check_error__(
                    ERR_UNKNOWN_FIELD % ("foo"))
                self.__check_info__(LAB_SUPPORTED_FIELDS)
                for field in sorted(audiotools.MetaData.FIELDS + \
                                    ("album_track_number", "suffix")):
                    if (field == 'track_number'):
                        self.__check_info__(u"%(track_number)2.2d")
                    else:
                        self.__check_info__(u"%%(%s)s" % (field))
                self.__check_info__(u"%(basename)s")
                continue
            if ("-d" in options):
                # output directory isn't writable
                output_path = os.path.join(
                    self.unwritable_dir,
                    output_class.track_name(
                        "",
                        self.track1.get_metadata(),
                        None))
                self.__check_error__(
                    u"[Errno 13] Permission denied: '%s'" %
                    (output_path))
                continue

            # the error triggered by a broken file is variable
            # so no need to check its exact value
            self.assert_(len(self.stderr.getvalue()) > 0)

    # check no input files
    self.assertEqual(self.__run_app__(["track2track"]), 1)
    self.__check_error__(ERR_FILES_REQUIRED)

    self.track2 = self.input_format.from_pcm(
        os.path.join(self.input_dir, "02.%s" % (self.input_format.SUFFIX)),
        BLANK_PCM_Reader(2))

    # check multiple input files and -o
    self.assertEqual(self.__run_app__(["track2track",
                                       "-o", self.output_file.name,
                                       self.track1.filename,
                                       self.track2.filename]), 1)
    self.__check_error__(ERR_TRACK2TRACK_O_AND_MULTIPLE)

    # check duplicate input file
    self.assertEqual(self.__run_app__(["track2track",
                                       self.track1.filename,
                                       self.track1.filename,
                                       self.track2.filename]), 1)
    self.__check_error__(
        ERR_DUPLICATE_FILE %
        (audiotools.Filename(self.track1.filename),))

    # check identical input and output file
    self.assertEqual(
        self.__run_app__(["track2track",
                          self.track1.filename,
                          "-t", self.input_format.NAME,
                          "-d", self.input_dir,
                          "--format=%(track_number)2.2d.%(suffix)s"]), 1)
    self.__check_error__(
        ERR_OUTPUT_IS_INPUT %
        (audiotools.Filename(self.track1.filename),))

    # check identical input and output file with -o
    self.assertEqual(self.__run_app__(["track2track",
                                       "-t", self.input_format.NAME,
                                       "-o", self.track1.filename,
                                       self.track1.filename]), 1)
    self.__check_error__(
        ERR_OUTPUT_IS_INPUT %
        (audiotools.Filename(self.track1.filename),))

    # check duplicate output files
    # (two inputs collapse to the same --format result)
    self.assertEqual(self.__run_app__(["track2track",
                                       "--format", "foo",
                                       self.track1.filename,
                                       self.track2.filename]), 1)
    self.__check_error__(
        ERR_DUPLICATE_OUTPUT_FILE % (
            audiotools.Filename(os.path.join(".", "foo")),))

    # check conversion from supported to unsupported channel count
    unsupported_count_file = tempfile.NamedTemporaryFile(
        suffix=".flac")
    try:
        supported_track = audiotools.WaveAudio.from_pcm(
            os.path.join(self.input_dir, "00 - channels.wav"),
            BLANK_PCM_Reader(1, channels=10, channel_mask=0))
        self.assertEqual(self.__run_app__(["track2track",
                                           "-t", "flac",
                                           "-d",
                                           self.output_dir,
                                           supported_track.filename]), 1)
        self.__check_error__(
            ERR_UNSUPPORTED_CHANNEL_COUNT %
            {"target_filename": audiotools.Filename(
                os.path.join(self.output_dir, "00 - .flac")),
             "channels": 10})
        self.assertEqual(self.__run_app__(["track2track",
                                           "-o",
                                           unsupported_count_file.name,
                                           supported_track.filename]), 1)
        self.__check_error__(
            ERR_UNSUPPORTED_CHANNEL_COUNT %
            {"target_filename": audiotools.Filename(
                unsupported_count_file.name),
             "channels": 10})
    finally:
        unsupported_count_file.close()

    # check conversion from supported to unsupported channel mask
    unsupported_mask_file = tempfile.NamedTemporaryFile(
        suffix=".flac")
    try:
        supported_track = audiotools.WaveAudio.from_pcm(
            os.path.join(self.input_dir, "00 - mask.wav"),
            BLANK_PCM_Reader(1, channels=6, channel_mask=0x3F000))
        self.assertEqual(self.__run_app__(["track2track",
                                           "-t", "flac",
                                           "-d",
                                           self.output_dir,
                                           supported_track.filename]), 1)
        self.__check_error__(
            ERR_UNSUPPORTED_CHANNEL_MASK %
            {"target_filename": audiotools.Filename(
                os.path.join(self.output_dir, "00 - .flac")),
             "assignment": audiotools.ChannelMask(0x3F000)})
        self.assertEqual(self.__run_app__(["track2track",
                                           "-o",
                                           unsupported_mask_file.name,
                                           supported_track.filename]), 1)
        self.__check_error__(
            ERR_UNSUPPORTED_CHANNEL_MASK %
            {"target_filename": audiotools.Filename(
                unsupported_mask_file.name),
             "assignment": audiotools.ChannelMask(0x3F000)})
    finally:
        unsupported_mask_file.close()

    # check conversion from supported to unsupported bits-per-sample
    unsupported_bps_file = tempfile.NamedTemporaryFile(
        suffix=".shn")
    try:
        supported_track = audiotools.WaveAudio.from_pcm(
            os.path.join(self.input_dir, "00 - bps.wav"),
            BLANK_PCM_Reader(1, bits_per_sample=24))
        self.assertEqual(self.__run_app__(["track2track",
                                           "-t", "shn",
                                           "-d",
                                           self.output_dir,
                                           supported_track.filename]), 1)
        self.__check_error__(
            ERR_UNSUPPORTED_BITS_PER_SAMPLE %
            {"target_filename": audiotools.Filename(
                os.path.join(self.output_dir, "00 - .shn")),
             "bps": 24})
        self.assertEqual(self.__run_app__(["track2track",
                                           "-o",
                                           unsupported_bps_file.name,
                                           supported_track.filename]), 1)
        self.__check_error__(
            ERR_UNSUPPORTED_BITS_PER_SAMPLE %
            {"target_filename": audiotools.Filename(
                unsupported_bps_file.name),
             "bps": 24})
    finally:
        unsupported_bps_file.close()
@UTIL_TRACK2TRACK
def test_replay_gain(self):
    """checks that track2track's --replay-gain option applies
    ReplayGain to converted tracks on a per-album basis:
    tracks sharing album_name/album_number metadata get the
    same album gain, distinct albums get different gains"""

    # seven source tracks split across three albums:
    # tracks 0-1 -> "Test Album" disc 1
    # tracks 2-4 -> "Test Album" disc 2
    # tracks 5-6 -> "Test Album 2"
    temp_files = [os.path.join(
        self.input_dir,
        "%2.2d.%s" % (i + 1, self.input_format.SUFFIX))
        for i in xrange(7)]
    temp_tracks = []

    temp_tracks.append(self.input_format.from_pcm(
        temp_files[0],
        test_streams.Sine16_Stereo(44100, 44100,
                                   441.0, 0.50, 4410.0, 0.49, 1.0)))
    temp_tracks.append(self.input_format.from_pcm(
        temp_files[1],
        test_streams.Sine16_Stereo(66150, 44100,
                                   8820.0, 0.70, 4410.0, 0.29, 1.0)))
    temp_tracks.append(self.input_format.from_pcm(
        temp_files[2],
        test_streams.Sine16_Stereo(52920, 44100,
                                   441.0, 0.50, 441.0, 0.49, 0.5)))
    temp_tracks.append(self.input_format.from_pcm(
        temp_files[3],
        test_streams.Sine16_Stereo(61740, 44100,
                                   441.0, 0.61, 661.5, 0.37, 2.0)))
    temp_tracks.append(self.input_format.from_pcm(
        temp_files[4],
        test_streams.Sine16_Stereo(26460, 44100,
                                   441.0, 0.50, 882.0, 0.49, 0.7)))
    temp_tracks.append(self.input_format.from_pcm(
        temp_files[5],
        test_streams.Sine16_Stereo(61740, 44100,
                                   441.0, 0.50, 4410.0, 0.49, 1.3)))
    temp_tracks.append(self.input_format.from_pcm(
        temp_files[6],
        test_streams.Sine16_Stereo(79380, 44100,
                                   8820.0, 0.70, 4410.0, 0.29, 0.1)))

    # track_name values are chosen so that sorting the converted
    # files by name yields "Track 1" ... "Track 7" in order
    temp_tracks[0].set_metadata(audiotools.MetaData(
        track_name=u"Track 3",
        album_name=u"Test Album",
        track_number=1,
        album_number=1))
    temp_tracks[1].set_metadata(audiotools.MetaData(
        track_name=u"Track 4",
        album_name=u"Test Album",
        track_number=2,
        album_number=1))
    temp_tracks[2].set_metadata(audiotools.MetaData(
        track_name=u"Track 5",
        album_name=u"Test Album",
        track_number=1,
        album_number=2))
    temp_tracks[3].set_metadata(audiotools.MetaData(
        track_name=u"Track 6",
        album_name=u"Test Album",
        track_number=2,
        album_number=2))
    temp_tracks[4].set_metadata(audiotools.MetaData(
        track_name=u"Track 7",
        album_name=u"Test Album",
        track_number=3,
        album_number=2))
    temp_tracks[5].set_metadata(audiotools.MetaData(
        track_name=u"Track 1",
        album_name=u"Test Album 2",
        track_number=1))
    temp_tracks[6].set_metadata(audiotools.MetaData(
        track_name=u"Track 2",
        album_name=u"Test Album 2",
        track_number=2))

    # convert all seven tracks at once with ReplayGain enabled
    self.assertEqual(
        self.__run_app__(["track2track",
                          "-d", self.output_dir,
                          "--format=%(track_name)s.%(suffix)s",
                          "-t", self.output_format.NAME,
                          "--replay-gain",
                          "-V", "quiet"] + \
                         [f.filename for f in temp_tracks]), 0)

    converted_tracks = audiotools.open_files(
        [os.path.join(self.output_dir, f) for f in
         os.listdir(self.output_dir)], sorted=True)

    self.assertEqual(len(converted_tracks), 7)

    # every converted track should have ReplayGain attached
    for (i, track) in enumerate(converted_tracks):
        self.assertEqual(track.get_metadata().track_name,
                         u"Track %d" % (i + 1))
        self.assert_(track.replay_gain() is not None)

    replay_gains = [track.replay_gain() for track in
                    converted_tracks]

    # tracks 0 and 1 should be on the same album
    # (the original also compared replay_gains[0] with itself here,
    # a tautology that asserted nothing, so it has been removed)
    self.assertEqual(replay_gains[0].album_gain,
                     replay_gains[1].album_gain)

    self.assertNotEqual(replay_gains[0].album_gain,
                        replay_gains[2].album_gain)
    self.assertNotEqual(replay_gains[0].album_gain,
                        replay_gains[4].album_gain)

    # tracks 2 and 3 should be on the same album
    self.assertEqual(replay_gains[2].album_gain,
                     replay_gains[3].album_gain)

    self.assertNotEqual(replay_gains[3].album_gain,
                        replay_gains[0].album_gain)
    self.assertNotEqual(replay_gains[3].album_gain,
                        replay_gains[5].album_gain)

    # tracks 4, 5 and 6 should be on the same album
    self.assertEqual(replay_gains[4].album_gain,
                     replay_gains[5].album_gain)
    self.assertEqual(replay_gains[5].album_gain,
                     replay_gains[6].album_gain)
    self.assertEqual(replay_gains[4].album_gain,
                     replay_gains[6].album_gain)

    self.assertNotEqual(replay_gains[6].album_gain,
                        replay_gains[0].album_gain)
    self.assertNotEqual(replay_gains[6].album_gain,
                        replay_gains[2].album_gain)
class trackcat(UtilTest):
    """unit tests for the trackcat(1) utility, which concatenates
    several audio tracks into a single output file"""

    @UTIL_TRACKCAT
    def setUp(self):
        # three compatible streams whose lengths
        # (220500 + 264600 + 308700 = 793800 PCM frames)
        # line up with the cuesheet written below
        self.stream1 = test_streams.Sine16_Stereo(220500, 44100,
                                                  441.0, 0.50,
                                                  4410.0, 0.49, 1.0)
        self.stream2 = test_streams.Sine16_Stereo(264600, 44100,
                                                  8820.0, 0.70,
                                                  4410.0, 0.29, 1.0)
        self.stream3 = test_streams.Sine16_Stereo(308700, 44100,
                                                  441.0, 0.50,
                                                  441.0, 0.49, 0.5)

        # streams that mismatch the above in bits-per-sample,
        # channel count and sample rate, respectively
        self.misfit_stream1 = test_streams.Sine24_Stereo(200000, 44100,
                                                         441.0, 0.50,
                                                         441.0, 0.49, 1.0)
        self.misfit_stream2 = test_streams.Sine16_Mono(200000, 44100,
                                                       441.0, 0.50,
                                                       441.0, 0.49)
        self.misfit_stream3 = test_streams.Sine16_Stereo(200000, 48000,
                                                         441.0, 0.50,
                                                         441.0, 0.49, 1.0)

        self.track1_file = tempfile.NamedTemporaryFile(suffix=".flac")
        self.track2_file = tempfile.NamedTemporaryFile(suffix=".flac")
        self.track3_file = tempfile.NamedTemporaryFile(suffix=".flac")
        self.track4_file = tempfile.NamedTemporaryFile(suffix=".flac")
        self.track5_file = tempfile.NamedTemporaryFile(suffix=".flac")
        self.track6_file = tempfile.NamedTemporaryFile(suffix=".flac")

        # tracks 1-3 share album/artist metadata so trackcat
        # can merge it into the concatenated output
        self.track1 = audiotools.FlacAudio.from_pcm(
            self.track1_file.name, self.stream1)
        self.track1.set_metadata(audiotools.MetaData(track_name=u"Track 1",
                                                     album_name=u"Album",
                                                     artist_name=u"Artist",
                                                     track_number=1,
                                                     track_total=3))
        self.track2 = audiotools.FlacAudio.from_pcm(
            self.track2_file.name, self.stream2)
        self.track2.set_metadata(audiotools.MetaData(track_name=u"Track 2",
                                                     album_name=u"Album",
                                                     artist_name=u"Artist",
                                                     track_number=2,
                                                     track_total=3))
        self.track3 = audiotools.FlacAudio.from_pcm(
            self.track3_file.name, self.stream3)
        self.track3.set_metadata(audiotools.MetaData(track_name=u"Track 3",
                                                     album_name=u"Album",
                                                     artist_name=u"Artist",
                                                     track_number=3,
                                                     track_total=3))

        # tracks 4-6 use the misfit streams and carry no metadata
        self.track4 = audiotools.FlacAudio.from_pcm(
            self.track4_file.name, self.misfit_stream1)
        self.track5 = audiotools.FlacAudio.from_pcm(
            self.track5_file.name, self.misfit_stream2)
        self.track6 = audiotools.FlacAudio.from_pcm(
            self.track6_file.name, self.misfit_stream3)

        # a valid 3-track cuesheet and a deliberately invalid one
        self.cuesheet = tempfile.NamedTemporaryFile(suffix=".cue")
        self.cuesheet.write('FILE "CDImage.wav" WAVE\r\n TRACK 01 AUDIO\r\n ISRC JPPI00652340\r\n INDEX 01 00:00:00\r\n TRACK 02 AUDIO\r\n ISRC JPPI00652349\r\n INDEX 00 00:03:00\r\n INDEX 01 00:05:00\r\n TRACK 03 AUDIO\r\n ISRC JPPI00652341\r\n INDEX 00 00:9:00\r\n INDEX 01 00:11:00\r\n')
        self.cuesheet.flush()

        self.invalid_cuesheet = tempfile.NamedTemporaryFile(suffix=".cue")
        self.invalid_cuesheet.write("Hello, World!")
        self.invalid_cuesheet.flush()

        # output targets with and without a recognizable suffix
        self.suffix_outfile = tempfile.NamedTemporaryFile(suffix=".flac")
        self.nonsuffix_outfile = tempfile.NamedTemporaryFile()

    @UTIL_TRACKCAT
    def tearDown(self):
        # closing the NamedTemporaryFiles deletes them
        self.track1_file.close()
        self.track2_file.close()
        self.track3_file.close()
        self.track4_file.close()
        self.track5_file.close()
        self.track6_file.close()
        self.cuesheet.close()
        self.invalid_cuesheet.close()
        self.suffix_outfile.close()
        self.nonsuffix_outfile.close()

    def populate_options(self, options, type, quality, outfile):
        """returns a copy of the option list with each recognized
        flag followed by the given concrete value"""

        populated = []

        for option in options:
            if (option == '-t'):
                populated.append(option)
                populated.append(type)
            elif (option == '-q'):
                populated.append(option)
                populated.append(quality)
            elif (option == '--cue'):
                populated.append(option)
                populated.append(self.cuesheet.name)
            elif (option == '-o'):
                populated.append(option)
                populated.append(outfile)
            else:
                populated.append(option)

        return populated

    def output_combinations(self, all_options):
        """yields (type, quality, outfile, count, options) tuples
        covering every option combination against several output
        types, qualities and (partly unwritable) output paths"""

        for (type, quality) in [("flac", "8"),
                                ("wav", "foo")]:
            for outfile in [self.suffix_outfile.name,
                            self.nonsuffix_outfile.name,
                            "/dev/null/foo.wav",
                            "/dev/null/foo"]:
                for count in xrange(1, len(all_options) + 1):
                    for options in Combinations(all_options, count):
                        yield (type, quality, outfile, count, options)

    @UTIL_TRACKCAT
    def test_options(self):
        """checks trackcat's error conditions followed by every
        valid option combination, verifying that no PCM data is
        lost and metadata/cuesheets are carried over"""

        from audiotools.text import (ERR_FILES_REQUIRED,
                                     ERR_BPS_MISMATCH,
                                     ERR_CHANNEL_COUNT_MISMATCH,
                                     ERR_SAMPLE_RATE_MISMATCH,
                                     ERR_CUE_IOERROR,
                                     ERR_CUE_MISSING_TAG,
                                     ERR_DUPLICATE_FILE,
                                     ERR_OUTPUT_IS_INPUT,
                                     ERR_NO_OUTPUT_FILE,
                                     ERR_UNSUPPORTED_AUDIO_TYPE,
                                     ERR_UNSUPPORTED_COMPRESSION_MODE,
                                     ERR_ENCODING_ERROR)

        # first, check the error conditions

        # no input files at all
        self.assertEqual(
            self.__run_app__(["trackcat", "-o", "fail.flac"]), 1)
        self.__check_error__(ERR_FILES_REQUIRED)

        # track4 has a different bits-per-sample
        self.assertEqual(
            self.__run_app__(["trackcat", "-o", "fail.flac",
                              self.track1.filename,
                              self.track2.filename,
                              self.track3.filename,
                              self.track4.filename]), 1)
        self.__check_error__(ERR_BPS_MISMATCH)

        # track5 has a different channel count
        self.assertEqual(
            self.__run_app__(["trackcat", "-o", "fail.flac",
                              self.track1.filename,
                              self.track2.filename,
                              self.track3.filename,
                              self.track5.filename]), 1)
        self.__check_error__(ERR_CHANNEL_COUNT_MISMATCH)

        # track6 has a different sample rate
        self.assertEqual(
            self.__run_app__(["trackcat", "-o", "fail.flac",
                              self.track1.filename,
                              self.track2.filename,
                              self.track3.filename,
                              self.track6.filename]), 1)
        self.__check_error__(ERR_SAMPLE_RATE_MISMATCH)

        # unreadable cuesheet path
        self.assertEqual(
            self.__run_app__(["trackcat", "--cue", "/dev/null/foo.cue",
                              "-o", "fail.flac",
                              self.track1.filename,
                              self.track2.filename,
                              self.track3.filename]), 1)
        self.__check_error__(ERR_CUE_IOERROR)

        # syntactically invalid cuesheet
        self.assertEqual(
            self.__run_app__(["trackcat", "--cue", self.invalid_cuesheet.name,
                              "-o", "fail.flac",
                              self.track1.filename,
                              self.track2.filename,
                              self.track3.filename]), 1)
        self.__check_error__(ERR_CUE_MISSING_TAG % (1))

        # a duplicated input file is only a warning, not fatal
        self.assertEqual(
            self.__run_app__(["trackcat",
                              "-o", self.suffix_outfile.name,
                              self.track1.filename,
                              self.track1.filename]), 0)
        self.__check_warning__(
            ERR_DUPLICATE_FILE %
            (audiotools.Filename(self.track1.filename),))

        # output file is also an input file
        self.assertEqual(
            self.__run_app__(["trackcat",
                              "-o", self.track1.filename,
                              self.track1.filename,
                              self.track2.filename,
                              self.track3.filename]), 1)
        self.__check_error__(
            ERR_OUTPUT_IS_INPUT %
            (audiotools.Filename(self.track1.filename),))

        # then, check the option combinations
        # along with a few different output files and types
        all_options = ["-t", "-q", "--cue", "-o"]

        for (type,
             quality,
             outfile,
             count,
             options) in self.output_combinations(all_options):
            # truncate any leftover output file from a prior run
            if (os.path.isfile(outfile)):
                f = open(outfile, "wb")
                f.close()

            options = self.populate_options(options,
                                            type,
                                            quality,
                                            outfile) + \
                [self.track1.filename,
                 self.track2.filename,
                 self.track3.filename]

            # check a few common errors
            if ("-o" not in options):
                self.assertEqual(self.__run_app__(["trackcat"] + options),
                                 1)
                self.__check_error__(ERR_NO_OUTPUT_FILE)
                continue

            if ("-t" in options):
                output_format = audiotools.TYPE_MAP[type]
            else:
                # without -t, the type comes from the output suffix
                try:
                    output_format = audiotools.filename_to_type(outfile)
                except audiotools.UnknownAudioType:
                    self.assertEqual(self.__run_app__(["trackcat"] +
                                                      options), 1)
                    self.__check_error__(
                        ERR_UNSUPPORTED_AUDIO_TYPE % (u"",))
                    continue

            if (("-q" in options) and
                (quality not in output_format.COMPRESSION_MODES)):
                self.assertEqual(self.__run_app__(["trackcat"] + options),
                                 1)
                self.__check_error__(
                    ERR_UNSUPPORTED_COMPRESSION_MODE %
                    {"quality": quality,
                     "type": output_format.NAME.decode('ascii')})
                continue

            if (outfile.startswith("/dev/")):
                # unwritable output target
                self.assertEqual(self.__run_app__(["trackcat"] + options),
                                 1)
                self.__check_error__(
                    ERR_ENCODING_ERROR % (audiotools.Filename(outfile),))
                continue

            # check that no PCM data is lost
            self.assertEqual(
                self.__run_app__(["trackcat"] + options), 0)
            new_track = audiotools.open(outfile)
            self.assertEqual(new_track.NAME, output_format.NAME)
            self.assertEqual(new_track.total_frames(), 793800)
            self.assert_(audiotools.pcm_frame_cmp(
                new_track.to_pcm(),
                audiotools.PCMCat([track.to_pcm() for track in
                                   [self.track1,
                                    self.track2,
                                    self.track3]])) is None)

            # check that metadata is merged properly
            metadata = new_track.get_metadata()
            if (metadata is not None):
                # per-track fields are dropped, shared fields kept
                self.assertEqual(metadata.track_name, None)
                self.assertEqual(metadata.album_name, u"Album")
                self.assertEqual(metadata.artist_name, u"Artist")
                self.assertEqual(metadata.track_number, None)
                self.assertEqual(metadata.track_total, 3)

            # check that the cuesheet is embedded properly
            if (("--cue" in options) and
                (output_format is audiotools.FlacAudio)):
                cuesheet = new_track.get_cuesheet()
                self.assert_(cuesheet is not None)
                self.assertEqual(cuesheet.ISRCs(),
                                 {1: 'JPPI00652340',
                                  2: 'JPPI00652349',
                                  3: 'JPPI00652341'})
                self.assertEqual(list(cuesheet.indexes()),
                                 [(0,), (225, 375), (675, 825)])
                self.assertEqual(cuesheet.pcm_lengths(793800, 44100),
                                 [220500, 264600, 308700])

    @UTIL_TRACKCAT
    def test_unicode(self):
        """verifies trackcat handles UTF-8 encoded (non-ASCII)
        input, output and cuesheet paths"""

        for (input_filenames,
             output_path,
             cuesheet_file) in Possibilities(
                # check filename arguments
                [["track%d.flac" % (i) for i in range(3)],
                 [(u'abc\xe0\xe7\xe8\u3041\u3044\u3046-%d.flac' %
                   (i)).encode('utf-8') for i in range(3)]],
                # check output filename argument
                ["output.flac",
                 u'abc\xe0\xe7\xe8\u3041\u3044\u3046-out.flac'.encode('utf-8')],
                # check --cue argument
                [None,
                 "cuesheet.cue",
                 u'abc\xe0\xe7\xe8\u3041\u3044\u3046.cue'.encode('utf-8')]):
            # start from a clean slate
            for input_filename in input_filenames:
                if (os.path.isfile(input_filename)):
                    os.unlink(input_filename)
            if (os.path.isfile(output_path)):
                os.unlink(output_path)
            if ((cuesheet_file is not None) and
                os.path.isfile(cuesheet_file)):
                os.unlink(cuesheet_file)

            tracks = [audiotools.FlacAudio.from_pcm(
                input_filename,
                EXACT_BLANK_PCM_Reader(pcm_frames))
                for (input_filename, pcm_frames) in
                zip(input_filenames, [220500, 264600, 308700])]

            if (cuesheet_file is not None):
                f = open(cuesheet_file, "wb")
                f.write('FILE "CDImage.wav" WAVE\r\n TRACK 01 AUDIO\r\n ISRC JPPI00652340\r\n INDEX 01 00:00:00\r\n TRACK 02 AUDIO\r\n ISRC JPPI00652349\r\n INDEX 00 00:03:00\r\n INDEX 01 00:05:00\r\n TRACK 03 AUDIO\r\n ISRC JPPI00652341\r\n INDEX 00 00:9:00\r\n INDEX 01 00:11:00\r\n')
                f.close()

            # NOTE(review): the cuesheet file is passed as a bare
            # positional argument here, not via "--cue" as the
            # Possibilities comment above suggests — presumably
            # "--cue" was intended; confirm against trackcat's usage
            self.assertEqual(
                self.__run_app__(
                    ["trackcat"] + input_filenames +
                    ([cuesheet_file] if cuesheet_file is not None else []) +
                    ["--output", output_path]), 0)

            # concatenated output should equal the inputs in sequence
            self.assertEqual(
                audiotools.pcm_frame_cmp(
                    audiotools.PCMCat([t.to_pcm() for t in tracks]),
                    audiotools.open(output_path).to_pcm()), None)

            # clean up generated files
            for input_filename in input_filenames:
                if (os.path.isfile(input_filename)):
                    os.unlink(input_filename)
            if (os.path.isfile(output_path)):
                os.unlink(output_path)
            if ((cuesheet_file is not None) and
                os.path.isfile(cuesheet_file)):
                os.unlink(cuesheet_file)
class trackcmp(UtilTest):
    """unit tests for the trackcmp(1) utility, which compares
    audio files (or directories of numbered tracks) for
    matching PCM data"""

    @UTIL_TRACKCMP
    def setUp(self):
        self.type = audiotools.FlacAudio

        # matching/mismatching directory trees of numbered tracks
        self.match_dir1 = tempfile.mkdtemp()
        self.match_dir2 = tempfile.mkdtemp()
        self.mismatch_dir1 = tempfile.mkdtemp()   # different PCM data
        self.mismatch_dir2 = tempfile.mkdtemp()   # one track missing
        self.mismatch_dir3 = tempfile.mkdtemp()   # one extra track

        self.match_file1 = tempfile.NamedTemporaryFile(
            suffix="." + self.type.SUFFIX)
        self.match_file2 = tempfile.NamedTemporaryFile(
            suffix="." + self.type.SUFFIX)
        self.mismatch_file = tempfile.NamedTemporaryFile(
            suffix="." + self.type.SUFFIX)
        self.broken_file = tempfile.NamedTemporaryFile(
            suffix="." + self.type.SUFFIX)

        self.type.from_pcm(self.match_file1.name,
                           BLANK_PCM_Reader(1))
        self.type.from_pcm(self.match_file2.name,
                           BLANK_PCM_Reader(1))
        self.type.from_pcm(self.mismatch_file.name,
                           RANDOM_PCM_Reader(1))

        # a truncated copy of match_file1 to trigger decode errors
        # (use a context manager so the read handle is closed)
        with open(self.match_file1.name, "rb") as f:
            self.broken_file.write(f.read()[0:-1])
        self.broken_file.flush()

        for i in xrange(1, 4):
            track = self.type.from_pcm(
                os.path.join(self.match_dir1,
                             "%2.2d.%s" % (i, self.type.SUFFIX)),
                BLANK_PCM_Reader(i * 2))
            track.set_metadata(audiotools.MetaData(track_number=i))

            track = self.type.from_pcm(
                os.path.join(self.match_dir2,
                             "%2.2d.%s" % (i, self.type.SUFFIX)),
                BLANK_PCM_Reader(i * 2))
            track.set_metadata(audiotools.MetaData(track_number=i))

            track = self.type.from_pcm(
                os.path.join(self.mismatch_dir1,
                             "%2.2d.%s" % (i, self.type.SUFFIX)),
                RANDOM_PCM_Reader(i * 2))
            track.set_metadata(audiotools.MetaData(track_number=i))

        for i in xrange(1, 3):
            track = self.type.from_pcm(
                os.path.join(self.mismatch_dir2,
                             "%2.2d.%s" % (i, self.type.SUFFIX)),
                BLANK_PCM_Reader(i * 2))
            track.set_metadata(audiotools.MetaData(track_number=i))

        for i in xrange(1, 5):
            track = self.type.from_pcm(
                os.path.join(self.mismatch_dir3,
                             "%2.2d.%s" % (i, self.type.SUFFIX)),
                BLANK_PCM_Reader(i * 2))
            track.set_metadata(audiotools.MetaData(track_number=i))

    @UTIL_TRACKCMP
    def tearDown(self):
        for directory in [self.match_dir1,
                          self.match_dir2,
                          self.mismatch_dir1,
                          self.mismatch_dir2,
                          self.mismatch_dir3]:
            for f in os.listdir(directory):
                os.unlink(os.path.join(directory, f))
            os.rmdir(directory)

        self.match_file1.close()
        self.match_file2.close()
        self.mismatch_file.close()
        # the original neglected to close broken_file,
        # leaking the temporary file until interpreter exit
        self.broken_file.close()

    @UTIL_TRACKCMP
    def test_combinations(self):
        """exercises trackcmp against matching, mismatching,
        missing, broken and directory-based inputs and checks
        the exit codes and progress/error output of each"""

        from audiotools.text import (LAB_TRACKCMP_CMP,
                                     LAB_TRACKCMP_MISMATCH,
                                     LAB_TRACKCMP_TYPE_MISMATCH,
                                     LAB_TRACKCMP_OK,
                                     LAB_TRACKCMP_MISSING)

        # check matching file against maching file
        self.assertEqual(
            self.__run_app__(["trackcmp", "-V", "normal",
                              self.match_file1.name, self.match_file2.name]),
            0)

        # check matching file against itself
        self.assertEqual(
            self.__run_app__(["trackcmp", "-V", "normal",
                              self.match_file1.name, self.match_file1.name]),
            0)

        # check matching file against mismatching file
        self.assertEqual(
            self.__run_app__(["trackcmp", "-V", "normal",
                              self.match_file1.name, self.mismatch_file.name]),
            1)
        self.__check_info__(
            (LAB_TRACKCMP_CMP %
             {"file1": audiotools.Filename(self.match_file1.name),
              "file2": audiotools.Filename(self.mismatch_file.name)}) +
            u" : " +
            (LAB_TRACKCMP_MISMATCH %
             {"frame_number": 1}))
        # (ANSI output won't be generated because stdout isn't a TTY)

        # check matching file against missing file
        self.assertEqual(
            self.__run_app__(["trackcmp", "-V", "normal",
                              self.match_file1.name, "/dev/null/foo"]),
            1)
        self.__check_error__(
            (LAB_TRACKCMP_CMP %
             {"file1": audiotools.Filename(self.match_file1.name),
              "file2": audiotools.Filename("/dev/null/foo")}) +
            u" : " + LAB_TRACKCMP_TYPE_MISMATCH)

        # check matching file against broken file
        self.assertEqual(
            self.__run_app__(["trackcmp", "-V", "normal",
                              self.match_file1.name, self.broken_file.name]),
            1)
        self.__check_error__(u"EOF reading frame")

        # check file against directory
        self.assertEqual(
            self.__run_app__(["trackcmp", "-V", "normal",
                              self.match_file1.name, self.match_dir1]),
            1)
        self.__check_error__(
            (LAB_TRACKCMP_CMP %
             {"file1": audiotools.Filename(self.match_file1.name),
              "file2": audiotools.Filename(self.match_dir1)}) +
            u" : " + LAB_TRACKCMP_TYPE_MISMATCH)

        # check directory against file
        self.assertEqual(
            self.__run_app__(["trackcmp", "-V", "normal",
                              self.match_dir1, self.match_file1.name]),
            1)
        self.__check_error__(
            (LAB_TRACKCMP_CMP %
             {"file1": audiotools.Filename(self.match_dir1),
              "file2": audiotools.Filename(self.match_file1.name)}) +
            u" : " + LAB_TRACKCMP_TYPE_MISMATCH)

        # check matching directory against matching directory
        self.assertEqual(
            self.__run_app__(["trackcmp", "-V", "normal", "-j", "1",
                              self.match_dir1, self.match_dir2]),
            0)
        for i in xrange(1, 4):
            self.__check_info__(
                audiotools.output_progress(
                    (LAB_TRACKCMP_CMP %
                     {"file1": audiotools.Filename(
                         os.path.join(self.match_dir1,
                                      "%2.2d.%s" %
                                      (i, self.type.SUFFIX))),
                      "file2": audiotools.Filename(
                         os.path.join(self.match_dir2,
                                      "%2.2d.%s" %
                                      (i, self.type.SUFFIX)))}) +
                    u" : " +
                    LAB_TRACKCMP_OK,
                    i, 3))

        # check matching directory against itself
        self.assertEqual(
            self.__run_app__(["trackcmp", "-V", "normal", "-j", "1",
                              self.match_dir1, self.match_dir1]),
            0)
        for i in xrange(1, 4):
            self.__check_info__(
                audiotools.output_progress(
                    (LAB_TRACKCMP_CMP %
                     {"file1": audiotools.Filename(
                         os.path.join(self.match_dir1,
                                      "%2.2d.%s" %
                                      (i, self.type.SUFFIX))),
                      "file2": audiotools.Filename(
                         os.path.join(self.match_dir1,
                                      "%2.2d.%s" %
                                      (i, self.type.SUFFIX)))}) +
                    u" : " +
                    LAB_TRACKCMP_OK,
                    i, 3))

        # check matching directory against mismatching directory
        self.assertEqual(
            self.__run_app__(["trackcmp", "-V", "normal", "-j", "1",
                              self.match_dir1, self.mismatch_dir1]),
            1)
        for i in xrange(1, 4):
            self.__check_info__(
                audiotools.output_progress(
                    (LAB_TRACKCMP_CMP %
                     {"file1": audiotools.Filename(
                         os.path.join(self.match_dir1,
                                      "%2.2d.%s" %
                                      (i, self.type.SUFFIX))),
                      "file2": audiotools.Filename(
                         os.path.join(self.mismatch_dir1,
                                      "%2.2d.%s" %
                                      (i, self.type.SUFFIX)))}) +
                    u" : " +
                    (LAB_TRACKCMP_MISMATCH %
                     {"frame_number": 1}),
                    i, 3))

        # check matching directory against directory missing file
        self.assertEqual(
            self.__run_app__(["trackcmp", "-V", "normal", "-j", "1",
                              self.match_dir1, self.mismatch_dir2]),
            1)
        self.__check_info__(
            os.path.join(self.mismatch_dir2,
                         "track %2.2d" % (3)) +
            u" : " +
            LAB_TRACKCMP_MISSING)
        for i in xrange(1, 3):
            self.__check_info__(
                audiotools.output_progress(
                    (LAB_TRACKCMP_CMP %
                     {"file1": audiotools.Filename(
                         os.path.join(self.match_dir1,
                                      "%2.2d.%s" %
                                      (i, self.type.SUFFIX))),
                      "file2": audiotools.Filename(
                         os.path.join(self.mismatch_dir2,
                                      "%2.2d.%s" %
                                      (i, self.type.SUFFIX)))}) +
                    u" : " +
                    LAB_TRACKCMP_OK,
                    i, 2))

        # check matching directory against directory with extra file
        self.assertEqual(
            self.__run_app__(["trackcmp", "-V", "normal", "-j", "1",
                              self.match_dir1, self.mismatch_dir3]),
            1)
        self.__check_info__(
            os.path.join(self.match_dir1,
                         "track %2.2d" % (4)) +
            u" : " +
            LAB_TRACKCMP_MISSING)
        for i in xrange(1, 4):
            self.__check_info__(
                audiotools.output_progress(
                    (LAB_TRACKCMP_CMP %
                     {"file1": audiotools.Filename(
                         os.path.join(self.match_dir1,
                                      "%2.2d.%s" %
                                      (i, self.type.SUFFIX))),
                      "file2": audiotools.Filename(
                         os.path.join(self.mismatch_dir3,
                                      "%2.2d.%s" %
                                      (i, self.type.SUFFIX)))}) +
                    u" : " +
                    LAB_TRACKCMP_OK,
                    i, 3))

    @UTIL_TRACKCMP
    def test_unicode(self):
        """verifies trackcmp handles UTF-8 encoded (non-ASCII)
        filename arguments"""

        for (file1, file2) in Possibilities(
                ["file1.flac",
                 u'abc\xe0\xe7\xe8\u3041\u3044\u3046-1.flac'.encode('utf-8')],
                ["file2.flac",
                 u'abc\xe0\xe7\xe8\u3041\u3044\u3046-2.flac'.encode('utf-8')]):
            if (os.path.isfile(file1)):
                os.unlink(file1)
            if (os.path.isfile(file2)):
                os.unlink(file2)

            track1 = audiotools.FlacAudio.from_pcm(
                file1,
                BLANK_PCM_Reader(1))
            track2 = audiotools.FlacAudio.from_pcm(
                file2,
                BLANK_PCM_Reader(1))

            # identical PCM data should compare equal
            self.assertEqual(
                self.__run_app__(
                    ["trackcmp", file1, file2]), 0)
            self.assertEqual(
                audiotools.pcm_frame_cmp(
                    track1.to_pcm(),
                    track2.to_pcm()), None)

            if (os.path.isfile(file1)):
                os.unlink(file1)
            if (os.path.isfile(file2)):
                os.unlink(file2)
class trackinfo(UtilTest):
    """unit tests for the trackinfo(1) utility, which displays
    technical and metadata information about audio files"""

    # the audio formats that support embedded metadata,
    # each exercised by test_trackinfo below
    METADATA_FORMATS = (audiotools.FlacAudio,
                        audiotools.OggFlacAudio,
                        audiotools.MP3Audio,
                        audiotools.MP2Audio,
                        audiotools.VorbisAudio,
                        audiotools.AiffAudio,
                        audiotools.M4AAudio,
                        audiotools.ALACAudio,
                        audiotools.WavPackAudio)

    @UTIL_TRACKINFO
    def setUp(self):
        # one 1-second track per metadata-capable format,
        # all tagged with the same metadata
        self.metadata_files = [
            tempfile.NamedTemporaryFile(suffix="." + format.SUFFIX)
            for format in self.METADATA_FORMATS]
        self.metadata_tracks = [
            format.from_pcm(file.name, BLANK_PCM_Reader(1))
            for (file, format) in zip(self.metadata_files,
                                      self.METADATA_FORMATS)]

        metadata = audiotools.MetaData(track_name=u"a",
                                       track_number=1,
                                       track_total=2,
                                       album_name=u"b",
                                       artist_name=u"c",
                                       comment=u"d")

        for track in self.metadata_tracks:
            track.set_metadata(metadata)

    @UTIL_TRACKINFO
    def tearDown(self):
        # closing the NamedTemporaryFiles deletes them
        for file in self.metadata_files:
            file.close()

    @UTIL_TRACKINFO
    def test_trackinfo(self):
        """runs trackinfo with every option combination over every
        metadata-capable format and checks the header line, the
        metadata dump and the channel assignment output"""

        import re
        import StringIO

        from audiotools.text import (LAB_TRACKINFO_CHANNELS,
                                     LAB_TRACKINFO_CHANNEL,
                                     MASK_FRONT_LEFT,
                                     MASK_FRONT_RIGHT)

        all_options = ["-n", "-L", "-b", "-%", "-C"]
        for track in self.metadata_tracks:
            for count in xrange(1, len(all_options) + 1):
                for options in Combinations(all_options, count):
                    self.assertEqual(
                        self.__run_app__(
                            ["trackinfo"] + options + [track.filename]), 0)

                    # check the initial output line
                    # (-b shows bitrate, -% shows compression ratio,
                    # the default shows length/channels/rate/bits)
                    line = self.stdout.readline()
                    if ("-b" in options):
                        self.assert_(
                            re.match(r'\s*\d+ kbps: %s\n' %
                                     (track.filename), line) is not None)
                    elif ("-%" in options):
                        self.assert_(
                            re.match(r'\s*\d+%%: %s\n' %
                                     (track.filename), line) is not None)
                    else:
                        self.assert_(
                            re.match(r'\d+:\d+ 2ch 44.1kHz 16-bit: %s\n' %
                                     (track.filename), line) is not None)

                    # check metadata/low-level metadata if -n not present
                    if ("-n" not in options):
                        if ("-L" not in options):
                            # high-level metadata display
                            for line in StringIO.StringIO(
                                unicode(track.get_metadata())):
                                self.__check_output__(line.rstrip('\r\n'))
                        else:
                            # raw, low-level metadata display
                            for line in StringIO.StringIO(
                                track.get_metadata().raw_info()):
                                self.__check_output__(line.rstrip('\r\n'))
                        if ("-C" in options):
                            # blank line separates metadata from channels
                            self.__check_output__(u"")
                    else:
                        # no metadata display at all
                        pass

                    # check channel assignment if -C present
                    if ("-C" in options):
                        self.__check_output__(LAB_TRACKINFO_CHANNELS)
                        self.__check_output__(
                            LAB_TRACKINFO_CHANNEL %
                            {"channel_number": 1,
                             "channel_name": MASK_FRONT_LEFT})
                        self.__check_output__(
                            LAB_TRACKINFO_CHANNEL %
                            {"channel_number": 2,
                             "channel_name": MASK_FRONT_RIGHT})

    @UTIL_TRACKINFO
    def test_unicode(self):
        """verifies trackinfo handles UTF-8 encoded (non-ASCII)
        filename arguments"""

        for filename in [
            "track.flac",
            u'abc\xe0\xe7\xe8\u3041\u3044\u3046.flac'.encode('utf-8')]:
            if (os.path.isfile(filename)):
                os.unlink(filename)

            track = audiotools.FlacAudio.from_pcm(
                filename,
                BLANK_PCM_Reader(1))

            self.assertEqual(
                self.__run_app__(["trackinfo", filename]), 0)

            if (os.path.isfile(filename)):
                os.unlink(filename)
class tracklength(UtilTest):
@UTIL_TRACKLENGTH
def setUp(self):
    # no per-test fixtures; test_tracklength relies on the
    # pre-built 1s.flac/1m.flac/1h.flac files on disk
    pass
@UTIL_TRACKLENGTH
def tearDown(self):
    # nothing to clean up; setUp creates no fixtures
    pass
@UTIL_TRACKLENGTH
def test_tracklength(self):
    """Exercise tracklength's formatted table output.

    Uses three premade FLAC files of 1 second, 1 minute and 1 hour
    and checks the format/count/length/size rows for various
    combinations of files and a directory argument."""

    import shutil

    from audiotools.text import (LAB_TRACKLENGTH_FILE_FORMAT,
                                 LAB_TRACKLENGTH_FILE_COUNT,
                                 LAB_TRACKLENGTH_FILE_LENGTH,
                                 LAB_TRACKLENGTH_FILE_SIZE,
                                 LAB_TRACKLENGTH)

    #sanity-check the lengths of the premade input files
    track1 = audiotools.open("1s.flac")
    track2 = audiotools.open("1m.flac")
    track3 = audiotools.open("1h.flac")
    self.assertEqual(track1.seconds_length(), 1)
    self.assertEqual(track2.seconds_length(), 60)
    self.assertEqual(track3.seconds_length(), 60 * 60)

    #a single 1 second file
    self.assertEqual(self.__run_app__(["tracklength", "1s.flac"]), 0)
    self.__check_output__(u"%6s %5s %7s %4s" %
                          (LAB_TRACKLENGTH_FILE_FORMAT,
                           LAB_TRACKLENGTH_FILE_COUNT,
                           LAB_TRACKLENGTH_FILE_LENGTH,
                           LAB_TRACKLENGTH_FILE_SIZE))
    self.__check_output__(u"%s %s %s %s" %
                          (u"-" * 6,
                           u"-" * 5,
                           u"-" * 7,
                           u"-" * 4))
    self.__check_output__(u"%6s %5s %7s %4s" %
                          (u"flac",
                           1,
                           LAB_TRACKLENGTH % {"hours":0,
                                              "minutes":0,
                                              "seconds":1},
                           380))

    #the same file twice; counts and sizes should double
    self.assertEqual(self.__run_app__(["tracklength", "1s.flac",
                                       "1s.flac"]), 0)
    self.__check_output__(u"%6s %5s %7s %4s" %
                          (LAB_TRACKLENGTH_FILE_FORMAT,
                           LAB_TRACKLENGTH_FILE_COUNT,
                           LAB_TRACKLENGTH_FILE_LENGTH,
                           LAB_TRACKLENGTH_FILE_SIZE))
    self.__check_output__(u"%s %s %s %s" %
                          (u"-" * 6,
                           u"-" * 5,
                           u"-" * 7,
                           u"-" * 4))
    self.__check_output__(u"%6s %5s %7s %4s" %
                          (u"flac",
                           2,
                           LAB_TRACKLENGTH % {"hours":0,
                                              "minutes":0,
                                              "seconds":2},
                           760))

    #1 second plus 1 minute; size switches to human-readable form
    self.assertEqual(self.__run_app__(["tracklength", "1s.flac",
                                       "1m.flac"]), 0)
    self.__check_output__(u"%6s %5s %7s %4s" %
                          (LAB_TRACKLENGTH_FILE_FORMAT,
                           LAB_TRACKLENGTH_FILE_COUNT,
                           LAB_TRACKLENGTH_FILE_LENGTH,
                           LAB_TRACKLENGTH_FILE_SIZE))
    self.__check_output__(u"%s %s %s %s" %
                          (u"-" * 6,
                           u"-" * 5,
                           u"-" * 7,
                           u"-" * 4))
    self.__check_output__(u"%6s %5s %7s %4s" %
                          (u"flac",
                           2,
                           LAB_TRACKLENGTH % {"hours":0,
                                              "minutes":1,
                                              "seconds":1},
                           u"9.8K"))

    #three files; note the size column widens to 5 characters
    self.assertEqual(self.__run_app__(["tracklength", "1s.flac",
                                       "1m.flac", "1m.flac"]), 0)
    self.__check_output__(u"%6s %5s %7s %5s" %
                          (LAB_TRACKLENGTH_FILE_FORMAT,
                           LAB_TRACKLENGTH_FILE_COUNT,
                           LAB_TRACKLENGTH_FILE_LENGTH,
                           LAB_TRACKLENGTH_FILE_SIZE))
    self.__check_output__(u"%s %s %s %s" %
                          (u"-" * 6,
                           u"-" * 5,
                           u"-" * 7,
                           u"-" * 5))
    self.__check_output__(u"%6s %5s %7s %5s" %
                          (u"flac",
                           3,
                           LAB_TRACKLENGTH % {"hours":0,
                                              "minutes":2,
                                              "seconds":1},
                           u"19.1K"))

    #second + minute + hour
    self.assertEqual(self.__run_app__(["tracklength", "1s.flac",
                                       "1m.flac", "1h.flac"]), 0)
    self.__check_output__(u"%6s %5s %7s %5s" %
                          (LAB_TRACKLENGTH_FILE_FORMAT,
                           LAB_TRACKLENGTH_FILE_COUNT,
                           LAB_TRACKLENGTH_FILE_LENGTH,
                           LAB_TRACKLENGTH_FILE_SIZE))
    self.__check_output__(u"%s %s %s %s" %
                          (u"-" * 6,
                           u"-" * 5,
                           u"-" * 7,
                           u"-" * 5))
    self.__check_output__(u"%6s %5s %7s %5s" %
                          (u"flac",
                           3,
                           LAB_TRACKLENGTH % {"hours":1,
                                              "minutes":1,
                                              "seconds":1},
                           u"22.5K"))

    #four files, over two hours total
    self.assertEqual(self.__run_app__(["tracklength", "1s.flac",
                                       "1m.flac", "1h.flac",
                                       "1h.flac"]), 0)
    self.__check_output__(u"%6s %5s %7s %5s" %
                          (LAB_TRACKLENGTH_FILE_FORMAT,
                           LAB_TRACKLENGTH_FILE_COUNT,
                           LAB_TRACKLENGTH_FILE_LENGTH,
                           LAB_TRACKLENGTH_FILE_SIZE))
    self.__check_output__(u"%s %s %s %s" %
                          (u"-" * 6,
                           u"-" * 5,
                           u"-" * 7,
                           u"-" * 5))
    self.__check_output__(u"%6s %5s %7s %5s" %
                          (u"flac",
                           4,
                           LAB_TRACKLENGTH % {"hours":2,
                                              "minutes":1,
                                              "seconds":1},
                           u"35.3K"))

    #a directory argument should include all contained tracks
    tempdir = tempfile.mkdtemp()
    try:
        shutil.copy(track1.filename, tempdir)
        shutil.copy(track2.filename, tempdir)
        shutil.copy(track3.filename, tempdir)
        self.assertEqual(self.__run_app__(["tracklength", tempdir]), 0)
        self.__check_output__(u"%6s %5s %7s %5s" %
                              (LAB_TRACKLENGTH_FILE_FORMAT,
                               LAB_TRACKLENGTH_FILE_COUNT,
                               LAB_TRACKLENGTH_FILE_LENGTH,
                               LAB_TRACKLENGTH_FILE_SIZE))
        self.__check_output__(u"%s %s %s %s" %
                              (u"-" * 6,
                               u"-" * 5,
                               u"-" * 7,
                               u"-" * 5))
        self.__check_output__(u"%6s %5s %7s %5s" %
                              (u"flac",
                               3,
                               LAB_TRACKLENGTH % {"hours":1,
                                                  "minutes":1,
                                                  "seconds":1},
                               u"22.5K"))
    finally:
        for f in os.listdir(tempdir):
            os.unlink(os.path.join(tempdir, f))
        os.rmdir(tempdir)
@UTIL_TRACKLENGTH
def test_unicode(self):
    """tracklength should handle both plain and UTF-8 encoded filenames."""

    for path in ["track.flac",
                 u'abc\xe0\xe7\xe8\u3041\u3044\u3046.flac'.encode('utf-8')]:
        # remove any leftover file before the run
        if os.path.isfile(path):
            os.unlink(path)
        # build a short FLAC track at the given path
        audiotools.FlacAudio.from_pcm(path, BLANK_PCM_Reader(1))
        # the utility should exit with a 0 status on this filename
        self.assertEqual(self.__run_app__(["tracklength", path]), 0)
        # clean up afterward
        if os.path.isfile(path):
            os.unlink(path)
class tracklint(UtilTest):
    """Unit tests for the tracklint(1) metadata checking/fixing utility."""

    @UTIL_TRACKLINT
    def setUp(self):
        # tests build their own temporary tracks as needed
        pass

    @UTIL_TRACKLINT
    def tearDown(self):
        # nothing to clean up
        pass
@UTIL_TRACKLINT
def test_vorbis(self):
    """tracklint should fix and undo Vorbis comment whitespace problems.

    Tags tracks with comments containing leading/trailing whitespace,
    zero-padded numbers and empty fields, runs --fix, then verifies
    --undo restores the original tags exactly."""

    for audio_class in [audiotools.OggFlacAudio,
                        audiotools.VorbisAudio]:
        #a comment with padding, whitespace and empty-field problems
        bad_vorbiscomment = audiotools.VorbisComment(
            [u"TITLE=Track Name ",
             u"TRACKNUMBER=02",
             u"DISCNUMBER=003",
             u"ARTIST= Some Artist",
             u"CATALOG=",
             u"YEAR= ",
             u"COMMENT= Some Comment "],
            u"Vendor String")

        #the metadata tracklint is expected to produce from it
        fixed = audiotools.MetaData(
            track_name=u"Track Name",
            track_number=2,
            album_number=3,
            artist_name=u"Some Artist",
            comment=u"Some Comment")

        self.assertNotEqual(fixed, bad_vorbiscomment)

        tempdir = tempfile.mkdtemp()
        tempmp = os.path.join(tempdir, "track.%s" % (audio_class.SUFFIX))
        undo = os.path.join(tempdir, "undo.db")
        try:
            track = audio_class.from_pcm(tempmp, BLANK_PCM_Reader(10))

            #apply the bad comment and confirm it sticks verbatim
            track.set_metadata(bad_vorbiscomment)
            metadata = track.get_metadata()
            if (isinstance(metadata, audiotools.FlacMetaData)):
                metadata = metadata.get_block(
                    audiotools.flac.Flac_VORBISCOMMENT.BLOCK_ID)
            self.assertEqual(metadata, bad_vorbiscomment)
            for (key, value) in metadata.items():
                self.assertEqual(value, bad_vorbiscomment[key])

            original_checksum = md5()
            f = open(track.filename, 'rb')
            audiotools.transfer_data(f.read, original_checksum.update)
            f.close()

            #--fix should replace the bad comment with the fixed version
            subprocess.call(["tracklint",
                             "-V", "quiet",
                             "--fix", "--db=%s" % (undo),
                             track.filename])

            metadata = track.get_metadata()
            self.assertNotEqual(metadata, bad_vorbiscomment)
            self.assertEqual(metadata, fixed)

            #--undo should restore the original bad comment exactly
            subprocess.call(["tracklint",
                             "-V", "quiet",
                             "--undo", "--db=%s" % (undo),
                             track.filename])

            metadata = track.get_metadata()
            if (isinstance(metadata, audiotools.FlacMetaData)):
                metadata = metadata.get_block(
                    audiotools.flac.Flac_VORBISCOMMENT.BLOCK_ID)
            self.assertEqual(metadata, bad_vorbiscomment)
            self.assertNotEqual(metadata, fixed)
            for (key, value) in metadata.items():
                self.assertEqual(value, bad_vorbiscomment[key])
        finally:
            for f in os.listdir(tempdir):
                os.unlink(os.path.join(tempdir, f))
            os.rmdir(tempdir)
@UTIL_TRACKLINT
def test_flac1(self):
    """tracklint --fix should scrub ID3v2/ID3v1 tags from a FLAC file
    without altering its decoded PCM data."""

    #copy the test track to a temporary location
    tempflac = tempfile.NamedTemporaryFile(suffix=".flac")
    try:
        f = open("flac-id3.flac", "rb")
        audiotools.transfer_data(f.read, tempflac.write)
        f.close()
        tempflac.flush()

        #confirm the copy actually carries an ID3v2 header
        #and an ID3v1 footer before fixing
        tempflac.seek(0, 0)
        self.assertEqual(tempflac.read(3), "ID3")
        tempflac.seek(-0x80, 2)
        self.assertEqual(tempflac.read(3), "TAG")

        #ensure that FLACs tagged with ID3v2/ID3v1 comments are scrubbed
        self.assertEqual(self.__run_app__(
            ["tracklint", "-V", "quiet", "--fix", tempflac.name]), 0)
        flac = audiotools.open(tempflac.name)
        md5sum = md5()
        pcm = flac.to_pcm()
        audiotools.transfer_framelist_data(pcm, md5sum.update)
        pcm.close()
        #decoded audio must be untouched by the fix
        self.assertEqual(md5sum.hexdigest(),
                         "9a0ab096c517a627b0ab5a0b959e5f36")
    finally:
        tempflac.close()
@UTIL_TRACKLINT
def test_flac2(self):
    """tracklint --fix should reorder improperly ordered FLAC metadata
    without altering the decoded PCM data."""

    #copy the disordered test track to a temporary location
    temp = tempfile.NamedTemporaryFile(suffix=".flac")
    try:
        source = open("flac-disordered.flac", "rb")
        audiotools.transfer_data(source.read, temp.write)
        source.close()
        temp.flush()

        #sanity-check that the copy is a FLAC file whose
        #metadata really is out of order before fixing
        temp.seek(0, 0)
        self.assertEqual(temp.read(4), 'fLaC')
        self.assertNotEqual(ord(temp.read(1)) & 0x07, 0)

        #ensure that FLACs with improper metadata ordering are reordered
        self.assertEqual(
            self.__run_app__(
                ["tracklint", "-V", "quiet", "--fix", temp.name]), 0)

        #and that the repaired file still decodes to the same audio
        checksum = md5()
        reader = audiotools.open(temp.name).to_pcm()
        audiotools.transfer_framelist_data(reader, checksum.update)
        reader.close()
        self.assertEqual(checksum.hexdigest(),
                         "9a0ab096c517a627b0ab5a0b959e5f36")
    finally:
        temp.close()
@UTIL_TRACKLINT
def test_flac3(self):
    """tracklint --fix should repair bogus embedded image fields.

    Tags a FLAC with an image whose width/height/depth/count/MIME
    fields are deliberately wrong and checks they are recomputed."""

    #create a small temporary flac
    tempflacfile = tempfile.NamedTemporaryFile(suffix=".flac")
    try:
        tempflac = audiotools.FlacAudio.from_pcm(
            tempflacfile.name,
            BLANK_PCM_Reader(3))

        #build an image from metadata
        image = audiotools.Image.new(TEST_COVER1, u"Description", 0)
        #remember the correct values before corrupting them
        good_mime_type = image.mime_type
        good_width = image.width
        good_height = image.height
        good_depth = image.color_depth
        good_count = image.color_count
        good_description = image.description
        good_type = image.type

        #update image with bogus fields
        image.width = 0
        image.height = 0
        image.color_depth = 0
        image.color_count = 0
        image.mime_type = u'img/jpg'

        #tag flac with bogus fields image
        metadata = tempflac.get_metadata()
        metadata.add_image(image)
        tempflac.set_metadata(metadata)

        #ensure bogus fields stick
        image = tempflac.get_metadata().images()[0]
        self.assertEqual(image.width, 0)
        self.assertEqual(image.height, 0)
        self.assertEqual(image.color_depth, 0)
        self.assertEqual(image.color_count, 0)
        self.assertEqual(image.mime_type, u'img/jpg')

        #fix flac with tracklint
        self.assertEqual(
            self.__run_app__(
                ["tracklint", "-V", "quiet", tempflac.filename, "--fix"]),
            0)

        #ensure bogus fields are fixed
        tempflac = audiotools.open(tempflacfile.name)
        image = tempflac.get_metadata().images()[0]
        self.assertEqual(image.width, good_width)
        self.assertEqual(image.height, good_height)
        self.assertEqual(image.color_depth, good_depth)
        self.assertEqual(image.color_count, good_count)
        self.assertEqual(image.mime_type, good_mime_type)
        self.assertEqual(image.description, good_description)
        self.assertEqual(image.type, good_type)
    finally:
        tempflacfile.close()
@UTIL_TRACKLINT
def test_flac4(self):
    """tracklint --fix should populate an empty MD5SUM field
    in the FLAC STREAMINFO block with the stream's actual MD5."""

    #create a small temporary FLAC
    tempflacfile = tempfile.NamedTemporaryFile(suffix=".flac")
    try:
        #update it with the data from "flac-nonmd5.flac"
        f = open("flac-nonmd5.flac", "rb")
        audiotools.transfer_data(f.read, tempflacfile.write)
        f.close()
        tempflacfile.flush()

        #ensure MD5SUM is empty
        tempflac = audiotools.open(tempflacfile.name)
        self.assertEqual(tempflac.__md5__, chr(0) * 16)

        #ensure file verifies okay
        self.assertEqual(tempflac.verify(), True)

        #fix FLAC with tracklint
        self.assertEqual(
            self.__run_app__(
                ["tracklint", "-V", "quiet", tempflac.filename, "--fix"]),
            0)

        #ensure file's new MD5SUM matches its actual MD5SUM
        tempflac2 = audiotools.open(tempflacfile.name)
        self.assertEqual(tempflac2.__md5__,
                         "\xd2\xb1\x20\x19\x90\x19\xb6\x39" +
                         "\xd5\xa7\xe2\xb3\x46\x3e\x9c\x97")
        self.assertEqual(tempflac2.verify(), True)
    finally:
        tempflacfile.close()
@UTIL_TRACKLINT
def test_apev2(self):
    """tracklint should fix and undo APEv2 tag whitespace problems
    for formats that use APEv2 tags (currently WavPack)."""

    for audio_class in [audiotools.WavPackAudio]:
        #a tag with whitespace padding and empty fields
        bad_apev2 = audiotools.ApeTag(
            [audiotools.ape.ApeTagItem(0, False, "Title", "Track Name "),
             audiotools.ape.ApeTagItem(0, False, "Track", "02"),
             audiotools.ape.ApeTagItem(0, False, "Artist", " Some Artist"),
             audiotools.ape.ApeTagItem(0, False, "Catalog", ""),
             audiotools.ape.ApeTagItem(0, False, "Year", " "),
             audiotools.ape.ApeTagItem(0, False, "Comment", " Some Comment ")])

        #the metadata tracklint is expected to produce from it
        fixed = audiotools.MetaData(
            track_name=u"Track Name",
            track_number=2,
            artist_name=u"Some Artist",
            comment=u"Some Comment")

        self.assertNotEqual(fixed, bad_apev2)

        tempdir = tempfile.mkdtemp()
        tempmp = os.path.join(tempdir, "track.%s" % (audio_class.SUFFIX))
        undo = os.path.join(tempdir, "undo.db")
        try:
            track = audio_class.from_pcm(tempmp, BLANK_PCM_Reader(10))

            #apply the bad tag and confirm it sticks verbatim
            track.set_metadata(bad_apev2)
            metadata = track.get_metadata()
            self.assertEqual(metadata, bad_apev2)
            for key in metadata.keys():
                self.assertEqual(metadata[key].data, bad_apev2[key].data)

            original_checksum = md5()
            f = open(track.filename, 'rb')
            audiotools.transfer_data(f.read, original_checksum.update)
            f.close()

            #--fix should replace the bad tag with the fixed version
            subprocess.call(["tracklint",
                             "-V", "quiet",
                             "--fix", "--db=%s" % (undo),
                             track.filename])

            metadata = track.get_metadata()
            self.assertNotEqual(metadata, bad_apev2)
            self.assertEqual(metadata, fixed)

            #--undo should restore the original bad tag exactly
            subprocess.call(["tracklint",
                             "-V", "quiet",
                             "--undo", "--db=%s" % (undo),
                             track.filename])

            metadata = track.get_metadata()
            self.assertEqual(metadata, bad_apev2)
            self.assertNotEqual(metadata, fixed)
            for tag in metadata.tags:
                self.assertEqual(tag.data, bad_apev2[tag.key].data)
        finally:
            for f in os.listdir(tempdir):
                os.unlink(os.path.join(tempdir, f))
            os.rmdir(tempdir)
def __id3_text__(self, bad_id3v2):
    """Shared helper for the ID3v2.x text-frame fix/undo tests.

    Tags an MP3 with the given bad ID3v2 comment, verifies
    tracklint --fix produces the expected cleaned-up metadata,
    then verifies --undo restores the original comment exactly."""

    #the metadata tracklint is expected to produce
    fixed = audiotools.MetaData(
        track_name=u"Track Name",
        track_number=2,
        album_number=3,
        artist_name=u"Some Artist",
        comment=u"Some Comment")

    self.assertNotEqual(fixed, bad_id3v2)

    tempdir = tempfile.mkdtemp()
    tempmp = os.path.join(tempdir, "track.%s" % \
                          (audiotools.MP3Audio.SUFFIX))
    undo = os.path.join(tempdir, "undo.db")
    try:
        track = audiotools.MP3Audio.from_pcm(
            tempmp,
            BLANK_PCM_Reader(10))

        #apply the bad comment and confirm it sticks verbatim
        track.update_metadata(bad_id3v2)
        metadata = track.get_metadata()
        self.assertEqual(metadata, bad_id3v2)
        for (key, value) in metadata.items():
            self.assertEqual(value, bad_id3v2[key])

        original_checksum = md5()
        f = open(track.filename, 'rb')
        audiotools.transfer_data(f.read, original_checksum.update)
        f.close()

        #--fix should replace the bad comment with the fixed version
        subprocess.call(["tracklint",
                         "-V", "quiet",
                         "--fix", "--db=%s" % (undo),
                         track.filename])

        metadata = track.get_metadata()
        self.assertNotEqual(metadata, bad_id3v2)
        self.assertEqual(metadata, fixed)

        #--undo should restore the original bad comment exactly
        subprocess.call(["tracklint",
                         "-V", "quiet",
                         "--undo", "--db=%s" % (undo),
                         track.filename])

        metadata = track.get_metadata()
        self.assertEqual(metadata, bad_id3v2)
        self.assertNotEqual(metadata, fixed)
        for (key, value) in metadata.items():
            self.assertEqual(value, bad_id3v2[key])
    finally:
        for f in os.listdir(tempdir):
            os.unlink(os.path.join(tempdir, f))
        os.rmdir(tempdir)
def __id3_images__(self, metadata_class, bad_image, fixed_image):
    """Shared helper for the ID3v2.x image fix tests.

    Tags an MP3 using metadata_class with bad_image, verifies the
    bogus fields stick, then checks tracklint --fix restores all
    fields to those of fixed_image."""

    temp_file = tempfile.NamedTemporaryFile(
        suffix="." + audiotools.MP3Audio.SUFFIX)
    try:
        temp_track = audiotools.MP3Audio.from_pcm(
            temp_file.name,
            BLANK_PCM_Reader(5))

        metadata = metadata_class([])
        metadata.add_image(bad_image)
        temp_track.set_metadata(metadata)

        #first, ensure that the bad_image's fields stick
        bad_image2 = temp_track.get_metadata().images()[0]
        for attr in ["data", "mime_type", "width", "height",
                     "color_depth", "color_count", "description",
                     "type"]:
            self.assertEqual(getattr(bad_image2, attr),
                             getattr(bad_image, attr))

        #fix the track with tracklint
        self.assertEqual(self.__run_app__(
            ["tracklint", "-V", "quiet", "--fix", temp_file.name]),
            0)
        temp_track = audiotools.open(temp_file.name)

        #then, ensure that the good fields are now in place
        good_image = temp_track.get_metadata().images()[0]
        for attr in ["data", "mime_type", "width", "height",
                     "color_depth", "color_count", "description",
                     "type"]:
            self.assertEqual(getattr(good_image, attr),
                             getattr(fixed_image, attr))
    finally:
        temp_file.close()
@UTIL_TRACKLINT
def test_id3v22(self):
    """Run the shared text-frame fix/undo test against ID3v2.2 frames."""

    self.__id3_text__(
        audiotools.ID3v22Comment(
            [audiotools.id3.ID3v22_T__Frame.converted(
                "TT2", u"Track Name "),
             audiotools.id3.ID3v22_T__Frame.converted(
                "TRK", u"02"),
             audiotools.id3.ID3v22_T__Frame.converted(
                "TPA", u"003"),
             audiotools.id3.ID3v22_T__Frame.converted(
                "TP1", u" Some Artist\u0000"),
             audiotools.id3.ID3v22_T__Frame.converted(
                "TRC", u""),
             audiotools.id3.ID3v22_T__Frame.converted(
                "TYE", u""),
             audiotools.id3.ID3v22_COM_Frame.converted(
                "COM", u" Some Comment ")]))

    #ID3v2.2 doesn't store most image fields internally
    #so there's little point in testing them for inaccuracies
@UTIL_TRACKLINT
def test_id3v23(self):
    """Run the shared text and image fix/undo tests against ID3v2.3."""

    self.__id3_text__(
        audiotools.ID3v23Comment(
            [audiotools.id3.ID3v23_T___Frame.converted(
                "TIT2", u"Track Name "),
             audiotools.id3.ID3v23_T___Frame.converted(
                "TRCK", u"02"),
             audiotools.id3.ID3v23_T___Frame.converted(
                "TPOS", u"003"),
             audiotools.id3.ID3v23_T___Frame.converted(
                "TPE1", u" Some Artist\u0000"),
             audiotools.id3.ID3v23_T___Frame.converted(
                "TYER", u""),
             audiotools.id3.ID3v23_T___Frame.converted(
                "TCOP", u""),
             audiotools.id3.ID3v23_COMM_Frame.converted(
                "COMM", u" Some Comment ")]))

    good_image = audiotools.Image.new(TEST_COVER1, u"Description", 0)
    bad_image = audiotools.Image.new(TEST_COVER1, u"Description", 0)

    #ID3v2.3 only stores MIME type internally
    #the rest are derived
    bad_image.width = 500
    bad_image.height = 500
    bad_image.color_depth = 24
    bad_image.color_count = 0
    bad_image.mime_type = u'img/jpg'

    self.__id3_images__(audiotools.ID3v23Comment,
                        bad_image,
                        good_image)
@UTIL_TRACKLINT
def test_id3v24(self):
    """Run the shared text and image fix/undo tests against ID3v2.4."""

    self.__id3_text__(
        audiotools.ID3v24Comment(
            [audiotools.id3.ID3v24_T___Frame.converted(
                "TIT2", u"Track Name "),
             audiotools.id3.ID3v24_T___Frame.converted(
                "TRCK", u"02"),
             audiotools.id3.ID3v24_T___Frame.converted(
                "TPOS", u"003"),
             audiotools.id3.ID3v24_T___Frame.converted(
                "TPE1", u" Some Artist\u0000"),
             audiotools.id3.ID3v24_T___Frame.converted(
                "TYER", u""),
             audiotools.id3.ID3v24_T___Frame.converted(
                "TCOP", u""),
             audiotools.id3.ID3v24_COMM_Frame.converted(
                "COMM", u" Some Comment ")]))

    good_image = audiotools.Image.new(TEST_COVER1, u"Description", 0)
    bad_image = audiotools.Image.new(TEST_COVER1, u"Description", 0)

    #ID3v2.4 only stores MIME type internally
    #the rest are derived
    bad_image.width = 500
    bad_image.height = 500
    bad_image.color_depth = 24
    bad_image.color_count = 0
    bad_image.mime_type = u'img/jpg'

    self.__id3_images__(audiotools.ID3v24Comment,
                        bad_image,
                        good_image)
@UTIL_TRACKLINT
def test_mp3(self):
    """tracklint --fix on an unwritable MP3 should fail with
    an encoding error, with or without an undo database."""

    from audiotools.text import (ERR_ENCODING_ERROR)

    track_file = tempfile.NamedTemporaryFile(
        suffix="." + audiotools.MP3Audio.SUFFIX)
    #remember the original permission bits so they can be restored
    track_file_stat = os.stat(track_file.name)[0]

    undo_db_dir = tempfile.mkdtemp()
    undo_db = os.path.join(undo_db_dir, "undo.db")

    try:
        track = audiotools.MP3Audio.from_pcm(track_file.name,
                                             BLANK_PCM_Reader(5))
        #trailing whitespace guarantees tracklint finds something to fix
        track.set_metadata(audiotools.MetaData(
            track_name=u"Track Name ",
            track_number=1))
        if (track.get_metadata() is not None):
            #writable undo DB, unwritable file
            os.chmod(track.filename,
                     track_file_stat & 0x7555)

            self.assertEqual(self.__run_app__(
                ["tracklint", "--fix", "--db", undo_db,
                 track.filename]), 1)
            self.__check_error__(ERR_ENCODING_ERROR %
                                 (audiotools.Filename(track.filename),))

            #no undo DB, unwritable file
            self.assertEqual(self.__run_app__(
                ["tracklint", "--fix", track.filename]), 1)
            self.__check_error__(ERR_ENCODING_ERROR %
                                 (audiotools.Filename(track.filename),))
    finally:
        os.chmod(track_file.name, track_file_stat)
        track_file.close()
        for p in [os.path.join(undo_db_dir, f) for f in
                  os.listdir(undo_db_dir)]:
            os.unlink(p)
        os.rmdir(undo_db_dir)
@UTIL_TRACKLINT
def test_m4a(self):
    """tracklint should fix and undo whitespace/padding problems
    in M4A/ALAC ILST metadata atoms."""

    from audiotools.m4a import M4A_Tree_Atom
    from audiotools.m4a import M4A_META_Atom
    from audiotools.m4a import M4A_HDLR_Atom
    from audiotools.m4a import M4A_ILST_Leaf_Atom
    from audiotools.m4a import M4A_ILST_Unicode_Data_Atom
    from audiotools.m4a import M4A_ILST_TRKN_Data_Atom
    from audiotools.m4a import M4A_ILST_DISK_Data_Atom
    from audiotools.m4a import M4A_FREE_Atom

    #a META atom whose text values carry whitespace/empty-field problems
    bad_m4a = M4A_META_Atom(
        0, 0,
        [M4A_HDLR_Atom(0, 0, '\x00\x00\x00\x00',
                       'mdir', 'appl', 0, 0, '', 0),
         M4A_Tree_Atom(
            'ilst',
            [M4A_ILST_Leaf_Atom(
                '\xa9nam',
                [M4A_ILST_Unicode_Data_Atom(
                    0, 1,
                    'Track Name ')]),
             M4A_ILST_Leaf_Atom(
                '\xa9ART',
                [M4A_ILST_Unicode_Data_Atom(
                    0, 1,
                    ' Some Artist')]),
             M4A_ILST_Leaf_Atom(
                'cprt',
                [M4A_ILST_Unicode_Data_Atom(
                    0, 1,
                    '')]),
             M4A_ILST_Leaf_Atom(
                '\xa9day',
                [M4A_ILST_Unicode_Data_Atom(
                    0, 1,
                    ' ')]),
             M4A_ILST_Leaf_Atom(
                '\xa9cmt',
                [M4A_ILST_Unicode_Data_Atom(
                    0, 1,
                    ' Some Comment ')]),
             M4A_ILST_Leaf_Atom(
                'trkn',
                [M4A_ILST_TRKN_Data_Atom(2, 0)]),
             M4A_ILST_Leaf_Atom(
                'disk',
                [M4A_ILST_DISK_Data_Atom(3, 0)])]),
         M4A_FREE_Atom(1024)])

    #the metadata tracklint is expected to produce from it
    fixed = audiotools.MetaData(
        track_name=u"Track Name",
        track_number=2,
        track_total=None,
        album_number=3,
        album_total=None,
        artist_name=u"Some Artist",
        comment=u"Some Comment")

    self.assertNotEqual(fixed, bad_m4a)

    for audio_class in [audiotools.M4AAudio,
                        audiotools.ALACAudio]:
        tempdir = tempfile.mkdtemp()
        tempmp = os.path.join(tempdir, "track.%s" % (audio_class.SUFFIX))
        undo = os.path.join(tempdir, "undo.db")
        try:
            track = audio_class.from_pcm(
                tempmp,
                BLANK_PCM_Reader(10))

            #apply the bad atoms and confirm they stick verbatim
            track.update_metadata(bad_m4a)
            metadata = track.get_metadata()
            self.assertEqual(metadata, bad_m4a)
            for leaf in metadata.ilst_atom():
                self.assertEqual(leaf, bad_m4a.ilst_atom()[leaf.name])

            original_checksum = md5()
            f = open(track.filename, 'rb')
            audiotools.transfer_data(f.read, original_checksum.update)
            f.close()

            #--fix should replace the bad atoms with the fixed version
            subprocess.call(["tracklint",
                             "-V", "quiet",
                             "--fix", "--db=%s" % (undo),
                             track.filename])

            metadata = track.get_metadata()
            self.assertNotEqual(metadata, bad_m4a)
            self.assertEqual(metadata, fixed)

            #--undo should restore the original bad atoms exactly
            subprocess.call(["tracklint",
                             "-V", "quiet",
                             "--undo", "--db=%s" % (undo),
                             track.filename])

            metadata = track.get_metadata()
            self.assertEqual(metadata, bad_m4a)
            self.assertNotEqual(metadata, fixed)
            for leaf in metadata.ilst_atom():
                self.assertEqual(leaf, bad_m4a.ilst_atom()[leaf.name])
        finally:
            for f in os.listdir(tempdir):
                os.unlink(os.path.join(tempdir, f))
            os.rmdir(tempdir)
@UTIL_TRACKLINT
def test_modtime1(self):
    """A no-op tracklint --fix must leave the file's stats untouched.

    If a track needs no fixes, tracklint should neither rewrite the
    file nor alter its size, ownership or timestamps."""

    import stat

    for audio_class in audiotools.AVAILABLE_TYPES:
        track_file = tempfile.NamedTemporaryFile(
            suffix="." + audio_class.SUFFIX)
        try:
            track = audio_class.from_pcm(track_file.name,
                                         BLANK_PCM_Reader(5))
            #metadata values are unicode literals, consistent with
            #the MetaData usage elsewhere in this test suite
            metadata = audiotools.MetaData(
                track_name=u"Track Name",
                track_number=1,
                track_total=2)
            track.set_metadata(metadata)
            if (track.get_metadata() is not None):
                orig_stat = os.stat(track.filename)
                #sleep so any rewrite would be visible in the timestamps
                time.sleep(1)

                #should make no metadata changes
                self.assertEqual(self.__run_app__(
                    ["tracklint", "--fix", track.filename]), 0)
                self.assertEqual(track.get_metadata(),
                                 metadata)
                new_stat = os.stat(track.filename)

                for field in [stat.ST_MODE,
                              stat.ST_INO,
                              stat.ST_DEV,
                              stat.ST_NLINK,
                              stat.ST_UID,
                              stat.ST_GID,
                              stat.ST_SIZE,
                              stat.ST_MTIME,
                              stat.ST_CTIME]:
                    self.assertEqual(orig_stat[field], new_stat[field])
        finally:
            track_file.close()
@UTIL_TRACKLINT
def test_modtime2(self):
    """Same as test_modtime1, but with an undo database supplied.

    A no-op tracklint --fix --db must still leave the file's
    size, ownership and timestamps untouched."""

    import stat

    for audio_class in audiotools.AVAILABLE_TYPES:
        track_file = tempfile.NamedTemporaryFile(
            suffix="." + audio_class.SUFFIX)
        undo_db = tempfile.NamedTemporaryFile(
            suffix=".db")
        try:
            track = audio_class.from_pcm(track_file.name,
                                         BLANK_PCM_Reader(5))
            #metadata values are unicode literals, consistent with
            #the MetaData usage elsewhere in this test suite
            metadata = audiotools.MetaData(
                track_name=u"Track Name",
                track_number=1,
                track_total=2)
            track.set_metadata(metadata)
            if (track.get_metadata() is not None):
                orig_stat = os.stat(track.filename)
                #sleep so any rewrite would be visible in the timestamps
                time.sleep(1)

                #should make no metadata changes
                self.assertEqual(self.__run_app__(
                    ["tracklint", "--db", undo_db.name,
                     "--fix", track.filename]), 0)
                self.assertEqual(track.get_metadata(),
                                 metadata)
                new_stat = os.stat(track.filename)

                for field in [stat.ST_MODE,
                              stat.ST_INO,
                              stat.ST_DEV,
                              stat.ST_NLINK,
                              stat.ST_UID,
                              stat.ST_GID,
                              stat.ST_SIZE,
                              stat.ST_MTIME,
                              stat.ST_CTIME]:
                    self.assertEqual(orig_stat[field], new_stat[field])
        finally:
            undo_db.close()
            track_file.close()
@UTIL_TRACKLINT
def test_unicode(self):
    """tracklint should accept UTF-8 filenames for both the
    input track and the undo database, fixing and undoing cleanly."""

    for (input_filename,
         backup_database) in Possibilities(
            ["track.flac",
             u'abc\xe0\xe7\xe8\u3041\u3044\u3046.flac'.encode('utf-8')],
            ["undo.db",
             u'abc\xe0\xe7\xe8\u3041\u3044\u3046.db'.encode('utf-8')]):
        #start from a clean slate
        if (os.path.isfile(input_filename)):
            os.unlink(input_filename)
        if (os.path.isfile(backup_database)):
            os.unlink(backup_database)

        #build a track whose name has trailing whitespace to fix
        track = audiotools.FlacAudio.from_pcm(
            input_filename,
            BLANK_PCM_Reader(1))
        metadata = track.get_metadata()
        metadata.track_name = u"Track Name "
        track.update_metadata(metadata)
        self.assertEqual(
            audiotools.open(input_filename).get_metadata().track_name,
            u"Track Name ")

        #--fix strips the trailing whitespace
        self.assertEqual(
            self.__run_app__(["tracklint",
                              "--fix",
                              "--db", backup_database,
                              input_filename]), 0)
        self.assertEqual(
            audiotools.open(input_filename).get_metadata().track_name,
            u"Track Name")

        #--undo restores the original name
        self.assertEqual(
            self.__run_app__(["tracklint",
                              "--undo",
                              "--db", backup_database,
                              input_filename]), 0)
        self.assertEqual(
            audiotools.open(input_filename).get_metadata().track_name,
            u"Track Name ")

        if (os.path.isfile(input_filename)):
            os.unlink(input_filename)
        if (os.path.isfile(backup_database)):
            os.unlink(backup_database)
@UTIL_TRACKLINT
def test_errors1(self):
    """Exercise tracklint's error paths: missing undo DB,
    unwritable undo DB and unwritable track files."""

    from audiotools.text import (ERR_NO_UNDO_DB,
                                 ERR_OPEN_IOERROR,
                                 ERR_ENCODING_ERROR)

    for audio_class in audiotools.AVAILABLE_TYPES:
        track_file = tempfile.NamedTemporaryFile(
            suffix="." + audio_class.SUFFIX)
        #remember the original permission bits so they can be restored
        track_file_stat = os.stat(track_file.name)[0]

        undo_db_dir = tempfile.mkdtemp()
        undo_db = os.path.join(undo_db_dir, "undo.db")

        try:
            track = audio_class.from_pcm(track_file.name,
                                         BLANK_PCM_Reader(5))
            track.set_metadata(audiotools.MetaData(
                track_name=u"Track Name ",
                track_number=1,
                track_total=2))

            #general-purpose errors
            self.assertEqual(self.__run_app__(
                ["tracklint", "--undo", track.filename]), 1)
            self.__check_error__(ERR_NO_UNDO_DB)

            self.assertEqual(self.__run_app__(
                ["tracklint", "--fix", "--db", "/dev/null/foo.db",
                 track.filename]), 1)
            self.__check_error__(
                ERR_OPEN_IOERROR %
                (audiotools.Filename("/dev/null/foo.db"),))

            self.assertEqual(self.__run_app__(
                ["tracklint", "--undo", "--db", "/dev/null/foo.db",
                 track.filename]), 1)
            self.__check_error__(
                ERR_OPEN_IOERROR %
                (audiotools.Filename("/dev/null/foo.db"),))

            if (track.get_metadata() is not None):
                #unwritable undo DB, writable file
                self.assertEqual(self.__run_app__(
                    ["tracklint", "--fix", "--db", "/dev/null/undo.db",
                     track.filename]), 1)
                self.__check_error__(
                    ERR_OPEN_IOERROR %
                    (audiotools.Filename("/dev/null/undo.db"),))
                self.assertEqual(self.__run_app__(
                    ["tracklint", "--undo", "--db",
                     "/dev/null/undo.db",
                     track.filename]), 1)
                self.__check_error__(
                    ERR_OPEN_IOERROR %
                    (audiotools.Filename("/dev/null/undo.db"),))

                #unwritable undo DB, unwritable file
                os.chmod(track.filename, track_file_stat & 0x7555)
                self.assertEqual(self.__run_app__(
                    ["tracklint", "--fix", "--db", "/dev/null/undo.db",
                     track.filename]), 1)
                self.__check_error__(
                    ERR_OPEN_IOERROR %
                    (audiotools.Filename("/dev/null/undo.db"),))
                self.assertEqual(self.__run_app__(
                    ["tracklint", "--undo", "--db",
                     "/dev/null/undo.db",
                     track.filename]), 1)
                self.__check_error__(
                    ERR_OPEN_IOERROR %
                    (audiotools.Filename("/dev/null/undo.db"),))

                #restore from DB to unwritable file
                os.chmod(track.filename, track_file_stat)
                self.assertEqual(self.__run_app__(
                    ["tracklint", "--fix", "--db", undo_db,
                     track.filename]), 0)
                os.chmod(track.filename, track_file_stat & 0x7555)
                self.assertEqual(self.__run_app__(
                    ["tracklint", "--undo", "--db", undo_db,
                     track.filename]), 1)
                self.__check_error__(
                    ERR_ENCODING_ERROR %
                    (audiotools.Filename(track.filename),))
        finally:
            os.chmod(track_file.name, track_file_stat)
            track_file.close()
            for p in [os.path.join(undo_db_dir, f) for f in
                      os.listdir(undo_db_dir)]:
                os.unlink(p)
            os.rmdir(undo_db_dir)
@UTIL_TRACKLINT
def test_errors2(self):
    """tracklint --fix with a writable undo DB but an unwritable
    track file should fail with an encoding error for every format."""

    from audiotools.text import (ERR_ENCODING_ERROR)

    for audio_class in audiotools.AVAILABLE_TYPES:
        track_file = tempfile.NamedTemporaryFile(
            suffix="." + audio_class.SUFFIX)
        #remember the original permission bits so they can be restored
        track_file_stat = os.stat(track_file.name)[0]

        undo_db_dir = tempfile.mkdtemp()
        undo_db = os.path.join(undo_db_dir, "undo.db")

        try:
            track = audio_class.from_pcm(track_file.name,
                                         BLANK_PCM_Reader(5))
            #trailing whitespace guarantees something to fix
            track.set_metadata(audiotools.MetaData(
                track_name=u"Track Name ",
                track_number=1))
            if (track.get_metadata() is not None):
                #writable undo DB, unwritable file
                os.chmod(track.filename,
                         track_file_stat & 0x7555)

                self.assertEqual(self.__run_app__(
                    ["tracklint", "--fix", "--db", undo_db,
                     track.filename]), 1)
                self.__check_error__(
                    ERR_ENCODING_ERROR %
                    (audiotools.Filename(track.filename),))
        finally:
            os.chmod(track_file.name, track_file_stat)
            track_file.close()
            for p in [os.path.join(undo_db_dir, f) for f in
                      os.listdir(undo_db_dir)]:
                os.unlink(p)
            os.rmdir(undo_db_dir)
class tracktag(UtilTest):
    """Unit tests for the tracktag(1) metadata tagging utility."""

    @UTIL_TRACKTAG
    def setUp(self):
        # build a fully-tagged FLAC track in memory (self.track_data);
        # individual tests rewrite it to self.track_file as needed
        track_file_base = tempfile.NamedTemporaryFile()
        self.initial_metadata = audiotools.MetaData(
            track_name=u"Name 1",
            track_number=1,
            track_total=2,
            album_name=u"Album 1",
            artist_name=u"Artist 1",
            album_number=3,
            album_total=4,
            ISRC=u'ABCD00000000',
            comment=u"Comment 1")

        self.image = audiotools.Image.new(TEST_COVER1, u"", 0)
        self.initial_metadata.add_image(self.image)

        track_base = audiotools.FlacAudio.from_pcm(
            track_file_base.name,
            BLANK_PCM_Reader(1))
        track_base.set_metadata(self.initial_metadata)
        self.track_data = open(track_base.filename, 'rb').read()
        track_file_base.close()

        self.track_file = tempfile.NamedTemporaryFile()

        # text file used to exercise the --comment-file option
        self.comment_file = tempfile.NamedTemporaryFile(suffix=".txt")
        self.comment_file.write("Comment File")
        self.comment_file.flush()
@UTIL_TRACKTAG
def tearDown(self):
    """Close (and thereby delete) the temporary files built by setUp."""

    for temp_file in (self.track_file, self.comment_file):
        temp_file.close()
def populate_options(self, options):
    """Return the given tracktag options with sample values attached.

    options - an iterable of option strings such as "--name" or "-r"

    Options that take an argument are followed by a fixed sample
    value ("--comment-file" uses the path of the setUp fixture);
    flag options are passed through unchanged.  The result is
    sorted by option name, matching the original behavior."""

    #map each value-taking option to its sample argument
    option_values = {
        '--name': "Name 3",
        '--artist': "Artist 3",
        '--album': "Album 3",
        '--number': "5",
        '--track-total': "6",
        '--album-number': "7",
        '--album-total': "8",
        '--comment': "Comment 3",
    }

    populated = []
    for option in sorted(options):
        populated.append(option)
        if (option == '--comment-file'):
            #this one's value is only known at runtime
            populated.append(self.comment_file.name)
        elif (option in option_values):
            populated.append(option_values[option])
    return populated
@UTIL_TRACKTAG
def test_options(self):
    """Run tracktag over every combination of a representative
    option subset and verify the resulting metadata fields."""

    from audiotools.text import (ERR_DUPLICATE_FILE,)

    #start out with a bit of sanity checking
    f = open(self.track_file.name, 'wb')
    f.write(self.track_data)
    f.close()

    track = audiotools.open(self.track_file.name)
    track.verify()
    metadata = track.get_metadata()
    self.assertEqual(metadata.images(),
                     [self.image])

    #Why not test all of tracktag's options?
    #The trouble is that it has 30 metadata-specific options
    #and the set of all possible combinations from 1 to 30 options
    #literally numbers in the millions.
    #Since most of those options are straight text,
    #we'll restrict the tests to the more interesting ones
    #which is still over 8000 different option combinations.
    most_options = ['-r', '--name', '--number', '--track-total',
                    '--album-number', '--comment', '--comment-file']

    #ensure tagging the same file twice triggers an error
    self.assertEqual(self.__run_app__(
        ["tracktag", "--name=Test",
         self.track_file.name, self.track_file.name]), 1)
    self.__check_error__(ERR_DUPLICATE_FILE %
                         (audiotools.Filename(self.track_file.name),))

    for count in xrange(1, len(most_options) + 1):
        for options in Combinations(most_options, count):
            #reset the track to its initial tagged state
            f = open(self.track_file.name, 'wb')
            f.write(self.track_data)
            f.close()

            options = self.populate_options(options)
            self.assertEqual(
                self.__run_app__(["tracktag"] +
                                 options +
                                 [self.track_file.name]), 0)

            track = audiotools.open(self.track_file.name)
            track.verify()
            metadata = track.get_metadata()

            #each field is either replaced by the option's value,
            #cleared by -r, or left at its initial value
            if ("--name" in options):
                self.assertEqual(metadata.track_name, u"Name 3")
            elif ("-r" in options):
                self.assertEqual(metadata.track_name, None)
            else:
                self.assertEqual(metadata.track_name, u"Name 1")

            if ("--artist" in options):
                self.assertEqual(metadata.artist_name, u"Artist 3")
            elif ("-r" in options):
                self.assertEqual(metadata.artist_name, None)
            else:
                self.assertEqual(metadata.artist_name, u"Artist 1")

            if ("--album" in options):
                self.assertEqual(metadata.album_name, u"Album 3")
            elif ("-r" in options):
                self.assertEqual(metadata.album_name, None)
            else:
                self.assertEqual(metadata.album_name, u"Album 1")

            if ("--number" in options):
                self.assertEqual(metadata.track_number, 5)
            elif ("-r" in options):
                self.assertEqual(metadata.track_number, None)
            else:
                self.assertEqual(metadata.track_number, 1)

            if ("--track-total" in options):
                self.assertEqual(metadata.track_total, 6)
            elif ("-r" in options):
                self.assertEqual(metadata.track_total, None)
            else:
                self.assertEqual(metadata.track_total, 2)

            if ("--album-number" in options):
                self.assertEqual(metadata.album_number, 7)
            elif ("-r" in options):
                self.assertEqual(metadata.album_number, None)
            else:
                self.assertEqual(metadata.album_number, 3)

            if ("--album-total" in options):
                self.assertEqual(metadata.album_total, 8)
            elif ("-r" in options):
                self.assertEqual(metadata.album_total, None)
            else:
                self.assertEqual(metadata.album_total, 4)

            #--comment-file takes precedence over --comment
            if ("--comment-file" in options):
                self.assertEqual(metadata.comment, u"Comment File")
            elif ("--comment" in options):
                self.assertEqual(metadata.comment, u"Comment 3")
            elif ("-r" in options):
                self.assertEqual(metadata.comment, None)
            else:
                self.assertEqual(metadata.comment, u"Comment 1")

            if ("-r" in options):
                self.assertEqual(metadata.ISRC, None)
            else:
                self.assertEqual(metadata.ISRC, u"ABCD00000000")

            if ("--replay-gain" in options):
                self.assert_(track.replay_gain() is not None)
@UTIL_TRACKTAG
def test_replaygain(self):
    """tracktag --replay-gain adds or applies gain per format support."""

    from audiotools.text import (RG_REPLAYGAIN_ADDED,
                                 RG_REPLAYGAIN_APPLIED)

    for fmt in audiotools.AVAILABLE_TYPES:
        if (not fmt.supports_replay_gain()):
            # skip formats with no ReplayGain support at all
            continue
        temp = tempfile.NamedTemporaryFile(suffix="." + fmt.SUFFIX)
        try:
            new_track = fmt.from_pcm(temp.name, BLANK_PCM_Reader(5))
            # tagging should exit successfully
            self.assertEqual(
                self.__run_app__(["tracktag", "--replay-gain",
                                  new_track.filename]), 0)
            if (fmt.lossless_replay_gain()):
                # lossless formats store the gain as metadata
                self.__check_info__(RG_REPLAYGAIN_ADDED)
                self.assert_(
                    audiotools.open(temp.name).replay_gain() is not None)
            else:
                # lossy formats apply the gain to the audio itself
                self.__check_info__(RG_REPLAYGAIN_APPLIED)
        finally:
            temp.close()
    @UTIL_TRACKTAG
    def test_unicode(self):
        """tracktag should accept non-ASCII filenames,
        non-ASCII text option values and non-ASCII comment file paths"""

        # exercise every text-valued option against
        # both ASCII and UTF-8 encoded filenames and values
        for (input_filename,
             (argument, attribute),
             unicode_value) in Possibilities(
            ["track.flac", #check filename arguments
             u'abc\xe0\xe7\xe8\u3041\u3044\u3046.flac'.encode('utf-8')],
            [("--name", "track_name"), #check text arguments
             ("--artist", "artist_name"),
             ("--album", "album_name"),
             ("--performer", "performer_name"),
             ("--composer", "composer_name"),
             ("--conductor", "conductor_name"),
             ("--catalog", "catalog"),
             ("--ISRC", "ISRC"),
             ("--publisher", "publisher"),
             ("--media-type", "media"),
             ("--year", "year"),
             ("--date", "date"),
             ("--copyright", "copyright"),
             ("--comment", "comment")],
            [u"text",
             u'value abc\xe0\xe7\xe8\u3041\u3044\u3046']):
            self.assert_(isinstance(unicode_value, unicode))
            if (os.path.isfile(input_filename)):
                os.unlink(input_filename)
            track = audiotools.FlacAudio.from_pcm(
                input_filename,
                BLANK_PCM_Reader(1))
            self.assertEqual(
                self.__run_app__(["tracktag",
                                  argument,
                                  unicode_value.encode('utf-8'),
                                  input_filename]), 0)
            # some formats may drop unsupported fields,
            # so only verify the value when it was actually stored
            set_value = getattr(audiotools.open(input_filename).get_metadata(),
                                attribute)
            if (set_value is not None):
                self.assertEqual(set_value, unicode_value)
            if (os.path.isfile(input_filename)):
                os.unlink(input_filename)

        # exercise --comment-file against
        # both ASCII and UTF-8 encoded file paths
        for (input_filename,
             comment_filename) in Possibilities(
            ["track.flac", #check input filename arguments
             u'abc\xe0\xe7\xe8\u3041\u3044\u3046.flac'.encode('utf-8')],
            ["comment.txt", #check comment filename arguments
             u'abc\xe0\xe7\xe8\u3041\u3044\u3046.txt'.encode('utf-8')]):
            if (os.path.isfile(input_filename)):
                os.unlink(input_filename)
            if (os.path.isfile(comment_filename)):
                os.unlink(comment_filename)
            track = audiotools.FlacAudio.from_pcm(
                input_filename,
                BLANK_PCM_Reader(1))
            f = open(comment_filename, "wb")
            f.write("Test Text")
            f.close()
            self.assertEqual(
                self.__run_app__(["tracktag",
                                  "--comment-file", comment_filename,
                                  input_filename]), 0)
            self.assertEqual(
                audiotools.open(input_filename).get_metadata().comment,
                u"Test Text")
            if (os.path.isfile(input_filename)):
                os.unlink(input_filename)
            if (os.path.isfile(comment_filename)):
                os.unlink(comment_filename)
class tracktag_errors(UtilTest):
    """unit tests for tracktag's error-handling paths"""

    @UTIL_TRACKTAG
    def test_bad_options(self):
        """tracktag should return an error for unreadable comment files,
        non-UTF-8 comment files and unwritable target tracks"""

        from audiotools.text import (ERR_OPEN_IOERROR,
                                     ERR_ENCODING_ERROR,
                                     ERR_TRACKTAG_COMMENT_IOERROR,
                                     ERR_TRACKTAG_COMMENT_NOT_UTF8)

        temp_comment = tempfile.NamedTemporaryFile(suffix=".txt")
        temp_track_file = tempfile.NamedTemporaryFile(suffix=".flac")
        temp_track_stat = os.stat(temp_track_file.name)[0]
        try:
            temp_track = audiotools.FlacAudio.from_pcm(
                temp_track_file.name,
                BLANK_PCM_Reader(5))

            temp_track.set_metadata(audiotools.MetaData(track_name=u"Foo"))

            # a comment file path that can't be opened
            self.assertEqual(self.__run_app__(
                ["tracktag", "--comment-file=/dev/null/foo.txt",
                 temp_track.filename]), 1)
            self.__check_error__(ERR_TRACKTAG_COMMENT_IOERROR %
                                 (audiotools.Filename("/dev/null/foo.txt"),))

            # a comment file containing data that isn't valid UTF-8
            temp_comment.write(
                os.urandom(1024) + ((u"\uFFFD".encode('utf-8')) * 103))
            temp_comment.flush()

            self.assertEqual(self.__run_app__(
                ["tracktag", "--comment-file=%s" % (temp_comment.name),
                 temp_track.filename]), 1)
            self.__check_error__(ERR_TRACKTAG_COMMENT_NOT_UTF8 %
                                 (audiotools.Filename(temp_comment.name),))

            # a target track whose write permission bits
            # have been masked out (07555 clears all write bits)
            os.chmod(temp_track_file.name, temp_track_stat & 07555)

            self.assertEqual(self.__run_app__(
                ["tracktag", "--name=Bar",
                 temp_track.filename]), 1)
            self.__check_error__(ERR_ENCODING_ERROR %
                                 (audiotools.Filename(temp_track.filename),))
        finally:
            # restore permissions so the temp file can be deleted
            os.chmod(temp_track_file.name, temp_track_stat)
            temp_track_file.close()
            temp_comment.close()

    @UTIL_TRACKTAG
    def test_oversized_metadata(self):
        """applying an oversized comment via tracktag
        should not damage the underlying audio data,
        including when the metadata round-trips through WavPack"""

        for audio_class in [audiotools.FlacAudio,
                            audiotools.OggFlacAudio]:
            tempflac = tempfile.NamedTemporaryFile(
                suffix="." + audio_class.SUFFIX)
            tempwv = tempfile.NamedTemporaryFile(
                suffix="." + audiotools.WavPackAudio.SUFFIX)
            big_text = tempfile.NamedTemporaryFile(suffix=".txt")
            try:
                flac = audio_class.from_pcm(
                    tempflac.name,
                    BLANK_PCM_Reader(5))
                flac.set_metadata(audiotools.MetaData(track_name=u"Foo"))

                # a highly-compressible oversized text blob,
                # stored here as base64-encoded bz2 data
                big_text.write("QlpoOTFBWSZTWYmtEk8AgICBAKAAAAggADCAKRoBANIBAOLuSKcKEhE1okng".decode('base64').decode('bz2'))
                big_text.flush()

                orig_md5 = md5()
                pcm = flac.to_pcm()
                audiotools.transfer_framelist_data(pcm, orig_md5.update)
                pcm.close()

                #ensure that setting big text via tracktag
                #doesn't break the file
                subprocess.call(["tracktag", "-V", "quiet",
                                 "--comment-file=%s" % (big_text.name),
                                 flac.filename])
                new_md5 = md5()
                pcm = flac.to_pcm()
                audiotools.transfer_framelist_data(pcm, new_md5.update)
                pcm.close()
                self.assertEqual(orig_md5.hexdigest(),
                                 new_md5.hexdigest())

                # convert to WavPack and verify the audio survives
                subprocess.call(["track2track", "-V", "quiet", "-t", "wv",
                                 "-o", tempwv.name,
                                 flac.filename])

                wv = audiotools.open(tempwv.name)

                self.assertEqual(flac, wv)

                # the oversized comment should also apply to WavPack
                self.assertEqual(subprocess.call(
                    ["tracktag", "-V", "quiet",
                     "--comment-file=%s" % (big_text.name),
                     wv.filename]), 0)
                self.assert_(len(wv.get_metadata().comment) > 0)

                # and should survive conversion back to the original type
                subprocess.call(["track2track", "-V", "quiet",
                                 "-t", audio_class.NAME, "-o",
                                 flac.filename, wv.filename])

                flac = audiotools.open(tempflac.name)
                self.assertEqual(flac, wv)
            finally:
                tempflac.close()
                tempwv.close()
                big_text.close()
class NoMetaData(Exception):
    """raised internally when a track unexpectedly has no metadata,
    in order to bail out of a test loop early"""
    pass
class tracktag_misc(UtilTest):
    """miscellaneous tracktag unit tests"""

    @UTIL_TRACKTAG
    def test_text_options(self):
        """exercises tracktag's add-field and remove-field options
        for both text and integer metadata fields"""

        def number_fields_values(fields, metadata_class):
            """given a list of integer field names,
            returns a set of (field, value) tuples
            where named fields get their expected set value
            and all other integer fields are None"""
            # NOTE(review): metadata_class is unused here - confirm

            values = set([])
            for field in audiotools.MetaData.INTEGER_FIELDS:
                if (field in fields):
                    values.add(
                        (field,
                         audiotools.MetaData.INTEGER_FIELDS.index(
                             field) + 1))
                else:
                    values.add((field, None))
            return values

        def deleted_number_fields_values(fields, metadata_class):
            """given a list of deleted integer field names,
            returns a set of (field, value) tuples
            where deleted fields are None
            and all other integer fields keep their expected value"""
            # NOTE(review): metadata_class is unused here - confirm

            values = set([])
            for field in audiotools.MetaData.INTEGER_FIELDS:
                if (field not in fields):
                    values.add(
                        (field,
                         audiotools.MetaData.INTEGER_FIELDS.index(
                             field) + 1))
                else:
                    values.add((field, None))
            return values

        def metadata_fields_values(metadata):
            """returns a set of (field, value) tuples
            for all integer fields in the given MetaData object"""

            values = set([])
            for field in audiotools.MetaData.INTEGER_FIELDS:
                values.add((field, getattr(metadata, field)))
            return values

        for audio_type in audiotools.AVAILABLE_TYPES:
            temp_file = tempfile.NamedTemporaryFile(
                suffix="." + audio_type.SUFFIX)
            try:
                track = audio_type.from_pcm(temp_file.name,
                                            BLANK_PCM_Reader(1))

                # for each text field, ensure the add option sets it
                # and the matching remove option deletes it
                for (field_name,
                     add_field,
                     remove_field) in zip(
                    ['track_name',
                     'artist_name',
                     'performer_name',
                     'composer_name',
                     'conductor_name',
                     'album_name',
                     'catalog',
                     'ISRC',
                     'publisher',
                     'media',
                     'year',
                     'date',
                     'copyright',
                     'comment'],
                    ['--name',
                     '--artist',
                     '--performer',
                     '--composer',
                     '--conductor',
                     '--album',
                     '--catalog',
                     '--ISRC',
                     '--publisher',
                     '--media-type',
                     '--year',
                     '--date',
                     '--copyright',
                     '--comment'],
                    ['--remove-name',
                     '--remove-artist',
                     '--remove-performer',
                     '--remove-composer',
                     '--remove-conductor',
                     '--remove-album',
                     '--remove-catalog',
                     '--remove-ISRC',
                     '--remove-publisher',
                     '--remove-media-type',
                     '--remove-year',
                     '--remove-date',
                     '--remove-copyright',
                     '--remove-comment']):
                    self.assertEqual(
                        self.__run_app__(['tracktag', add_field, 'foo',
                                          track.filename]), 0)
                    new_track = audiotools.open(track.filename)
                    metadata = new_track.get_metadata()
                    if (metadata is None):
                        # format stores no metadata at all,
                        # so there's nothing further to check
                        break
                    elif (getattr(metadata, field_name) is not None):
                        self.assertEqual(getattr(metadata, field_name),
                                         u'foo')
                        self.assertEqual(
                            self.__run_app__(['tracktag', remove_field,
                                              track.filename]), 0)
                        metadata = audiotools.open(
                            track.filename).get_metadata()
                        self.assertEqual(
                            getattr(metadata, field_name),
                            None,
                            "remove option failed for %s field %s" %
                            (audio_type.NAME, remove_field))

                number_fields = ['track_number',
                                 'track_total',
                                 'album_number',
                                 'album_total']
                try:
                    #make sure the number fields get set properly, if possible
                    for count in xrange(1, len(number_fields) + 1):
                        for fields in Combinations(number_fields, count):
                            self.assertEqual(
                                self.__run_app__(
                                    ["tracktag", '-r', track.filename] +
                                    self.populate_set_number_fields(fields)),
                                0)
                            metadata = audiotools.open(
                                track.filename).get_metadata()
                            if (metadata is None):
                                raise NoMetaData()
                            self.assert_(
                                metadata_fields_values(metadata).issubset(
                                    number_fields_values(
                                        fields, metadata.__class__)),
                                "%s not subset of %s for fields %s" % (
                                    metadata_fields_values(metadata),
                                    number_fields_values(
                                        fields, metadata.__class__),
                                    repr(fields)))

                    #make sure the number fields get removed properly, also
                    number_metadata = audiotools.MetaData(track_number=1,
                                                          track_total=2,
                                                          album_number=3,
                                                          album_total=4)
                    for count in xrange(1, len(number_fields) + 1):
                        for fields in Combinations(number_fields, count):
                            audiotools.open(track.filename).set_metadata(
                                number_metadata)
                            self.assertEqual(
                                self.__run_app__(
                                    ["tracktag", track.filename] +
                                    self.populate_delete_number_fields(fields)),
                                0)
                            metadata = audiotools.open(
                                track.filename).get_metadata()
                            self.assert_(
                                metadata_fields_values(metadata).issubset(
                                    deleted_number_fields_values(
                                        fields, metadata.__class__)),
                                "%s not subset of %s for options %s, fields %s, type %s" %
                                (metadata_fields_values(metadata),
                                 deleted_number_fields_values(
                                     fields, metadata.__class__),
                                 self.populate_delete_number_fields(
                                     fields),
                                 fields,
                                 audio_type.NAME))
                except NoMetaData:
                    # format stores no metadata,
                    # so skip the number field checks entirely
                    pass
            finally:
                temp_file.close()

    def populate_set_number_fields(self, fields):
        """given a list of integer field names,
        returns a list of tracktag arguments
        which set each field to a known value"""

        options = []
        for field in fields:
            if (field == 'track_number'):
                options.append('--number')
                options.append(str(1))
            elif (field == 'track_total'):
                options.append('--track-total')
                options.append(str(2))
            elif (field == 'album_number'):
                options.append('--album-number')
                options.append(str(3))
            elif (field == 'album_total'):
                options.append('--album-total')
                options.append(str(4))
        return options

    def populate_delete_number_fields(self, fields):
        """given a list of integer field names,
        returns a list of tracktag arguments
        which remove each of those fields"""

        options = []
        for field in fields:
            if (field == 'track_number'):
                options.append('--remove-number')
            elif (field == 'track_total'):
                options.append('--remove-track-total')
            elif (field == 'album_number'):
                options.append('--remove-album-number')
            elif (field == 'album_total'):
                options.append('--remove-album-total')
        return options
class covertag(UtilTest):
    """unit tests for the covertag utility

    Improvement over the original: test_unicode performed a
    function-local "from shutil import rmtree" which was never used
    (the method only calls os.path.isfile / os.unlink); the dead
    import has been removed.  All other behavior is unchanged."""

    @UTIL_COVERTAG
    def setUp(self):
        """builds a base FLAC file with one embedded image
        plus a set of image files for each cover slot"""

        track_file_base = tempfile.NamedTemporaryFile()
        self.initial_metadata = audiotools.MetaData(
            track_name=u"Name 1",
            track_number=1,
            track_total=2,
            album_name=u"Album 1",
            artist_name=u"Artist 1",
            album_number=3,
            album_total=4,
            ISRC=u'ABCD00000000',
            comment=u"Comment 1")
        # the pre-existing front cover embedded in the base track
        self.image = audiotools.Image.new(TEST_COVER1, u"", 0)
        self.initial_metadata.add_image(self.image)

        track_base = audiotools.FlacAudio.from_pcm(
            track_file_base.name,
            BLANK_PCM_Reader(1))
        track_base.set_metadata(self.initial_metadata)
        # keep the raw file bytes so each test run can start
        # from an identical copy of the tagged track
        self.track_data = open(track_base.filename, 'rb').read()
        track_file_base.close()

        self.track_file = tempfile.NamedTemporaryFile()

        # one image file per covertag option
        self.front_cover1 = tempfile.NamedTemporaryFile(suffix=".png")
        self.front_cover1.write(TEST_COVER4)
        self.front_cover1.flush()

        self.front_cover2 = tempfile.NamedTemporaryFile(suffix=".jpg")
        self.front_cover2.write(TEST_COVER3)
        self.front_cover2.flush()

        self.back_cover = tempfile.NamedTemporaryFile(suffix=".png")
        self.back_cover.write(TEST_COVER2)
        self.back_cover.flush()

        self.leaflet = tempfile.NamedTemporaryFile(suffix=".jpg")
        self.leaflet.write(TEST_COVER1)
        self.leaflet.flush()

        self.media = tempfile.NamedTemporaryFile(suffix=".png")
        self.media.write(TEST_COVER2)
        self.media.flush()

        self.other = tempfile.NamedTemporaryFile(suffix=".png")
        self.other.write(TEST_COVER4)
        self.other.flush()

        # the Image objects expected in the output metadata
        # (type 0 = front cover, 1 = back cover, 2 = leaflet,
        #  3 = media, 4 = other)
        self.front_cover1_image = audiotools.Image.new(
            TEST_COVER4, u"", 0)
        self.front_cover2_image = audiotools.Image.new(
            TEST_COVER3, u"", 0)
        self.back_cover_image = audiotools.Image.new(
            TEST_COVER2, u"", 1)
        self.leaflet_image = audiotools.Image.new(
            TEST_COVER1, u"", 2)
        self.media_image = audiotools.Image.new(
            TEST_COVER2, u"", 3)
        self.other_image = audiotools.Image.new(
            TEST_COVER4, u"", 4)

    @UTIL_COVERTAG
    def tearDown(self):
        """closes (and thereby deletes) all the temporary files"""

        self.track_file.close()
        self.front_cover1.close()
        self.front_cover2.close()
        self.back_cover.close()
        self.leaflet.close()
        self.media.close()
        self.other.close()

    def populate_options(self, options):
        """given a list of option flags,
        returns a full list of covertag arguments
        with each image option's file path filled in;
        repeated --front-cover options consume
        front_cover1 and front_cover2 in turn"""

        populated = []
        front_covers = [self.front_cover1.name, self.front_cover2.name]
        for option in sorted(options):
            if (option == '--front-cover'):
                populated.append(option)
                populated.append(front_covers.pop(0))
            elif (option == '--back-cover'):
                populated.append(option)
                populated.append(self.back_cover.name)
            elif (option == '--leaflet'):
                populated.append(option)
                populated.append(self.leaflet.name)
            elif (option == '--media'):
                populated.append(option)
                populated.append(self.media.name)
            elif (option == '--other-image'):
                populated.append(option)
                populated.append(self.other.name)
            else:
                populated.append(option)
        return populated

    @UTIL_COVERTAG
    def test_options(self):
        """exercises every combination of covertag's image options
        and verifies the resulting embedded images"""

        from audiotools.text import (ERR_DUPLICATE_FILE,)

        #start out with a bit of sanity checking
        f = open(self.track_file.name, 'wb')
        f.write(self.track_data)
        f.close()

        track = audiotools.open(self.track_file.name)
        track.verify()
        metadata = track.get_metadata()
        self.assertEqual(metadata.images(),
                         [self.image])

        covertag_options = ['-r', '--front-cover', '--front-cover',
                            '--back-cover', '--leaflet', '--media',
                            '--other-image']

        #ensure tagging the same file twice triggers an error
        self.assertEqual(self.__run_app__(
            ["covertag", "--front-cover", self.front_cover1.name,
             self.track_file.name, self.track_file.name]), 1)
        self.__check_error__(ERR_DUPLICATE_FILE %
                             (audiotools.Filename(self.track_file.name),))

        for count in xrange(1, len(covertag_options) + 1):
            for options in Combinations(covertag_options, count):
                # restore the track to its original tagged state
                f = open(self.track_file.name, 'wb')
                f.write(self.track_data)
                f.close()

                options = self.populate_options(options)
                self.assertEqual(
                    self.__run_app__(["covertag"] +
                                     options +
                                     [self.track_file.name]), 0)

                track = audiotools.open(self.track_file.name)
                track.verify()
                metadata = track.get_metadata()

                # -r clears the pre-existing front cover;
                # otherwise new front covers are appended after it
                if ('-r' in options):
                    if (options.count('--front-cover') == 0):
                        self.assertEqual(metadata.front_covers(),
                                         [])
                    elif (options.count('--front-cover') == 1):
                        self.assertEqual(metadata.front_covers(),
                                         [self.front_cover1_image])
                    elif (options.count('--front-cover') == 2):
                        self.assertEqual(metadata.front_covers(),
                                         [self.front_cover1_image,
                                          self.front_cover2_image])
                else:
                    if (options.count('--front-cover') == 0):
                        self.assertEqual(metadata.front_covers(),
                                         [self.image])
                    elif (options.count('--front-cover') == 1):
                        self.assertEqual(metadata.front_covers(),
                                         [self.image,
                                          self.front_cover1_image])
                    elif (options.count('--front-cover') == 2):
                        self.assertEqual(metadata.front_covers(),
                                         [self.image,
                                          self.front_cover1_image,
                                          self.front_cover2_image])

                if ('--back-cover' in options):
                    self.assertEqual(metadata.back_covers(),
                                     [self.back_cover_image])
                else:
                    self.assertEqual(metadata.back_covers(),
                                     [])

                if ('--leaflet' in options):
                    self.assertEqual(metadata.leaflet_pages(),
                                     [self.leaflet_image])
                else:
                    self.assertEqual(metadata.leaflet_pages(),
                                     [])

                if ('--media' in options):
                    self.assertEqual(metadata.media_images(),
                                     [self.media_image])
                else:
                    self.assertEqual(metadata.media_images(),
                                     [])

                if ('--other-image' in options):
                    self.assertEqual(metadata.other_images(),
                                     [self.other_image])
                else:
                    self.assertEqual(metadata.other_images(),
                                     [])

    @UTIL_COVERTAG
    def test_unicode(self):
        """covertag should accept non-ASCII track and image file paths"""

        for (file_path,
             option,
             image_path) in Possibilities(
            ["test.flac", #check filename arguments
             u'abc\xe0\xe7\xe8\u3041\u3044\u3046.flac'.encode('utf-8')],
            ["--front-cover",
             "--back-cover",
             "--leaflet",
             "--media",
             "--other-image"],
            ["image.jpg", #check image path arguments
             u'abc\xe0\xe7\xe8\u3041\u3044\u3046.jpg'.encode('utf-8')]):
            if (os.path.isfile(file_path)):
                os.unlink(file_path)
            if (os.path.isfile(image_path)):
                os.unlink(image_path)

            track = audiotools.FlacAudio.from_pcm(
                file_path,
                BLANK_PCM_Reader(1))
            f = open(image_path, "wb")
            f.write(TEST_COVER1)
            f.close()
            self.assertEqual(
                self.__run_app__(
                    ["covertag", option, image_path, file_path]), 0)
            self.assertEqual(
                audiotools.open(file_path).get_metadata().images()[0].data,
                TEST_COVER1)

            if (os.path.isfile(file_path)):
                os.unlink(file_path)
            if (os.path.isfile(image_path)):
                os.unlink(image_path)
class covertag_errors(UtilTest):
    """unit tests for covertag's error-handling paths"""

    @UTIL_COVERTAG
    def test_bad_options(self):
        """covertag should return an error
        for an image path that can't be opened"""

        from audiotools.text import (ERR_OPEN_IOERROR,)

        temp_track_file = tempfile.NamedTemporaryFile(suffix=".flac")
        temp_track_stat = os.stat(temp_track_file.name)[0]
        try:
            temp_track = audiotools.FlacAudio.from_pcm(
                temp_track_file.name,
                BLANK_PCM_Reader(5))

            # an image file path that can't be opened
            self.assertEqual(self.__run_app__(
                ["covertag", "--front-cover=/dev/null/foo.jpg",
                 temp_track.filename]), 1)
            self.__check_error__(
                ERR_OPEN_IOERROR % (audiotools.Filename(u"/dev/null/foo.jpg"),))
        finally:
            os.chmod(temp_track_file.name, temp_track_stat)
            temp_track_file.close()

    @UTIL_COVERTAG
    def test_oversized_metadata(self):
        """embedding an oversized image via covertag
        should not damage the underlying audio data"""

        for audio_class in [audiotools.FlacAudio,
                            audiotools.OggFlacAudio]:
            tempflac = tempfile.NamedTemporaryFile(
                suffix="." + audio_class.SUFFIX)
            big_bmp = tempfile.NamedTemporaryFile(suffix=".bmp")
            try:
                flac = audio_class.from_pcm(
                    tempflac.name,
                    BLANK_PCM_Reader(5))
                flac.set_metadata(audiotools.MetaData(track_name=u"Foo"))

                # HUGE_BMP is a bz2-compressed oversized bitmap
                big_bmp.write(HUGE_BMP.decode('bz2'))
                big_bmp.flush()

                orig_md5 = md5()
                pcm = flac.to_pcm()
                audiotools.transfer_framelist_data(pcm, orig_md5.update)
                pcm.close()

                #ensure that setting a big image via covertag
                #doesn't break the file
                subprocess.call(["covertag", "-V", "quiet",
                                 "--front-cover=%s" % (big_bmp.name),
                                 flac.filename])
                new_md5 = md5()
                pcm = flac.to_pcm()
                audiotools.transfer_framelist_data(pcm, new_md5.update)
                pcm.close()
                self.assertEqual(orig_md5.hexdigest(),
                                 new_md5.hexdigest())
            finally:
                tempflac.close()
                big_bmp.close()
class trackrename(UtilTest):
@UTIL_TRACKRENAME
def setUp(self):
self.type = audiotools.FlacAudio
self.format = "%(track_number)2.2d.%(suffix)s"
self.input_dir = tempfile.mkdtemp()
self.track_names = ["02 - name." + self.type.SUFFIX,
"name." + self.type.SUFFIX,
"02 - name." + self.type.SUFFIX,
"name." + self.type.SUFFIX]
self.track_metadata = [None,
None,
audiotools.MetaData(track_name=u"Track 1",
album_name=u"Album 1",
artist_name=u"Artist 1",
track_number=1),
audiotools.MetaData(track_name=u"Track 1",
album_name=u"Album 1",
artist_name=u"Artist 1",
track_number=1)]
@UTIL_TRACKRENAME
def tearDown(self):
for f in os.listdir(self.input_dir):
os.unlink(os.path.join(self.input_dir, f))
os.rmdir(self.input_dir)
def clean_input_directory(self):
for f in os.listdir(self.input_dir):
os.unlink(os.path.join(self.input_dir, f))
def populate_options(self, options):
populated = []
for option in options:
if (option == '--format'):
populated.append(option)
populated.append(self.format)
else:
populated.append(option)
return populated
@UTIL_TRACKRENAME
def test_options(self):
from audiotools.text import (LAB_ENCODE)
all_options = ["--format"]
for count in xrange(0, len(all_options) + 1):
for (name, metadata) in zip(self.track_names, self.track_metadata):
for options in Combinations(all_options, count):
options = self.populate_options(options)
self.clean_input_directory()
track = self.type.from_pcm(
os.path.join(self.input_dir, name),
BLANK_PCM_Reader(1))
if (metadata is not None):
track.set_metadata(metadata)
original_metadata = track.get_metadata()
track_data = open(track.filename, 'rb').read()
self.assertEqual(
self.__run_app__(["trackrename", "-V", "normal",
track.filename] + options), 0)
if ("--format" in options):
output_format = self.format
else:
output_format = None
if (metadata is not None):
base_metadata = metadata
else:
#track number via filename applies
#only if the file has no other metadata
if (name.startswith("02") and
(original_metadata is None)):
base_metadata = audiotools.MetaData(
track_number=2)
else:
base_metadata = None
destination_filename = os.path.join(
self.input_dir,
self.type.track_name(
file_path="",
track_metadata=base_metadata,
format=output_format))
self.__check_info__(
LAB_ENCODE %
{"source":
audiotools.Filename(track.filename),
"destination":
audiotools.Filename(destination_filename)})
#check that the file is identical
self.assertEqual(track_data,
open(destination_filename, 'rb').read())
@UTIL_TRACKRENAME
def test_duplicate(self):
from audiotools.text import (ERR_DUPLICATE_FILE,
ERR_DUPLICATE_OUTPUT_FILE,
)
name1 = "01 - name." + self.type.SUFFIX
name2 = "02 - name." + self.type.SUFFIX
track1 = self.type.from_pcm(
os.path.join(self.input_dir, name1),
BLANK_PCM_Reader(1))
track1.set_metadata(audiotools.MetaData(track_number=1))
track2 = self.type.from_pcm(
os.path.join(self.input_dir, name2),
BLANK_PCM_Reader(1))
track2.set_metadata(audiotools.MetaData(track_number=2))
self.assertEqual(
self.__run_app__(["trackrename", "-V", "normal",
"--format", self.format,
track1.filename, track1.filename]), 1)
self.__check_error__(
ERR_DUPLICATE_FILE %
(audiotools.Filename(track1.filename),))
self.assertEqual(
self.__run_app__(["trackrename", "-V", "normal",
"--format", "foo",
track1.filename, track2.filename]), 1)
self.__check_error__(
ERR_DUPLICATE_OUTPUT_FILE %
(audiotools.Filename(
os.path.join(
os.path.dirname(track1.filename), "foo")),))
@UTIL_TRACKRENAME
def test_errors(self):
from audiotools.text import (ERR_FILES_REQUIRED,
ERR_UNKNOWN_FIELD,
LAB_SUPPORTED_FIELDS,
)
tempdir = tempfile.mkdtemp()
tempdir_stat = os.stat(tempdir)[0]
track = self.type.from_pcm(
os.path.join(tempdir, "01 - track.%s" % (self.type.SUFFIX)),
BLANK_PCM_Reader(1))
track.set_metadata(audiotools.MetaData(track_name=u"Name",
track_number=1,
album_name=u"Album"))
try:
self.assertEqual(self.__run_app__(["trackrename"]), 1)
self.__check_error__(ERR_FILES_REQUIRED)
self.assertEqual(self.__run_app__(
["trackrename", "--format=%(foo)s", track.filename]), 1)
self.__check_error__(ERR_UNKNOWN_FIELD % ("foo"))
self.__check_info__(LAB_SUPPORTED_FIELDS)
for field in sorted(audiotools.MetaData.FIELDS + \
("album_track_number", "suffix")):
if (field == 'track_number'):
self.__check_info__(u"%(track_number)2.2d")
else:
self.__check_info__(u"%%(%s)s" % (field))
self.__check_info__(u"%(basename)s")
if (track.get_metadata() is not None):
os.chmod(tempdir, tempdir_stat & 0x7555)
self.assertEqual(self.__run_app__(
["trackrename",
'--format=%(album_name)s/%(track_number)2.2d - %(track_name)s.%(suffix)s',
track.filename]), 1)
self.__check_error__(
u"[Errno 13] Permission denied: \'%s\'" % \
(audiotools.Filename(
os.path.join(
os.path.dirname(track.filename), "Album")),))
self.assertEqual(self.__run_app__(
["trackrename",
'--format=%(track_number)2.2d - %(track_name)s.%(suffix)s',
track.filename]), 1)
finally:
os.chmod(tempdir, tempdir_stat)
os.unlink(track.filename)
os.rmdir(tempdir)
@UTIL_TRACKRENAME
def test_unicode(self):
for (file_path,
format_string) in Possibilities(
["file.flac",
u'abc\xe0\xe7\xe8\u3041\u3044\u3046.flac'.encode('utf-8')],
["new_file.flac",
u'abc\xe0\xe7\xe8\u3041\u3044\u3046-2.flac'.encode('utf-8')]):
if (os.path.isfile(file_path)):
os.unlink(file_path)
if (os.path.isfile(format_string)):
os.unlink(format_string)
track = audiotools.FlacAudio.from_pcm(
file_path,
BLANK_PCM_Reader(1))
self.assertEqual(os.path.isfile(file_path), True)
self.assertEqual(os.path.isfile(format_string), False)
self.assertEqual(
self.__run_app__(
["trackrename", "--format", format_string, file_path]), 0)
self.assertEqual(os.path.isfile(file_path), False)
self.assertEqual(os.path.isfile(format_string), True)
if (os.path.isfile(file_path)):
os.unlink(file_path)
if (os.path.isfile(format_string)):
os.unlink(format_string)
class tracksplit(UtilTest):
    @UTIL_TRACKSPLIT
    def setUp(self):
        """builds a single unsplit FLAC file, several cuesheets
        and the temporary directories the tests run in"""

        self.type = audiotools.FlacAudio
        self.quality = "1"
        self.output_dir = tempfile.mkdtemp()
        self.format = "%(track_number)2.2d.%(suffix)s"

        self.unsplit_file = tempfile.NamedTemporaryFile(suffix=".flac")
        self.unsplit_metadata = audiotools.MetaData(
            track_name=u"Track 1",
            track_number=1,
            track_total=2,
            album_number=4,
            album_total=5,
            album_name=u"Album 1",
            artist_name=u"Artist 1",
            performer_name=u"Performer 1")

        # a 3 track cuesheet with JPPI* ISRC values
        self.cuesheet = tempfile.NamedTemporaryFile(suffix=".cue")
        self.cuesheet.write('FILE "CDImage.wav" WAVE\r\n TRACK 01 AUDIO\r\n ISRC JPPI00652340\r\n INDEX 01 00:00:00\r\n TRACK 02 AUDIO\r\n ISRC JPPI00652349\r\n INDEX 00 00:03:00\r\n INDEX 01 00:05:00\r\n TRACK 03 AUDIO\r\n ISRC JPPI00652341\r\n INDEX 00 00:9:00\r\n INDEX 01 00:11:00\r\n')
        self.cuesheet.flush()

        # the same 3 track layout with ABCD* ISRC values,
        # used for embedding in the track itself
        self.cuesheet2 = tempfile.NamedTemporaryFile(suffix=".cue")
        self.cuesheet2.write('FILE "CDImage.wav" WAVE\r\n TRACK 01 AUDIO\r\n ISRC ABCD00000001\r\n INDEX 01 00:00:00\r\n TRACK 02 AUDIO\r\n ISRC ABCD00000002\r\n INDEX 00 00:03:00\r\n INDEX 01 00:05:00\r\n TRACK 03 AUDIO\r\n ISRC ABCD00000003\r\n INDEX 00 00:9:00\r\n INDEX 01 00:11:00\r\n')
        self.cuesheet2.flush()

        # a single track cuesheet
        self.cuesheet3 = tempfile.NamedTemporaryFile(suffix=".cue")
        self.cuesheet3.write('FILE "CDImage.wav" WAVE\r\n TRACK 01 AUDIO\r\n ISRC JPPI00652340\r\n INDEX 01 00:00:00\r\n')
        self.cuesheet3.flush()

        self.unsplit_file2 = tempfile.NamedTemporaryFile(suffix=".flac")

        self.stream = test_streams.Sine16_Stereo(793800, 44100,
                                                 8820.0, 0.70,
                                                 4410.0, 0.29, 1.0)

        # run from a fresh working directory so tracks split
        # with no -d option land somewhere disposable
        self.cwd_dir = tempfile.mkdtemp()
        self.original_dir = os.getcwd()
        os.chdir(self.cwd_dir)

        # a directory with all permissions removed,
        # for exercising unwritable-output errors
        self.unwritable_dir = tempfile.mkdtemp()
        os.chmod(self.unwritable_dir, 0)
    @UTIL_TRACKSPLIT
    def tearDown(self):
        """removes all the temporary files and directories from setUp"""

        # return to the original working directory
        # before deleting the one we were running in
        os.chdir(self.original_dir)

        self.unsplit_file.close()
        self.unsplit_file2.close()
        self.cuesheet.close()
        self.cuesheet2.close()
        self.cuesheet3.close()

        for f in os.listdir(self.output_dir):
            os.unlink(os.path.join(self.output_dir, f))
        os.rmdir(self.output_dir)

        for f in os.listdir(self.cwd_dir):
            os.unlink(os.path.join(self.cwd_dir, f))
        os.rmdir(self.cwd_dir)

        # restore permissions so the directory can actually be removed
        os.chmod(self.unwritable_dir, 0700)
        os.rmdir(self.unwritable_dir)
def clean_output_dirs(self):
for f in os.listdir(self.output_dir):
os.unlink(os.path.join(self.output_dir, f))
for f in os.listdir(self.cwd_dir):
os.unlink(os.path.join(self.cwd_dir, f))
def populate_options(self, options):
populated = ["--no-musicbrainz", "--no-freedb"]
for option in sorted(options):
if (option == '-t'):
populated.append(option)
populated.append(self.type.NAME)
elif (option == '-q'):
populated.append(option)
populated.append(self.quality)
elif (option == '-d'):
populated.append(option)
populated.append(self.output_dir)
elif (option == '--format'):
populated.append(option)
populated.append(self.format)
elif (option == '--cue'):
populated.append(option)
populated.append(self.cuesheet.name)
else:
populated.append(option)
return populated
    @UTIL_TRACKSPLIT
    def test_options_no_embedded_cue(self):
        """exercises tracksplit's option combinations
        against a track with no embedded cuesheet,
        so --cue is required for a successful split"""

        from audiotools.text import (ERR_UNSUPPORTED_COMPRESSION_MODE,
                                     ERR_TRACKSPLIT_NO_CUESHEET)

        all_options = ["--cue", "-t", "-q", "-d", "--format"]

        self.stream.reset()
        track = self.type.from_pcm(self.unsplit_file.name, self.stream)
        track.set_metadata(self.unsplit_metadata)

        for count in xrange(1, len(all_options) + 1):
            for options in Combinations(all_options, count):
                self.clean_output_dirs()
                options = self.populate_options(options)

                if ("-t" in options):
                    output_type = audiotools.FlacAudio
                else:
                    output_type = audiotools.TYPE_MAP[audiotools.DEFAULT_TYPE]

                # a compression quality the output type
                # doesn't support is an error
                if (("-q" in options) and
                    ("1" not in output_type.COMPRESSION_MODES)):
                    self.assertEqual(
                        self.__run_app__(["tracksplit", "-V", "normal",
                                          "--no-freedb", "--no-musicbrainz"] +
                                         options + [track.filename]), 1)
                    self.__check_error__(
                        ERR_UNSUPPORTED_COMPRESSION_MODE %
                        {"quality": "1",
                         "type": output_type.NAME})
                    continue

                # with no embedded cuesheet, omitting --cue is an error
                if ("--cue" not in options):
                    self.assertEqual(
                        self.__run_app__(["tracksplit", "-V", "normal",
                                          "--no-freedb", "--no-musicbrainz"] +
                                         options + [track.filename]), 1)
                    self.__check_error__(ERR_TRACKSPLIT_NO_CUESHEET)
                    continue

                self.assertEqual(
                    self.__run_app__(["tracksplit", "-V", "normal",
                                      "--no-freedb", "--no-musicbrainz"] +
                                     options + [track.filename]), 0)
                if ("--format" in options):
                    output_format = self.format
                else:
                    output_format = None

                if ("-d" in options):
                    output_dir = self.output_dir
                else:
                    output_dir = "."

                base_metadata = audiotools.MetaData(
                    track_total=3,
                    album_number=4,
                    album_total=5,
                    album_name=u"Album 1",
                    artist_name=u"Artist 1",
                    performer_name=u"Performer 1")

                # the filenames the three split tracks should get
                output_filenames = []
                for i in xrange(3):
                    base_metadata.track_number = i + 1
                    output_filenames.append(
                        output_type.track_name(
                            file_path="",
                            track_metadata=base_metadata,
                            format=output_format))

                #check that the output is being generated correctly
                for (i, path) in enumerate(output_filenames):
                    self.__check_info__(
                        audiotools.output_progress(
                            u"%(source)s -> %(destination)s" %
                            {"source":
                             audiotools.Filename(track.filename),
                             "destination":
                             audiotools.Filename(
                                 os.path.join(output_dir, path))},
                            i + 1, len(output_filenames)))

                #make sure no track data has been lost
                output_tracks = [
                    audiotools.open(os.path.join(output_dir, filename))
                    for filename in output_filenames]
                self.stream.reset()
                self.assert_(
                    audiotools.pcm_frame_cmp(
                        audiotools.PCMCat([t.to_pcm() for t in output_tracks]),
                        self.stream) is None)

                #make sure metadata fits our expectations
                for i in xrange(len(output_tracks)):
                    metadata = output_tracks[i].get_metadata()
                    if (metadata is not None):
                        self.assertEqual(metadata.track_name, None)
                        self.assertEqual(metadata.album_name, u"Album 1")
                        self.assertEqual(metadata.artist_name, u"Artist 1")

                        self.assertEqual(metadata.track_number, i + 1)
                        self.assertEqual(metadata.track_total, 3)
                        self.assertEqual(metadata.album_number, 4)
                        self.assertEqual(metadata.album_total, 5)
                        self.assertEqual(metadata.performer_name,
                                         u"Performer 1")

                # ISRC values should come from the --cue cuesheet
                if ("--cue" in options):
                    for (i, ISRC) in enumerate([u"JPPI00652340",
                                                u"JPPI00652349",
                                                u"JPPI00652341"]):
                        metadata = output_tracks[i].get_metadata()
                        if (metadata is not None):
                            self.assertEqual(metadata.ISRC, ISRC)
    @UTIL_TRACKSPLIT
    def test_options_embedded_cue(self):
        """exercises tracksplit's option combinations
        against a track with an embedded cuesheet;
        --cue overrides the embedded sheet's ISRC values"""

        from audiotools.text import (ERR_UNSUPPORTED_COMPRESSION_MODE,
                                     LAB_ENCODE,
                                     )

        all_options = ["--cue", "-t", "-q", "-d", "--format"]

        self.stream.reset()
        track = self.type.from_pcm(self.unsplit_file.name, self.stream)
        track.set_metadata(self.unsplit_metadata)
        track.set_cuesheet(audiotools.read_sheet(self.cuesheet2.name))
        self.assert_(track.get_cuesheet() is not None)

        for count in xrange(1, len(all_options) + 1):
            for options in Combinations(all_options, count):
                self.clean_output_dirs()
                options = self.populate_options(options)

                if ("-t" in options):
                    output_type = audiotools.FlacAudio
                else:
                    output_type = audiotools.TYPE_MAP[audiotools.DEFAULT_TYPE]

                # a compression quality the output type
                # doesn't support is an error
                if (("-q" in options) and
                    ("1" not in output_type.COMPRESSION_MODES)):
                    self.assertEqual(
                        self.__run_app__(["tracksplit", "-V", "normal",
                                          "--no-freedb", "--no-musicbrainz"] +
                                         options + [track.filename]), 1)
                    self.__check_error__(
                        ERR_UNSUPPORTED_COMPRESSION_MODE %
                        {"quality": "1",
                         "type": output_type.NAME})
                    continue

                # unlike the no-embedded-cue case,
                # --cue is optional here and the split should succeed
                self.assertEqual(
                    self.__run_app__(["tracksplit", "-V", "normal",
                                      "--no-freedb", "--no-musicbrainz"] +
                                     options + [track.filename]), 0)
                if ("--format" in options):
                    output_format = self.format
                else:
                    output_format = None

                if ("-d" in options):
                    output_dir = self.output_dir
                else:
                    output_dir = "."

                base_metadata = audiotools.MetaData(
                    track_total=3,
                    album_number=4,
                    album_total=5,
                    album_name=u"Album 1",
                    artist_name=u"Artist 1",
                    performer_name=u"Performer 1")

                # the filenames the three split tracks should get
                output_filenames = []
                for i in xrange(3):
                    base_metadata.track_number = i + 1
                    output_filenames.append(
                        output_type.track_name(
                            "",
                            base_metadata,
                            output_format))

                #check that the output is being generated correctly
                for (i, path) in enumerate(output_filenames):
                    self.__check_info__(
                        audiotools.output_progress(
                            LAB_ENCODE %
                            {"source":
                             audiotools.Filename(track.filename),
                             "destination":
                             audiotools.Filename(
                                 os.path.join(output_dir, path))},
                            i + 1, len(output_filenames)))

                #make sure no track data has been lost
                output_tracks = [
                    audiotools.open(os.path.join(output_dir, filename))
                    for filename in output_filenames]
                self.stream.reset()
                self.assert_(
                    audiotools.pcm_frame_cmp(
                        audiotools.PCMCat([t.to_pcm() for t in output_tracks]),
                        self.stream) is None)

                #make sure metadata fits our expectations
                for i in xrange(len(output_tracks)):
                    metadata = output_tracks[i].get_metadata()
                    if (metadata is not None):
                        self.assertEqual(metadata.track_name, None)
                        self.assertEqual(metadata.album_name, u"Album 1")
                        self.assertEqual(metadata.artist_name, u"Artist 1")

                        self.assertEqual(metadata.track_number, i + 1)
                        self.assertEqual(metadata.track_total, 3)
                        self.assertEqual(metadata.album_number, 4)
                        self.assertEqual(metadata.album_total, 5)
                        self.assertEqual(metadata.performer_name,
                                         u"Performer 1")

                #check ISRC data
                if ("--cue" in options):
                    # ISRCs come from the external cuesheet
                    for (i, ISRC) in enumerate([u"JPPI00652340",
                                                u"JPPI00652349",
                                                u"JPPI00652341"]):
                        metadata = output_tracks[i].get_metadata()
                        if (metadata is not None):
                            self.assertEqual(metadata.ISRC, ISRC)
                else:
                    # ISRCs come from the embedded cuesheet
                    for (i, ISRC) in enumerate([u"ABCD00000001",
                                                u"ABCD00000002",
                                                u"ABCD00000003"]):
                        metadata = output_tracks[i].get_metadata()
                        if (metadata is not None):
                            self.assertEqual(metadata.ISRC, ISRC)
@UTIL_TRACKSPLIT
def test_unicode(self):
    """ensure tracksplit handles non-ASCII (UTF-8 encoded) arguments

    The input filename, --cue file, --dir value and --format template
    are each tried in both an ASCII and a non-ASCII variant, in every
    combination.
    """

    import shutil

    # try every combination of plain and non-ASCII argument values
    for (input_filename,
         cuesheet_file,
         output_directory,
         output_format) in Possibilities(
        ["track.flac",  # check filename arguments
         u'abc\xe0\xe7\xe8\u3041\u3044\u3046.flac'.encode('utf-8')],
        ["cuesheet.cue",  # check --cue argument
         u'abc\xe0\xe7\xe8\u3041\u3044\u3046.cue'.encode('utf-8')],
        ["testdir",  # check --dir argument
         u'abc\xe0\xe7\xe8\u3041\u3044\u3046-dir'.encode('utf-8')],
        ["%(track_number)d.%(suffix)s",  # check --format argument
         u'%(track_number)d - abc\xe0\xe7\xe8\u3041\u3044\u3046.%(suffix)s'.encode('utf-8')]):

        # remove any leftovers from a previous iteration
        if (os.path.isfile(input_filename)):
            os.unlink(input_filename)
        if (os.path.isfile(cuesheet_file)):
            os.unlink(cuesheet_file)
        if (os.path.isdir(output_directory)):
            shutil.rmtree(output_directory)

        # build a three-track source file and a matching cuesheet
        track = audiotools.FlacAudio.from_pcm(
            input_filename,
            EXACT_BLANK_PCM_Reader(sum([220500, 264600, 308700])))

        f = open(cuesheet_file, "wb")
        f.write('FILE "CDImage.wav" WAVE\r\n TRACK 01 AUDIO\r\n ISRC JPPI00652340\r\n INDEX 01 00:00:00\r\n TRACK 02 AUDIO\r\n ISRC JPPI00652349\r\n INDEX 00 00:03:00\r\n INDEX 01 00:05:00\r\n TRACK 03 AUDIO\r\n ISRC JPPI00652341\r\n INDEX 00 00:9:00\r\n INDEX 01 00:11:00\r\n')
        f.close()

        # a successful split exits with status 0
        self.assertEqual(
            self.__run_app__(
                ["tracksplit",
                 "--type", "flac",
                 "--cue", cuesheet_file,
                 "--dir", output_directory,
                 "--format", output_format,
                 input_filename]), 0)

        # all three output tracks must exist...
        output_filenames = [output_format % {"track_number":i,
                                             "suffix":"flac"}
                            for i in range(1, 4)]
        for f in output_filenames:
            self.assertEqual(
                os.path.isfile(os.path.join(output_directory, f)), True)

        # ...and their concatenated PCM data must match the source exactly
        tracks = [audiotools.open(os.path.join(output_directory, f))
                  for f in output_filenames]
        self.assertEqual(
            audiotools.pcm_frame_cmp(
                track.to_pcm(),
                audiotools.PCMCat([t.to_pcm() for t in tracks])),
            None)

        # clean up everything generated by this iteration
        if (os.path.isfile(input_filename)):
            os.unlink(input_filename)
        if (os.path.isfile(cuesheet_file)):
            os.unlink(cuesheet_file)
        if (os.path.isdir(output_directory)):
            shutil.rmtree(output_directory)
def populate_bad_options(self, options):
    """turn a list of option flags into a tracksplit argument list
    where each known flag is followed by a deliberately bad value

    '-t' gets an unknown type, '-q' an unknown quality,
    '-d' an unwritable directory and '--format' an unknown field;
    unrecognized flags are passed through with no value.
    Options are emitted in sorted order, after the standard
    "--no-musicbrainz"/"--no-freedb" prefix."""

    bogus_values = {'-t': "foo",
                    '-q': "bar",
                    '--format': "%(foo)s.%(suffix)s"}

    args = ["--no-musicbrainz", "--no-freedb"]
    for flag in sorted(options):
        args.append(flag)
        if (flag == '-d'):
            # the unwritable directory lives on the test fixture
            args.append(self.unwritable_dir)
        elif (flag in bogus_values):
            args.append(bogus_values[flag])
    return args
@UTIL_TRACKSPLIT
def test_errors(self):
    """ensure tracksplit exits with an error for invalid arguments"""

    from audiotools.text import (ERR_OUTPUT_IS_INPUT,
                                 ERR_DUPLICATE_OUTPUT_FILE,
                                 ERR_UNSUPPORTED_COMPRESSION_MODE,
                                 ERR_UNKNOWN_FIELD,
                                 LAB_SUPPORTED_FIELDS,
                                 ERR_1_FILE_REQUIRED,
                                 ERR_TRACKSPLIT_NO_CUESHEET,
                                 ERR_TRACKSPLIT_OVERLONG_CUESHEET,
                                 )

    # ensure that unsplitting a file to itself generates an error
    track = self.type.from_pcm(self.unsplit_file.name,
                               BLANK_PCM_Reader(18))

    self.assertEqual(
        self.__run_app__(
            ["tracksplit", self.unsplit_file.name,
             "--no-freedb", "--no-musicbrainz",
             "--cue", self.cuesheet3.name,
             "-d", os.path.dirname(self.unsplit_file.name),
             "--format", os.path.basename(self.unsplit_file.name)]), 1)
    self.__check_error__(ERR_OUTPUT_IS_INPUT %
                         (audiotools.Filename(self.unsplit_file.name),))

    # ensure that unsplitting a file to identical names generates an error
    # (a constant --format collapses all tracks onto one output path)
    self.assertEqual(
        self.__run_app__(
            ["tracksplit", self.unsplit_file.name,
             "--no-freedb", "--no-musicbrainz",
             "--cue", self.cuesheet.name,
             "-d", os.path.dirname(self.unsplit_file.name),
             "--format", "foo"]), 1)
    self.__check_error__(
        ERR_DUPLICATE_OUTPUT_FILE %
        (audiotools.Filename(
            os.path.join(os.path.dirname(self.unsplit_file.name),
                         "foo")),))

    track1 = self.type.from_pcm(self.unsplit_file.name,
                                BLANK_PCM_Reader(18))

    # track2 is shorter than the cuesheet requires (used below)
    track2 = self.type.from_pcm(self.unsplit_file2.name,
                                BLANK_PCM_Reader(5))

    # try every combination of options with deliberately bad values
    all_options = ["-t", "-q", "-d", "--format"]
    for count in xrange(1, len(all_options) + 1):
        for options in Combinations(all_options, count):
            options = self.populate_bad_options(options)

            if ("-t" in options):
                # an unknown output type is a usage error (exit status 2)
                self.assertEqual(
                    self.__run_app__(["tracksplit", track1.filename] +
                                     options),
                    2)
                continue
            else:
                output_type = audiotools.TYPE_MAP[audiotools.DEFAULT_TYPE]

            self.assertEqual(
                self.__run_app__(["tracksplit", "--cue",
                                  self.cuesheet.name,
                                  track1.filename] +
                                 options),
                1)

            if ("-q" in options):
                # unknown quality for the default output type
                self.__check_error__(
                    ERR_UNSUPPORTED_COMPRESSION_MODE %
                    {"quality": "bar",
                     "type": audiotools.DEFAULT_TYPE})
                continue

            if ("--format" in options):
                # unknown %(field)s; the list of valid fields follows
                self.__check_error__(
                    ERR_UNKNOWN_FIELD % ("foo"))
                self.__check_info__(LAB_SUPPORTED_FIELDS)
                for field in sorted(audiotools.MetaData.FIELDS + \
                                        ("album_track_number", "suffix")):
                    if (field == 'track_number'):
                        self.__check_info__(u"%(track_number)2.2d")
                    else:
                        self.__check_info__(u"%%(%s)s" % (field))
                self.__check_info__(u"%(basename)s")
                continue

            if ("-d" in options):
                # output directory is not writable
                output_path = os.path.join(
                    self.unwritable_dir,
                    output_type.track_name(
                        "",
                        audiotools.MetaData(track_number=1,
                                            track_total=3)))
                self.__check_error__(
                    u"[Errno 13] Permission denied: \'%s\'" %
                    (output_path))
                continue

    # exactly one input file is required
    self.assertEqual(self.__run_app__(
        ["tracksplit", "-t", "flac", "-d", self.output_dir]), 1)

    self.__check_error__(ERR_1_FILE_REQUIRED)
    self.assertEqual(self.__run_app__(
        ["tracksplit", "-t", "flac", "-d", self.output_dir,
         self.unsplit_file.name, self.unsplit_file2.name]), 1)
    self.__check_error__(ERR_1_FILE_REQUIRED)

    # splitting requires a cuesheet
    self.assertEqual(self.__run_app__(
        ["tracksplit", "-t", "flac", "-d", self.output_dir,
         self.unsplit_file.name]), 1)
    self.__check_error__(ERR_TRACKSPLIT_NO_CUESHEET)

    # the cuesheet must not be longer than the file being split
    self.assertEqual(self.__run_app__(
        ["tracksplit", "-t", "flac", "-d", self.output_dir,
         "--cue", self.cuesheet.name, track2.filename]), 1)
    self.__check_error__(ERR_TRACKSPLIT_OVERLONG_CUESHEET)
    #FIXME? - check for broken cue sheet output?
| Excito/audiotools | test/test_utils.py | Python | gpl-2.0 | 208,131 | [
"Brian"
] | 24dd36ef639c0754c6367253d7bcf339e65ef5045b98be503560b18b58a4b36a |
# -*- coding: utf8 -*-
# $Id: __init__.py 6385 2010-08-13 12:17:01Z milde $
# Author: Engelbert Gruber <grubert@users.sourceforge.net>
# Copyright: This module has been placed in the public domain.
"""LaTeX2e document tree Writer."""
__docformat__ = 'reStructuredText'
# code contributions from several people included, thanks to all.
# some named: David Abrahams, Julien Letessier, Lele Gaifax, and others.
#
# convention deactivate code by two # i.e. ##.
import sys
import os
import time
import re
import string
from docutils import frontend, nodes, languages, writers, utils, io
from docutils.transforms import writer_aux
# compatibility module for Python 2.3
# string.Template was added in Python 2.4; on 2.3 fall back to the
# bundled compatibility implementation so template substitution works.
if not hasattr(string, 'Template'):
    import docutils._string_template_compat
    string.Template = docutils._string_template_compat.Template
class Writer(writers.Writer):
    """Docutils Writer producing a LaTeX2e source document.

    Node-to-LaTeX translation is delegated to `LaTeXTranslator`; this
    class collects the translated fragments and substitutes them into
    a string template (see `translate` and `assemble_parts`).
    """

    supported = ('latex','latex2e')
    """Formats this writer supports."""

    default_template = 'default.tex'
    default_template_path = os.path.dirname(__file__)

    # default value of the --latex-preamble setting:
    # select the PDF standard fonts
    default_preamble = '\n'.join([r'% PDF Standard Fonts',
                                  r'\usepackage{mathptmx} % Times',
                                  r'\usepackage[scaled=.90]{helvet}',
                                  r'\usepackage{courier}'])

    # command-line/config options specific to this writer
    # (see docutils.frontend for the option-tuple format)
    settings_spec = (
        'LaTeX-Specific Options',
        None,
        (('Specify documentclass.  Default is "article".',
          ['--documentclass'],
          {'default': 'article', }),
         ('Specify document options.  Multiple options can be given, '
          'separated by commas.  Default is "a4paper".',
          ['--documentoptions'],
          {'default': 'a4paper', }),
         ('Footnotes with numbers/symbols by Docutils. (default)',
          ['--docutils-footnotes'],
          {'default': True, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Alias for --docutils-footnotes (deprecated)',
          ['--use-latex-footnotes'],
          {'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Use figure floats for footnote text (deprecated)',
          ['--figure-footnotes'],
          {'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Format for footnote references: one of "superscript" or '
          '"brackets".  Default is "superscript".',
          ['--footnote-references'],
          {'choices': ['superscript', 'brackets'], 'default': 'superscript',
           'metavar': '<format>',
           'overrides': 'trim_footnote_reference_space'}),
         ('Use \\cite command for citations. ',
          ['--use-latex-citations'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Use figure floats for citations '
          '(might get mixed with real figures). (default)',
          ['--figure-citations'],
          {'dest': 'use_latex_citations', 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ('Format for block quote attributions: one of "dash" (em-dash '
          'prefix), "parentheses"/"parens", or "none".  Default is "dash".',
          ['--attribution'],
          {'choices': ['dash', 'parentheses', 'parens', 'none'],
           'default': 'dash', 'metavar': '<format>'}),
         ('Specify LaTeX packages/stylesheets. '
          ' A style is referenced with \\usepackage if extension is '
          '".sty" or omitted and with \\input else. '
          ' Overrides previous --stylesheet and --stylesheet-path settings.',
          ['--stylesheet'],
          {'default': '', 'metavar': '<file>',
           'overrides': 'stylesheet_path'}),
         ('Like --stylesheet, but the path is rewritten '
          'relative to the output file. ',
          ['--stylesheet-path'],
          {'metavar': '<file>', 'overrides': 'stylesheet'}),
         ('Link to the stylesheet(s) in the output file. (default)',
          ['--link-stylesheet'],
          {'dest': 'embed_stylesheet', 'action': 'store_false'}),
         ('Embed the stylesheet(s) in the output file. '
          'Stylesheets must be accessible during processing. ',
          ['--embed-stylesheet'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Customization by LaTeX code in the preamble. '
          'Default: select PDF standard fonts (Times, Helvetica, Courier).',
          ['--latex-preamble'],
          {'default': default_preamble}),
         ('Specify the template file. Default: "%s".' % default_template,
          ['--template'],
          {'default': default_template, 'metavar': '<file>'}),
         ('Table of contents by LaTeX. (default) ',
          ['--use-latex-toc'],
          {'default': 1, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Table of contents by Docutils (without page numbers). ',
          ['--use-docutils-toc'],
          {'dest': 'use_latex_toc', 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ('Add parts on top of the section hierarchy.',
          ['--use-part-section'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Attach author and date to the document info table. (default) ',
          ['--use-docutils-docinfo'],
          {'dest': 'use_latex_docinfo', 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ('Attach author and date to the document title.',
          ['--use-latex-docinfo'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ("Typeset abstract as topic. (default)",
          ['--topic-abstract'],
          {'dest': 'use_latex_abstract', 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ("Use LaTeX abstract environment for the document's abstract. ",
          ['--use-latex-abstract'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Color of any hyperlinks embedded in text '
          '(default: "blue", "0" to disable).',
          ['--hyperlink-color'], {'default': 'blue'}),
         ('Enable compound enumerators for nested enumerated lists '
          '(e.g. "1.2.a.ii").  Default: disabled.',
          ['--compound-enumerators'],
          {'default': None, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Disable compound enumerators for nested enumerated lists. '
          'This is the default.',
          ['--no-compound-enumerators'],
          {'action': 'store_false', 'dest': 'compound_enumerators'}),
         ('Enable section ("." subsection ...) prefixes for compound '
          'enumerators.  This has no effect without --compound-enumerators.'
          'Default: disabled.',
          ['--section-prefix-for-enumerators'],
          {'default': None, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Disable section prefixes for compound enumerators. '
          'This is the default.',
          ['--no-section-prefix-for-enumerators'],
          {'action': 'store_false', 'dest': 'section_prefix_for_enumerators'}),
         ('Set the separator between section number and enumerator '
          'for compound enumerated lists.  Default is "-".',
          ['--section-enumerator-separator'],
          {'default': '-', 'metavar': '<char>'}),
         ('When possibile, use the specified environment for literal-blocks. '
          'Default is quoting of whitespace and special chars.',
          ['--literal-block-env'],
          {'default': ''}),
         ('When possibile, use verbatim for literal-blocks. '
          'Compatibility alias for "--literal-block-env=verbatim".',
          ['--use-verbatim-when-possible'],
          {'default': 0, 'action': 'store_true',
           'validator': frontend.validate_boolean}),
         ('Table style. "standard" with horizontal and vertical lines, '
          '"booktabs" (LaTeX booktabs style) only horizontal lines '
          'above and below the table and below the header or "borderless".  '
          'Default: "standard"',
          ['--table-style'],
          {'choices': ['standard', 'booktabs','nolines', 'borderless'],
           'default': 'standard',
           'metavar': '<format>'}),
         ('LaTeX graphicx package option. '
          'Possible values are "dvips", "pdftex". "auto" includes LaTeX code '
          'to use "pdftex" if processing with pdf(la)tex and dvips otherwise. '
          'Default is no option.',
          ['--graphicx-option'],
          {'default': ''}),
         ('LaTeX font encoding. '
          'Possible values are "", "T1" (default), "OT1", "LGR,T1" or '
          'any other combination of options to the `fontenc` package. ',
          ['--font-encoding'],
          {'default': 'T1'}),
         ('Per default the latex-writer puts the reference title into '
          'hyperreferences. Specify "ref*" or "pageref*" to get the section '
          'number or the page number.',
          ['--reference-label'],
          {'default': None, }),
         ('Specify style and database for bibtex, for example '
          '"--use-bibtex=mystyle,mydb1,mydb2".',
          ['--use-bibtex'],
          {'default': None, }),
         ),)

    settings_defaults = {'sectnum_depth': 0 # updated by SectNum transform
                        }
    relative_path_settings = ('stylesheet_path',)

    config_section = 'latex2e writer'
    config_section_dependencies = ('writers',)

    # names of the fragments that make up the document head;
    # visitor_attributes lists every fragment copied from the translator
    head_parts = ('head_prefix', 'requirements', 'latex_preamble',
                  'stylesheet', 'fallbacks', 'pdfsetup', 'title', 'subtitle')
    visitor_attributes = head_parts + ('body_pre_docinfo', 'docinfo',
                                       'dedication', 'abstract', 'body')

    output = None
    """Final translated form of `document`."""

    def __init__(self):
        writers.Writer.__init__(self)
        self.translator_class = LaTeXTranslator

    # Override parent method to add latex-specific transforms
    def get_transforms(self):
        # call the parent class' method
        transform_list = writers.Writer.get_transforms(self)
        # print transform_list
        # Convert specific admonitions to generic one
        transform_list.append(writer_aux.Admonitions)
        # TODO: footnote collection transform
        # transform_list.append(footnotes.collect)
        return transform_list

    def translate(self):
        """Translate `self.document` into `self.output` via the template."""
        visitor = self.translator_class(self.document)
        self.document.walkabout(visitor)
        # copy parts
        for part in self.visitor_attributes:
            setattr(self, part, getattr(visitor, part))
        # get template string from file
        # (fall back to the template bundled next to this module)
        try:
            file = open(self.document.settings.template, 'rb')
        except IOError:
            file = open(os.path.join(os.path.dirname(__file__),
                                     self.document.settings.template), 'rb')
        template = string.Template(unicode(file.read(), 'utf-8'))
        file.close()
        # fill template
        self.assemble_parts() # create dictionary of parts
        self.output = template.substitute(self.parts)

    def assemble_parts(self):
        """Assemble the `self.parts` dictionary of output fragments."""
        writers.Writer.assemble_parts(self)
        for part in self.visitor_attributes:
            lines = getattr(self, part)
            if part in self.head_parts:
                if lines:
                    lines.append('') # to get a trailing newline
                self.parts[part] = '\n'.join(lines)
            else:
                # body contains inline elements, so join without newline
                self.parts[part] = ''.join(lines)
class Babel(object):
    """Language specifics for LaTeX (babel package selection and quoting).

    Maps ISO 639 language codes to babel option names, cycles opening
    and closing typographic quotes, and holds language-dependent
    configuration code for the preamble.
    """
    # country code by a.schlock.
    # partly manually converted from iso and babel stuff, dialects and some
    # NOTE: duplicate keys removed ('hu' used to map to both 'magyar' and
    # 'hungarian', 'pt' appeared twice); the later entries always won, so
    # only those are kept.
    _ISO639_TO_BABEL = {
        'no': 'norsk',     #XXX added by hand ( forget about nynorsk?)
        'gd': 'scottish',  #XXX added by hand
        'sl': 'slovenian',
        'af': 'afrikaans',
        'bg': 'bulgarian',
        'br': 'breton',
        'ca': 'catalan',
        'cs': 'czech',
        'cy': 'welsh',
        'da': 'danish',
        'fr': 'french',
        # french, francais, canadien, acadian
        'de': 'ngerman',  #XXX rather than german
        # ngerman, naustrian, german, germanb, austrian
        'el': 'greek',
        'en': 'english',
        # english, USenglish, american, UKenglish, british, canadian
        'eo': 'esperanto',
        'es': 'spanish',
        'et': 'estonian',
        'eu': 'basque',
        'fi': 'finnish',
        'ga': 'irish',
        'gl': 'galician',
        'he': 'hebrew',
        'hr': 'croatian',
        'hu': 'hungarian',
        'is': 'icelandic',
        'it': 'italian',
        'la': 'latin',
        'nl': 'dutch',
        'pl': 'polish',
        'pt': 'portuguese',
        'ro': 'romanian',
        'ru': 'russian',
        'sk': 'slovak',
        'sr': 'serbian',
        'sv': 'swedish',
        'tr': 'turkish',
        'uk': 'ukrainian'
        }

    def __init__(self, lang):
        self.language = lang
        self.quote_index = 0
        self.quotes = ('``', "''")
        self.setup = '' # language dependent configuration code
        # double quotes are "active" in some languages (e.g. German).
        # TODO: use \textquotedbl in OT1 font encoding?
        # (written as u'' with escaped backslashes instead of ur''
        # for forward compatibility -- same string values)
        self.literal_double_quote = u'"'
        if self.language.startswith('de'):
            self.quotes = (r'\glqq{}', r'\grqq{}')
            self.literal_double_quote = u'\\dq{}'
        if self.language.startswith('it'):
            self.literal_double_quote = u'{\\char`\\"}'
        if self.language.startswith('es'):
            # reset tilde ~ to the original binding (nobreakspace):
            self.setup = ('\n'
                  r'\addto\shorthandsspanish{\spanishdeactivate{."~<>}}')

    def next_quote(self):
        """Return the next quote mark, alternating open/close."""
        q = self.quotes[self.quote_index]
        self.quote_index = (self.quote_index+1) % 2
        return q

    def quote_quotes(self, text):
        """Replace plain '"' characters by alternating language quotes."""
        t = None
        for part in text.split('"'):
            if t is None:
                t = part
            else:
                t += self.next_quote() + part
        return t

    def get_language(self):
        """Return the babel option name for `self.language` ('' if unknown)."""
        lang = self.language.split('_')[0]  # filter dialects
        return self._ISO639_TO_BABEL.get(lang, "")
# Building blocks for the latex preamble
# --------------------------------------
class SortableDict(dict):
    """Dictionary with additional sorting methods

    Tip: use keys starting with '_' for sorting before small letters
         and with '~' for sorting after small letters.
    """
    def sortedkeys(self):
        """Return sorted list of keys"""
        # sorted() works on both Python 2 lists and Python 3 key views,
        # unlike the former ``keys = self.keys(); keys.sort()``.
        return sorted(self.keys())

    def sortedvalues(self):
        """Return list of values sorted by keys"""
        return [self[key] for key in self.sortedkeys()]
# PreambleCmds
# `````````````
# A container for LaTeX code snippets that can be
# inserted into the preamble if required in the document.
#
# .. The package 'makecmds' would enable shorter definitions using the
# \providelength and \provideenvironment commands.
# However, it is pretty non-standard (texlive-latex-extra).
class PreambleCmds(object):
"""Building blocks for the latex preamble."""
PreambleCmds.abstract = r"""
% abstract title
\providecommand*{\DUtitleabstract}[1]{\centerline{\textbf{#1}}}"""
PreambleCmds.admonition = r"""
% admonition (specially marked topic)
\providecommand{\DUadmonition}[2][class-arg]{%
% try \DUadmonition#1{#2}:
\ifcsname DUadmonition#1\endcsname%
\csname DUadmonition#1\endcsname{#2}%
\else
\begin{center}
\fbox{\parbox{0.9\textwidth}{#2}}
\end{center}
\fi
}"""
## PreambleCmds.caption = r"""% configure caption layout
## \usepackage{caption}
## \captionsetup{singlelinecheck=false}% no exceptions for one-liners"""
PreambleCmds.color = r"""\usepackage{color}"""
PreambleCmds.docinfo = r"""
% docinfo (width of docinfo table)
\DUprovidelength{\DUdocinfowidth}{0.9\textwidth}"""
# PreambleCmds.docinfo._depends = 'providelength'
PreambleCmds.embedded_package_wrapper = r"""\makeatletter
%% embedded stylesheet: %s
%s
\makeatother"""
PreambleCmds.dedication = r"""
% dedication topic
\providecommand{\DUtopicdedication}[1]{\begin{center}#1\end{center}}"""
PreambleCmds.error = r"""
% error admonition title
\providecommand*{\DUtitleerror}[1]{\DUtitle{\color{red}#1}}"""
# PreambleCmds.errortitle._depends = 'color'
PreambleCmds.fieldlist = r"""
% fieldlist environment
\ifthenelse{\isundefined{\DUfieldlist}}{
\newenvironment{DUfieldlist}%
{\quote\description}
{\enddescription\endquote}
}{}"""
PreambleCmds.float_settings = r"""\usepackage{float} % float configuration
\floatplacement{figure}{H} % place figures here definitely"""
PreambleCmds.footnotes = r"""% numeric or symbol footnotes with hyperlinks
\providecommand*{\DUfootnotemark}[3]{%
\raisebox{1em}{\hypertarget{#1}{}}%
\hyperlink{#2}{\textsuperscript{#3}}%
}
\providecommand{\DUfootnotetext}[4]{%
\begingroup%
\renewcommand{\thefootnote}{%
\protect\raisebox{1em}{\protect\hypertarget{#1}{}}%
\protect\hyperlink{#2}{#3}}%
\footnotetext{#4}%
\endgroup%
}"""
PreambleCmds.footnote_floats = r"""% settings for footnotes as floats:
\setlength{\floatsep}{0.5em}
\setlength{\textfloatsep}{\fill}
\addtolength{\textfloatsep}{3em}
\renewcommand{\textfraction}{0.5}
\renewcommand{\topfraction}{0.5}
\renewcommand{\bottomfraction}{0.5}
\setcounter{totalnumber}{50}
\setcounter{topnumber}{50}
\setcounter{bottomnumber}{50}"""
PreambleCmds.graphicx_auto = r"""% Check output format
\ifx\pdftexversion\undefined
\usepackage{graphicx}
\else
\usepackage[pdftex]{graphicx}
\fi'))"""
PreambleCmds.inline = r"""
% inline markup (custom roles)
% \DUrole{#1}{#2} tries \DUrole#1{#2}
\providecommand*{\DUrole}[2]{%
\ifcsname DUrole#1\endcsname%
\csname DUrole#1\endcsname{#2}%
\else% backwards compatibility: try \docutilsrole#1{#2}
\ifcsname docutilsrole#1\endcsname%
\csname docutilsrole#1\endcsname{#2}%
\else%
#2%
\fi%
\fi%
}"""
PreambleCmds.legend = r"""
% legend environment
\ifthenelse{\isundefined{\DUlegend}}{
\newenvironment{DUlegend}{\small}{}
}{}"""
PreambleCmds.lineblock = r"""
% lineblock environment
\DUprovidelength{\DUlineblockindent}{2.5em}
\ifthenelse{\isundefined{\DUlineblock}}{
\newenvironment{DUlineblock}[1]{%
\list{}{\setlength{\partopsep}{\parskip}
\addtolength{\partopsep}{\baselineskip}
\setlength{\topsep}{0pt}
\setlength{\itemsep}{0.15\baselineskip}
\setlength{\parsep}{0pt}
\setlength{\leftmargin}{#1}}
\raggedright
}
{\endlist}
}{}"""
# PreambleCmds.lineblock._depends = 'providelength'
PreambleCmds.linking = r"""
%% hyperlinks:
\ifthenelse{\isundefined{\hypersetup}}{
\usepackage[unicode,colorlinks=%s,linkcolor=%s,urlcolor=%s]{hyperref}
\urlstyle{same} %% normal text font (alternatives: tt, rm, sf)
}{}"""
PreambleCmds.minitoc = r"""%% local table of contents
\usepackage{minitoc}"""
PreambleCmds.optionlist = r"""
% optionlist environment
\providecommand*{\DUoptionlistlabel}[1]{\bf #1 \hfill}
\DUprovidelength{\DUoptionlistindent}{3cm}
\ifthenelse{\isundefined{\DUoptionlist}}{
\newenvironment{DUoptionlist}{%
\list{}{\setlength{\labelwidth}{\DUoptionlistindent}
\setlength{\rightmargin}{1cm}
\setlength{\leftmargin}{\rightmargin}
\addtolength{\leftmargin}{\labelwidth}
\addtolength{\leftmargin}{\labelsep}
\renewcommand{\makelabel}{\DUoptionlistlabel}}
}
{\endlist}
}{}"""
# PreambleCmds.optionlist._depends = 'providelength'
PreambleCmds.providelength = r"""
% providelength (provide a length variable and set default, if it is new)
\providecommand*{\DUprovidelength}[2]{
\ifthenelse{\isundefined{#1}}{\newlength{#1}\setlength{#1}{#2}}{}
}"""
PreambleCmds.rubric = r"""
% rubric (informal heading)
\providecommand*{\DUrubric}[2][class-arg]{%
\subsubsection*{\centering\textit{\textmd{#2}}}}"""
PreambleCmds.sidebar = r"""
% sidebar (text outside the main text flow)
\providecommand{\DUsidebar}[2][class-arg]{%
\begin{center}
\colorbox[gray]{0.80}{\parbox{0.9\textwidth}{#2}}
\end{center}
}"""
PreambleCmds.subtitle = r"""
% subtitle (for topic/sidebar)
\providecommand*{\DUsubtitle}[2][class-arg]{\par\emph{#2}\smallskip}"""
PreambleCmds.table = r"""\usepackage{longtable}
\usepackage{array}
\setlength{\extrarowheight}{2pt}
\newlength{\DUtablewidth} % internal use in tables"""
# Options [force,almostfull] prevent spurious error messages, see
# de.comp.text.tex/2005-12/msg01855
PreambleCmds.textcomp = """\
\\usepackage{textcomp} % text symbol macros"""
PreambleCmds.documenttitle = r"""
%% Document title
\title{%s}
\author{%s}
\date{%s}
\maketitle
"""
PreambleCmds.titlereference = r"""
% titlereference role
\providecommand*{\DUroletitlereference}[1]{\textsl{#1}}"""
PreambleCmds.title = r"""
% title for topics, admonitions and sidebar
\providecommand*{\DUtitle}[2][class-arg]{%
% call \DUtitle#1{#2} if it exists:
\ifcsname DUtitle#1\endcsname%
\csname DUtitle#1\endcsname{#2}%
\else
\smallskip\noindent\textbf{#2}\smallskip%
\fi
}"""
PreambleCmds.topic = r"""
% topic (quote with heading)
\providecommand{\DUtopic}[2][class-arg]{%
\ifcsname DUtopic#1\endcsname%
\csname DUtopic#1\endcsname{#2}%
\else
\begin{quote}#2\end{quote}
\fi
}"""
PreambleCmds.transition = r"""
% transition (break, fancybreak, anonymous section)
\providecommand*{\DUtransition}[1][class-arg]{%
\hspace*{\fill}\hrulefill\hspace*{\fill}
\vskip 0.5\baselineskip
}"""
class DocumentClass(object):
    """Details of a LaTeX document class."""

    # sectioning commands shared by all document classes,
    # from highest to lowest level
    _BASE_SECTIONS = ['section', 'subsection', 'subsubsection',
                      'paragraph', 'subparagraph']

    # document classes that provide a \chapter command
    _CHAPTER_CLASSES = ('book', 'memoir', 'report', 'scrbook', 'scrreprt')

    def __init__(self, document_class, with_part=False):
        self.document_class = document_class
        self._with_part = with_part
        self.sections = list(self._BASE_SECTIONS)
        if document_class in self._CHAPTER_CLASSES:
            self.sections.insert(0, 'chapter')
        if with_part:
            self.sections.insert(0, 'part')

    def section(self, level):
        """Return the LaTeX section name for section `level`.

        The name depends on the specific document class.
        Level is 1,2,3..., as level 0 is the title.
        Levels deeper than the class supports map to the deepest command.
        """
        # clamp to the last available sectioning command
        return self.sections[min(level, len(self.sections)) - 1]
class Table(object):
    """Manage a table while traversing.

    Maybe change to a mixin defining the visit/departs, but then
    class Table internal variables are in the Translator.

    Table style might be

    :standard:   horizontal and vertical lines
    :booktabs:   only horizontal lines (requires "booktabs" LaTeX package)
    :borderless: no borders around table cells
    :nolines:    alias for borderless
    """
    def __init__(self, translator, latex_type, table_style):
        self._translator = translator
        self._latex_type = latex_type
        self._table_style = table_style
        self._open = 0
        # miscellaneous attributes
        self._attrs = {}
        self._col_width = []
        self._rowspan = []
        self.stubs = []
        self._in_thead = 0

    def open(self):
        """Begin a new table; reset per-table state."""
        self._open = 1
        self._col_specs = []
        self.caption = []
        self._attrs = {}
        self._in_head = 0 # maybe context with search

    def close(self):
        """End the current table; drop per-table state."""
        self._open = 0
        self._col_specs = None
        self.caption = []
        self._attrs = {}
        self.stubs = []

    def is_open(self):
        return self._open

    def set_table_style(self, table_style):
        """Set the style; unknown style names are silently ignored."""
        if table_style not in ('standard','booktabs','borderless','nolines'):
            return
        self._table_style = table_style

    def get_latex_type(self):
        return self._latex_type

    def set(self, attr, value):
        """Store a miscellaneous table attribute."""
        self._attrs[attr] = value

    def get(self, attr):
        """Return a stored attribute or None if unset."""
        if attr in self._attrs:
            return self._attrs[attr]
        return None

    def get_vertical_bar(self):
        """Return the column separator ('|' only for "standard" style)."""
        if self._table_style == 'standard':
            return '|'
        return ''

    # horizontal lines are drawn below a row,
    def get_opening(self):
        if self._latex_type == 'longtable':
            # otherwise longtable might move before paragraph and subparagraph
            prefix = '\\leavevmode\n'
        else:
            prefix = ''
        # raw string: the former non-raw literal relied on Python keeping
        # invalid escapes (\s, \D, \l) verbatim -- same value, now explicit
        prefix += r'\setlength{\DUtablewidth}{\linewidth}'
        return '%s\n\\begin{%s}[c]' % (prefix, self._latex_type)

    def get_closing(self):
        # NOTE: a dead assignment to a mistyped variable ("lines") used to
        # suggest a closing \hline for "standard" style; depart_row()
        # already draws it after the last row, so none is added here.
        line = ''
        if self._table_style == 'booktabs':
            line = '\\bottomrule\n'
        return '%s\\end{%s}' % (line, self._latex_type)

    def visit_colspec(self, node):
        self._col_specs.append(node)
        # "stubs" list is an attribute of the tgroup element:
        self.stubs.append(node.attributes.get('stub'))

    def get_colspecs(self):
        """Return column specification for longtable.

        Assumes reST line length being 80 characters.
        Table width is hairy.

        === ===
        ABC DEF
        === ===

        usually gets to narrow, therefore we add 1 (fiddlefactor).
        """
        width = 80

        total_width = 0.0
        # first see if we get too wide.
        for node in self._col_specs:
            colwidth = float(node['colwidth']+1) / width
            total_width += colwidth
        self._col_width = []
        self._rowspan = []
        # donot make it full linewidth
        factor = 0.93
        if total_width > 1.0:
            factor /= total_width
        bar = self.get_vertical_bar()
        latex_table_spec = ''
        for node in self._col_specs:
            colwidth = factor * float(node['colwidth']+1) / width
            self._col_width.append(colwidth+0.005)
            self._rowspan.append(0)
            latex_table_spec += '%sp{%.3f\\DUtablewidth}' % (bar, colwidth+0.005)
        return latex_table_spec+bar

    def get_column_width(self):
        """Return columnwidth for current cell (not multicell)."""
        return '%.2f\\DUtablewidth' % self._col_width[self._cell_in_row-1]

    def get_caption(self):
        """Return the \\caption line ('' if there is no caption).

        For continued longtables an abbreviated caption is used.
        """
        if not self.caption:
            return ''
        caption = ''.join(self.caption)
        if 1 == self._translator.thead_depth():
            return r'\caption{%s}\\' '\n' % caption
        return r'\caption[]{%s (... continued)}\\' '\n' % caption

    def need_recurse(self):
        if self._latex_type == 'longtable':
            return 1 == self._translator.thead_depth()
        return 0

    def visit_thead(self):
        self._in_thead += 1
        if self._table_style == 'standard':
            return ['\\hline\n']
        elif self._table_style == 'booktabs':
            return ['\\toprule\n']
        return []

    def depart_thead(self):
        a = []
        #if self._table_style == 'standard':
        #    a.append('\\hline\n')
        if self._table_style == 'booktabs':
            a.append('\\midrule\n')
        if self._latex_type == 'longtable':
            if 1 == self._translator.thead_depth():
                a.append('\\endfirsthead\n')
            else:
                a.append('\\endhead\n')
                a.append(r'\multicolumn{%d}{c}' % len(self._col_specs) +
                         r'{\hfill ... continued on next page} \\')
                a.append('\n\\endfoot\n\\endlastfoot\n')
        # for longtable one could add firsthead, foot and lastfoot
        self._in_thead -= 1
        return a

    def visit_row(self):
        self._cell_in_row = 0

    def depart_row(self):
        res = [' \\\\\n']
        self._cell_in_row = None # remove cell counter
        for i in range(len(self._rowspan)):
            if (self._rowspan[i] > 0):
                self._rowspan[i] -= 1

        if self._table_style == 'standard':
            rowspans = [i+1 for i in range(len(self._rowspan))
                        if (self._rowspan[i] <= 0)]
            if len(rowspans) == len(self._rowspan):
                res.append('\\hline\n')
            else:
                cline = ''
                rowspans.reverse()
                # TODO merge clines
                while rowspans:
                    c_start = rowspans.pop()
                    cline += '\\cline{%d-%d}\n' % (c_start, c_start)
                res.append(cline)
        return res

    def set_rowspan(self, cell, value):
        """Record the remaining rowspan for `cell` (out-of-range is a no-op)."""
        try:
            self._rowspan[cell] = value
        except IndexError:
            pass

    def get_rowspan(self, cell):
        """Return the remaining rowspan for `cell` (0 if out of range)."""
        try:
            return self._rowspan[cell]
        except IndexError:
            return 0

    def get_entry_number(self):
        return self._cell_in_row

    def visit_entry(self):
        self._cell_in_row += 1

    def is_stub_column(self):
        if len(self.stubs) >= self._cell_in_row:
            return self.stubs[self._cell_in_row-1]
        return False
class LaTeXTranslator(nodes.NodeVisitor):
# When options are given to the documentclass, latex will pass them
# to other packages, as done with babel.
# Dummy settings might be taken from document settings
# Config setting defaults
# -----------------------
# TODO: use mixins for different implementations.
# list environment for docinfo. else tabularx
## use_optionlist_for_docinfo = False # TODO: NOT YET IN USE
# Use compound enumerations (1.A.1.)
compound_enumerators = 0
# If using compound enumerations, include section information.
section_prefix_for_enumerators = 0
# This is the character that separates the section ("." subsection ...)
# prefix from the regular list enumerator.
section_enumerator_separator = '-'
# default link color
hyperlink_color = 'blue'
# Auxiliary variables
# -------------------
has_latex_toc = False # is there a toc in the doc? (needed by minitoc)
is_toc_list = False # is the current bullet_list a ToC?
section_level = 0
# Flags to encode():
# inside citation reference labels underscores dont need to be escaped
inside_citation_reference_label = False
verbatim = False # do not encode
insert_non_breaking_blanks = False # replace blanks by "~"
insert_newline = False # add latex newline commands
literal = False # literal text (block or inline)
def __init__(self, document):
    """Initialize all per-document translator state from `document`
    and its settings.  Order matters: settings are read first, then
    output collectors are created, then static preamble requirements
    and stylesheets are set up.
    """
    nodes.NodeVisitor.__init__(self, document)
    # Reporter
    # ~~~~~~~~
    self.warn = self.document.reporter.warning
    self.error = self.document.reporter.error
    # Settings
    # ~~~~~~~~
    self.settings = settings = document.settings
    self.latex_encoding = self.to_latex_encoding(settings.output_encoding)
    self.use_latex_toc = settings.use_latex_toc
    self.use_latex_docinfo = settings.use_latex_docinfo
    self._use_latex_citations = settings.use_latex_citations
    self.embed_stylesheet = settings.embed_stylesheet
    self._reference_label = settings.reference_label
    self.hyperlink_color = settings.hyperlink_color
    self.compound_enumerators = settings.compound_enumerators
    self.font_encoding = settings.font_encoding
    self.section_prefix_for_enumerators = (
        settings.section_prefix_for_enumerators)
    self.section_enumerator_separator = (
        settings.section_enumerator_separator.replace('_', '\\_'))
    # literal blocks:
    self.literal_block_env = ''
    self.literal_block_options = ''
    if settings.literal_block_env != '':
        # split a setting like 'lstlisting[language=Python]' into
        # environment name and options
        (none,
         self.literal_block_env,
         self.literal_block_options,
         none ) = re.split('(\w+)(.*)', settings.literal_block_env)
    elif settings.use_verbatim_when_possible:
        self.literal_block_env = 'verbatim'
    #
    if self.settings.use_bibtex:
        self.bibtex = self.settings.use_bibtex.split(',',1)
        # TODO avoid errors on not declared citations.
    else:
        self.bibtex = None
    # language:
    # (labels, bibliographic_fields, and author_separators)
    self.language = languages.get_language(settings.language_code)
    self.babel = Babel(settings.language_code)
    self.author_separator = self.language.author_separators[0]
    self.d_options = [self.settings.documentoptions,
                      self.babel.get_language()]
    self.d_options = ','.join([opt for opt in self.d_options if opt])
    self.d_class = DocumentClass(settings.documentclass,
                                 settings.use_part_section)
    # graphic package options:
    if self.settings.graphicx_option == '':
        self.graphicx_package = r'\usepackage{graphicx}'
    elif self.settings.graphicx_option.lower() == 'auto':
        self.graphicx_package = PreambleCmds.graphicx_auto
    else:
        self.graphicx_package = (r'\usepackage[%s]{graphicx}' %
                                 self.settings.graphicx_option)
    # footnotes:
    self.docutils_footnotes = settings.docutils_footnotes
    if settings.use_latex_footnotes:
        self.docutils_footnotes = True
        self.warn('`use_latex_footnotes` is deprecated. '
                  'The setting has been renamed to `docutils_footnotes` '
                  'and the alias will be removed in a future version.')
    self.figure_footnotes = settings.figure_footnotes
    if self.figure_footnotes:
        self.docutils_footnotes = True
        self.warn('The "figure footnotes" workaround/setting is strongly '
                  'deprecated and will be removed in a future version.')
    # Output collection stacks
    # ~~~~~~~~~~~~~~~~~~~~~~~~
    # Document parts
    self.head_prefix = [r'\documentclass[%s]{%s}' %
                        (self.d_options, self.settings.documentclass)]
    self.requirements = SortableDict() # made a list in depart_document()
    self.latex_preamble = [settings.latex_preamble]
    self.stylesheet = []
    self.fallbacks = SortableDict() # made a list in depart_document()
    self.pdfsetup = [] # PDF properties (hyperref package)
    self.title = []
    self.subtitle = []
    ## self.body_prefix = ['\\begin{document}\n']
    self.body_pre_docinfo = [] # title data and \maketitle
    self.docinfo = []
    self.dedication = []
    self.abstract = []
    self.body = []
    ## self.body_suffix = ['\\end{document}\n']
    # A heterogenous stack used in conjunction with the tree traversal.
    # Make sure that the pops correspond to the pushes:
    self.context = []
    # Title metadata:
    self.title_labels = []
    self.subtitle_labels = []
    # (if use_latex_docinfo: collects lists of
    # author/organization/contact/address lines)
    self.author_stack = []
    # date (the default supresses the "auto-date" feature of \maketitle)
    self.date = []
    # PDF properties: pdftitle, pdfauthor
    # TODO?: pdfcreator, pdfproducer, pdfsubject, pdfkeywords
    self.pdfinfo = []
    self.pdfauthor = []
    # Stack of section counters so that we don't have to use_latex_toc.
    # This will grow and shrink as processing occurs.
    # Initialized for potential first-level sections.
    self._section_number = [0]
    # The current stack of enumerations so that we can expand
    # them into a compound enumeration.
    self._enumeration_counters = []
    # The maximum number of enumeration counters we've used.
    # If we go beyond this number, we need to create a new
    # counter; otherwise, just reuse an old one.
    self._max_enumeration_counters = 0
    self._bibitems = []
    # object for a table while proccessing.
    self.table_stack = []
    self.active_table = Table(self, 'longtable', settings.table_style)
    # Where to collect the output of visitor methods (default: body)
    self.out = self.body
    self.out_stack = [] # stack of output collectors
    # Process settings
    # ~~~~~~~~~~~~~~~~
    # Static requirements
    # TeX font encoding
    if self.font_encoding:
        encodings = [r'\usepackage[%s]{fontenc}' % self.font_encoding]
    else:
        encodings = [r'%\usepackage[OT1]{fontenc}'] # just a comment
    # Docutils' output-encoding => TeX input encoding:
    if self.latex_encoding != 'ascii':
        encodings.append(r'\usepackage[%s]{inputenc}'
                         % self.latex_encoding)
    self.requirements['_static'] = '\n'.join(
        encodings + [
            r'\usepackage{ifthen}',
            # multi-language support (language is in document options)
            '\\usepackage{babel}%s' % self.babel.setup,
        ])
    # page layout with typearea (if there are relevant document options)
    if (settings.documentclass.find('scr') == -1 and
        (self.d_options.find('DIV') != -1 or
         self.d_options.find('BCOR') != -1)):
        self.requirements['typearea'] = r'\usepackage{typearea}'
    # Stylesheets
    # get list of style sheets from settings
    styles = utils.get_stylesheet_list(settings)
    # adapt path if --stylesheet_path is used
    if settings.stylesheet_path and not(self.embed_stylesheet):
        styles = [utils.relative_path(settings._destination, sheet)
                  for sheet in styles]
    for sheet in styles:
        (base, ext) = os.path.splitext(sheet)
        is_package = ext in ['.sty', '']
        if self.embed_stylesheet:
            if is_package:
                sheet = base + '.sty' # adapt package name
                # wrap in \makeatletter, \makeatother
                wrapper = PreambleCmds.embedded_package_wrapper
            else:
                wrapper = '%% embedded stylesheet: %s\n%s'
            settings.record_dependencies.add(sheet)
            self.stylesheet.append(wrapper %
                (sheet, io.FileInput(source_path=sheet, encoding='utf-8').read()))
        else: # link to style sheet
            if is_package:
                self.stylesheet.append(r'\usepackage{%s}' % base)
            else:
                self.stylesheet.append(r'\input{%s}' % sheet)
    # PDF setup
    if self.hyperlink_color == '0':
        self.hyperlink_color = 'black'
        self.colorlinks = 'false'
    else:
        self.colorlinks = 'true'
    # LaTeX Toc
    # include all supported sections in toc and PDF bookmarks
    # (or use documentclass-default (as currently))?
    ## if self.use_latex_toc:
    ##    self.requirements['tocdepth'] = (r'\setcounter{tocdepth}{%d}' %
    ##                                     len(self.d_class.sections))
    # LaTeX section numbering
    if not self.settings.sectnum_xform: # section numbering by LaTeX:
        # sectnum_depth:
        #   None  "sectnum" directive without depth arg -> LaTeX default
        #   0     no "sectnum" directive -> no section numbers
        #   else  value of the "depth" argument: translate to LaTeX level
        #         -1  part    (0 with "article" document class)
        #          0  chapter (missing in "article" document class)
        #          1  section
        #          2  subsection
        #          3  subsubsection
        #          4  paragraph
        #          5  subparagraph
        if settings.sectnum_depth is not None:
            # limit to supported levels
            sectnum_depth = min(settings.sectnum_depth,
                                len(self.d_class.sections))
            # adjust to document class and use_part_section settings
            if 'chapter' in self.d_class.sections:
                sectnum_depth -= 1
            if self.d_class.sections[0] == 'part':
                sectnum_depth -= 1
            self.requirements['sectnum_depth'] = (
                r'\setcounter{secnumdepth}{%d}' % sectnum_depth)
        # start with specified number:
        if (hasattr(settings, 'sectnum_start') and
            settings.sectnum_start != 1):
            self.requirements['sectnum_start'] = (
                r'\setcounter{%s}{%d}' % (self.d_class.sections[0],
                                          settings.sectnum_start-1))
        # currently ignored (configure in a stylesheet):
        ## settings.sectnum_prefix
        ## settings.sectnum_suffix
# Auxiliary Methods
# -----------------
def to_latex_encoding(self, docutils_encoding):
    """Translate a Docutils/Python encoding name into LaTeX's.

    Known names are resolved via a lookup table; anything else is
    normalized by dropping "-" and "_" (e.g. 'utf-8' -> 'utf8') and
    stripping a trailing ':errorhandler' suffix.
    """
    tr = { 'iso-8859-1': 'latin1',     # west european
           'iso-8859-2': 'latin2',     # east european
           'iso-8859-3': 'latin3',     # esperanto, maltese
           'iso-8859-4': 'latin4',     # north european, scandinavian, baltic
           'iso-8859-5': 'iso88595',   # cyrillic (ISO)
           'iso-8859-9': 'latin5',     # turkish
           'iso-8859-15': 'latin9',    # latin9, update to latin1.
           'mac_cyrillic': 'maccyr',   # cyrillic (on Mac)
           'windows-1251': 'cp1251',   # cyrillic (on Windows)
           'koi8-r': 'koi8-r',         # cyrillic (Russian)
           'koi8-u': 'koi8-u',         # cyrillic (Ukrainian)
           'windows-1250': 'cp1250',   #
           'windows-1252': 'cp1252',   #
           'us-ascii': 'ascii',        # ASCII (US)
           # unmatched encodings
           #'': 'applemac',
           #'': 'ansinew',  # windows 3.1 ansi
           #'': 'ascii',    # ASCII encoding for the range 32--127.
           #'': 'cp437',    # dos latin us
           #'': 'cp850',    # dos latin 1
           #'': 'cp852',    # dos latin 2
           #'': 'decmulti',
           #'': 'latin10',
           #'iso-8859-6': ''   # arabic
           #'iso-8859-7': ''   # greek
           #'iso-8859-8': ''   # hebrew
           #'iso-8859-10': ''  # latin6, more complete iso-8859-4
         }
    name = docutils_encoding.lower()
    try:
        return tr[name]
    except KeyError:
        pass
    # normalize: latin-1, latin_1, utf-8 and similar spellings
    name = name.replace('_', '').replace('-', '')
    # drop a possible ':errorhandler' suffix
    return name.split(':')[0]

def language_label(self, docutil_label):
    """Return the localized string for the Docutils label name."""
    return self.language.labels[docutil_label]

def ensure_math(self, text):
    """Wrap characters that LaTeX knows only in math mode in \\ensuremath."""
    try:
        pattern = self.ensure_math_re
    except AttributeError:
        # lnot, pm, twosuperior, threesuperior, mu, onesuperior, times, div
        chars = {'latin1': '\xac\xb1\xb2\xb3\xb5\xb9\xd7\xf7'}  # ¬±²³µ¹×÷
        # TODO?: use texcomp instead.
        pattern = self.ensure_math_re = re.compile('([%s])' % chars['latin1'])
    return pattern.sub(r'\\ensuremath{\1}', text)
def encode(self, text):
    """Return text with 'problematic' characters escaped.

    * Escape the ten special printing characters ``# $ % & ~ _ ^ \ { }``,
      square brackets ``[ ]``, double quotes and (in OT1) ``< | >``.
    * Separate ``-`` (and more in literal text) to prevent input ligatures.
    * Translate non-supported Unicode characters.

    The escaping depends on the current state flags (`verbatim`,
    `literal`, `insert_newline`, `insert_non_breaking_blanks`,
    `inside_citation_reference_label`) and on the font/input encoding.
    """
    if self.verbatim:
        return text
    # Separate compound characters, e.g. '--' to '-{}-'.
    separate_chars = '-'
    # In monospace-font, we also separate ',,', '``' and "''" and some
    # other characters which can't occur in non-literal text.
    if self.literal:
        separate_chars += ',`\'"<>'
    # LaTeX encoding maps:
    special_chars = {
        ord('#'): ur'\#',
        ord('$'): ur'\$',
        ord('%'): ur'\%',
        ord('&'): ur'\&',
        ord('~'): ur'\textasciitilde{}',
        ord('_'): ur'\_',
        ord('^'): ur'\textasciicircum{}',
        ord('\\'): ur'\textbackslash{}',
        ord('{'): ur'\{',
        ord('}'): ur'\}',
        # Square brackets are ordinary chars and cannot be escaped with '\',
        # so we put them in a group '{[}'. (Alternative: ensure that all
        # macros with optional arguments are terminated with {} and text
        # inside any optional argument is put in a group ``[{text}]``).
        # Commands with optional args inside an optional arg must be put
        # in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
        ord('['): ur'{[}',
        ord(']'): ur'{]}'
    }
    # Unicode chars that are not recognized by LaTeX's utf8 encoding
    unsupported_unicode_chars = {
        0x00A0: ur'~', # NO-BREAK SPACE
        0x00AD: ur'\-', # SOFT HYPHEN
        #
        0x2011: ur'\hbox{-}', # NON-BREAKING HYPHEN
        0x21d4: ur'$\Leftrightarrow$',
        # Docutils footnote symbols:
        0x2660: ur'$\spadesuit$',
        0x2663: ur'$\clubsuit$',
    }
    # Unicode chars that are recognized by LaTeX's utf8 encoding
    unicode_chars = {
        0x200C: ur'\textcompwordmark', # ZERO WIDTH NON-JOINER
        0x2013: ur'\textendash{}',
        0x2014: ur'\textemdash{}',
        0x2018: ur'\textquoteleft{}',
        0x2019: ur'\textquoteright{}',
        0x201A: ur'\quotesinglbase{}', # SINGLE LOW-9 QUOTATION MARK
        0x201C: ur'\textquotedblleft{}',
        0x201D: ur'\textquotedblright{}',
        0x201E: ur'\quotedblbase{}', # DOUBLE LOW-9 QUOTATION MARK
        0x2030: ur'\textperthousand{}',   # PER MILLE SIGN
        0x2031: ur'\textpertenthousand{}', # PER TEN THOUSAND SIGN
        0x2039: ur'\guilsinglleft{}',
        0x203A: ur'\guilsinglright{}',
        0x2423: ur'\textvisiblespace{}',  # OPEN BOX
        0x2020: ur'\dag{}',
        0x2021: ur'\ddag{}',
        0x2026: ur'\dots{}',
        0x2122: ur'\texttrademark{}',
    }
    # Unicode chars that require a feature/package to render
    pifont_chars = {
        0x2665: ur'\ding{170}',     # black heartsuit
        0x2666: ur'\ding{169}',     # black diamondsuit
    }
    # recognized with 'utf8', if textcomp is loaded
    textcomp_chars = {
        # Latin-1 Supplement
        0x00a2: ur'\textcent{}',          # ¢ CENT SIGN
        0x00a4: ur'\textcurrency{}',      # ¤ CURRENCY SYMBOL
        0x00a5: ur'\textyen{}',           # ¥ YEN SIGN
        0x00a6: ur'\textbrokenbar{}',     # ¦ BROKEN BAR
        0x00a7: ur'\textsection{}',       # § SECTION SIGN
        0x00a8: ur'\textasciidieresis{}', # ¨ DIAERESIS
        0x00a9: ur'\textcopyright{}',     # © COPYRIGHT SIGN
        0x00aa: ur'\textordfeminine{}',   # ª FEMININE ORDINAL INDICATOR
        0x00ac: ur'\textlnot{}',          # ¬ NOT SIGN
        0x00ae: ur'\textregistered{}',    # ® REGISTERED SIGN
        0x00af: ur'\textasciimacron{}',   # ¯ MACRON
        0x00b0: ur'\textdegree{}',        # ° DEGREE SIGN
        0x00b1: ur'\textpm{}',            # ± PLUS-MINUS SIGN
        0x00b2: ur'\texttwosuperior{}',   # ² SUPERSCRIPT TWO
        0x00b3: ur'\textthreesuperior{}', # ³ SUPERSCRIPT THREE
        0x00b4: ur'\textasciiacute{}',    # ´ ACUTE ACCENT
        0x00b5: ur'\textmu{}',            # µ MICRO SIGN
        0x00b6: ur'\textparagraph{}',     # ¶ PILCROW SIGN # not equal to \textpilcrow
        0x00b9: ur'\textonesuperior{}',   # ¹ SUPERSCRIPT ONE
        0x00ba: ur'\textordmasculine{}',  # º MASCULINE ORDINAL INDICATOR
        0x00bc: ur'\textonequarter{}',    # 1/4 FRACTION
        0x00bd: ur'\textonehalf{}',       # 1/2 FRACTION
        0x00be: ur'\textthreequarters{}', # 3/4 FRACTION
        0x00d7: ur'\texttimes{}',         # × MULTIPLICATION SIGN
        0x00f7: ur'\textdiv{}',           # ÷ DIVISION SIGN
        #
        0x0192: ur'\textflorin{}',        # LATIN SMALL LETTER F WITH HOOK
        0x02b9: ur'\textasciiacute{}',    # MODIFIER LETTER PRIME
        0x02ba: ur'\textacutedbl{}',      # MODIFIER LETTER DOUBLE PRIME
        0x2016: ur'\textbardbl{}',        # DOUBLE VERTICAL LINE
        0x2022: ur'\textbullet{}',        # BULLET
        0x2032: ur'\textasciiacute{}',    # PRIME
        0x2033: ur'\textacutedbl{}',      # DOUBLE PRIME
        0x2035: ur'\textasciigrave{}',    # REVERSED PRIME
        0x2036: ur'\textgravedbl{}',      # REVERSED DOUBLE PRIME
        0x203b: ur'\textreferencemark{}', # REFERENCE MARK
        0x203d: ur'\textinterrobang{}',   # INTERROBANG
        0x2044: ur'\textfractionsolidus{}', # FRACTION SLASH
        0x2045: ur'\textlquill{}',        # LEFT SQUARE BRACKET WITH QUILL
        0x2046: ur'\textrquill{}',        # RIGHT SQUARE BRACKET WITH QUILL
        0x2052: ur'\textdiscount{}',      # COMMERCIAL MINUS SIGN
        0x20a1: ur'\textcolonmonetary{}', # COLON SIGN
        0x20a3: ur'\textfrenchfranc{}',   # FRENCH FRANC SIGN
        0x20a4: ur'\textlira{}',          # LIRA SIGN
        0x20a6: ur'\textnaira{}',         # NAIRA SIGN
        0x20a9: ur'\textwon{}',           # WON SIGN
        0x20ab: ur'\textdong{}',          # DONG SIGN
        0x20ac: ur'\texteuro{}',          # EURO SIGN
        0x20b1: ur'\textpeso{}',          # PESO SIGN
        0x20b2: ur'\textguarani{}',       # GUARANI SIGN
        0x2103: ur'\textcelsius{}',       # DEGREE CELSIUS
        0x2116: ur'\textnumero{}',        # NUMERO SIGN
        0x2117: ur'\textcircledP{}',      # SOUND RECORDING COYRIGHT
        0x211e: ur'\textrecipe{}',        # PRESCRIPTION TAKE
        0x2120: ur'\textservicemark{}',   # SERVICE MARK
        0x2122: ur'\texttrademark{}',     # TRADE MARK SIGN
        0x2126: ur'\textohm{}',           # OHM SIGN
        0x2127: ur'\textmho{}',           # INVERTED OHM SIGN
        0x212e: ur'\textestimated{}',     # ESTIMATED SYMBOL
        0x2190: ur'\textleftarrow{}',     # LEFTWARDS ARROW
        0x2191: ur'\textuparrow{}',       # UPWARDS ARROW
        0x2192: ur'\textrightarrow{}',    # RIGHTWARDS ARROW
        0x2193: ur'\textdownarrow{}',     # DOWNWARDS ARROW
        0x2212: ur'\textminus{}',         # MINUS SIGN
        0x2217: ur'\textasteriskcentered{}', # ASTERISK OPERATOR
        0x221a: ur'\textsurd{}',          # SQUARE ROOT
        0x2422: ur'\textblank{}',         # BLANK SYMBOL
        0x25e6: ur'\textopenbullet{}',    # WHITE BULLET
        0x25ef: ur'\textbigcircle{}',     # LARGE CIRCLE
        0x266a: ur'\textmusicalnote{}',   # EIGHTH NOTE
        0x26ad: ur'\textmarried{}',       # MARRIAGE SYMBOL
        0x26ae: ur'\textdivorced{}',      # DIVORCE SYMBOL
        0x27e8: ur'\textlangle{}',        # MATHEMATICAL LEFT ANGLE BRACKET
        0x27e9: ur'\textrangle{}',        # MATHEMATICAL RIGHT ANGLE BRACKET
    }
    # TODO: greek alphabet ... ?
    # see also LaTeX codec
    # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252124
    # and unimap.py from TeXML
    # set up the translation table:
    # (NOTE: `table` aliases `special_chars`; that is safe because
    #  `special_chars` is rebuilt on every call.)
    table = special_chars
    # keep the underscore in citation references
    if self.inside_citation_reference_label:
        del(table[ord('_')])
    # Workarounds for OT1 font-encoding
    if self.font_encoding in ['OT1', '']:
        # * out-of-order characters in cmtt
        if self.literal:
            # replace underscore by underlined blank,
            # because this has correct width.
            table[ord('_')] = u'\\underline{~}'
            # the backslash doesn't work, so we use a mirrored slash.
            # \reflectbox is provided by graphicx:
            self.requirements['graphicx'] = self.graphicx_package
            table[ord('\\')] = ur'\reflectbox{/}'
        # * ``< | >`` come out as different chars (except for cmtt):
        else:
            table[ord('|')] = ur'\textbar{}'
            table[ord('<')] = ur'\textless{}'
            table[ord('>')] = ur'\textgreater{}'
    if self.insert_non_breaking_blanks:
        table[ord(' ')] = ur'~'
    if self.literal:
        # double quotes are 'active' in some languages
        table[ord('"')] = self.babel.literal_double_quote
    else:
        text = self.babel.quote_quotes(text)
    # Unicode chars:
    table.update(unsupported_unicode_chars)
    table.update(pifont_chars)
    if not self.latex_encoding.startswith('utf8'):
        table.update(unicode_chars)
        table.update(textcomp_chars)
    # Characters that require a feature/package to render
    for ch in text:
        if ord(ch) in pifont_chars:
            self.requirements['pifont'] = '\\usepackage{pifont}'
        if ord(ch) in textcomp_chars:
            self.requirements['textcomp'] = PreambleCmds.textcomp
    text = text.translate(table)
    # Break up input ligatures
    for char in separate_chars * 2:
        # Do it twice ("* 2") because otherwise we would replace
        # '---' by '-{}--'.
        text = text.replace(char + char, char + '{}' + char)
    # Literal line breaks (in address or literal blocks):
    if self.insert_newline:
        # for blank lines, insert a protected space, to avoid
        # ! LaTeX Error: There's no line here to end.
        textlines = [line + '~'*(not line.lstrip())
                     for line in text.split('\n')]
        text = '\\\\\n'.join(textlines)
    if self.literal and not self.insert_non_breaking_blanks:
        # preserve runs of spaces but allow wrapping
        text = text.replace('  ', ' ~')
    if not self.latex_encoding.startswith('utf8'):
        text = self.ensure_math(text)
    return text
def attval(self, text,
           whitespace=re.compile('[\n\r\t\v\f]')):
    """Cleanse, encode, and return attribute value text.

    All non-space whitespace is flattened to single spaces before
    LaTeX-escaping.
    """
    flattened = whitespace.sub(' ', text)
    return self.encode(flattened)

# TODO: is this used anywhere? (update or delete)
## def astext(self):
##     """Assemble document parts and return as string."""
##     head = '\n'.join(self.head_prefix + self.stylesheet + self.head)
##     body = ''.join(self.body_prefix + self.body + self.body_suffix)
##     return head + '\n' + body

def is_inline(self, node):
    """Return True if `node` is an inline element (child of a TextElement)."""
    return isinstance(node.parent, nodes.TextElement)

def append_hypertargets(self, node):
    """Emit a raised \\hypertarget anchor for every id of `node`.

    hypertarget places the anchor at the target's baseline, so the
    anchor is raised explicitly.
    """
    anchors = ['\\raisebox{1em}{\\hypertarget{%s}{}}' % id
               for id in node['ids']]
    self.out.append('%\n'.join(anchors))

def ids_to_labels(self, node, set_anchor=True):
    """Return a list of \\label definitions for all ids of `node`.

    If `set_anchor` is True, a \\phantomsection anchor precedes them.
    """
    labels = []
    for id in node.get('ids', []):
        labels.append('\\label{%s}' % id)
    if labels and set_anchor:
        labels = ['\\phantomsection'] + labels
    return labels

def push_output_collector(self, new_out):
    """Redirect visitor output to `new_out`, remembering the old target."""
    self.out_stack.append(self.out)
    self.out = new_out

def pop_output_collector(self):
    """Restore the previous output target."""
    self.out = self.out_stack.pop()
# Visitor methods
# ---------------
def visit_Text(self, node):
    """Emit the LaTeX-escaped text of a Text node."""
    escaped = self.encode(node.astext())
    self.out.append(escaped)

def depart_Text(self, node):
    pass

def visit_address(self, node):
    """Render the 'address' bibliographic field."""
    self.visit_docinfo_item(node, 'address')

def depart_address(self, node):
    self.depart_docinfo_item(node)

def visit_admonition(self, node):
    """Open a DUadmonition box; the generic 'admonition' class is dropped."""
    self.fallbacks['admonition'] = PreambleCmds.admonition
    if 'error' in node['classes']:
        self.fallbacks['error'] = PreambleCmds.error
    # keep only the specific class names
    specific = [cls for cls in node['classes'] if cls != 'admonition']
    node['classes'] = specific
    self.out.append('\n\\DUadmonition[%s]{\n' % ','.join(specific))

def depart_admonition(self, node=None):
    self.out.append('}\n')

def visit_author(self, node):
    """Render the 'author' bibliographic field."""
    self.visit_docinfo_item(node, 'author')

def depart_author(self, node):
    self.depart_docinfo_item(node)

def visit_authors(self, node):
    # nothing to do: visit_author is called for each author anyway
    pass

def depart_authors(self, node):
    pass

def visit_block_quote(self, node):
    self.out.append('%\n\\begin{quote}\n')

def depart_block_quote(self, node):
    self.out.append('\n\\end{quote}\n')

def visit_bullet_list(self, node):
    """Open an itemize environment (a bare list environment for ToCs)."""
    if not self.is_toc_list:
        self.out.append('%\n\\begin{itemize}\n')
    else:
        self.out.append('%\n\\begin{list}{}{}\n')

def depart_bullet_list(self, node):
    if not self.is_toc_list:
        self.out.append('\n\\end{itemize}\n')
    else:
        self.out.append('\n\\end{list}\n')
def visit_superscript(self, node):
    """Typeset superscript text with \\textsuperscript."""
    self.out.append('\\textsuperscript{')
    if node['classes']:
        self.visit_inline(node)

def depart_superscript(self, node):
    if node['classes']:
        self.depart_inline(node)
    self.out.append('}')

def visit_subscript(self, node):
    """Typeset subscript text with \\textsubscript (requires `fixltx2e`)."""
    self.out.append('\\textsubscript{')
    if node['classes']:
        self.visit_inline(node)

def depart_subscript(self, node):
    if node['classes']:
        self.depart_inline(node)
    self.out.append('}')

def visit_caption(self, node):
    self.out.append('\\caption{')

def depart_caption(self, node):
    self.out.append('}\n')

def visit_title_reference(self, node):
    """Mark up a title reference via the DUroletitlereference fallback."""
    self.fallbacks['titlereference'] = PreambleCmds.titlereference
    self.out.append('\\DUroletitlereference{')
    if node['classes']:
        self.visit_inline(node)

def depart_title_reference(self, node):
    if node['classes']:
        self.depart_inline(node)
    self.out.append('}')
def visit_citation(self, node):
    """Start a citation.

    With `use_latex_citations` the content is collected separately
    (label + text become a bibitem); otherwise the citation is
    emitted as a bottom-floated figure with hypertarget anchors.
    """
    # TODO maybe use cite bibitems
    if self._use_latex_citations:
        self.push_output_collector([])
    else:
        # TODO: do we need these?
        ## self.requirements['~fnt_floats'] = PreambleCmds.footnote_floats
        self.out.append(r'\begin{figure}[b]')
        self.append_hypertargets(node)

def depart_citation(self, node):
    if self._use_latex_citations:
        # first collected chunk is the label, the rest is the text
        label = self.out[0]
        text = ''.join(self.out[1:])
        self._bibitems.append([label, text])
        self.pop_output_collector()
    else:
        self.out.append('\\end{figure}\n')

def visit_citation_reference(self, node):
    """Open a citation reference.

    With `use_latex_citations`: start (or continue) a \\cite{...};
    consecutive references separated by whitespace are merged into
    one \\cite with comma-separated keys.  Otherwise emit a
    bracketed \\hyperlink.
    """
    if self._use_latex_citations:
        if not self.inside_citation_reference_label:
            self.out.append(r'\cite{')
            self.inside_citation_reference_label = 1
        else:
            # continuing a merged \cite: drop the separating whitespace
            assert self.body[-1] in (' ', '\n'),\
                'unexpected non-whitespace while in reference label'
            del self.body[-1]
    else:
        href = ''
        if 'refid' in node:
            href = node['refid']
        elif 'refname' in node:
            href = self.document.nameids[node['refname']]
        self.out.append('[\\hyperlink{%s}{' % href)

def depart_citation_reference(self, node):
    if self._use_latex_citations:
        followup_citation = False
        # check for a following citation separated by a space or newline
        next_siblings = node.traverse(descend=0, siblings=1,
                                      include_self=0)
        if len(next_siblings) > 1:
            next = next_siblings[0]
            if (isinstance(next, nodes.Text) and
                next.astext() in (' ', '\n')):
                if next_siblings[1].__class__ == node.__class__:
                    followup_citation = True
        if followup_citation:
            # keep the \cite open; separate keys with a comma
            self.out.append(',')
        else:
            self.out.append('}')
            self.inside_citation_reference_label = False
    else:
        self.out.append('}]')
def visit_classifier(self, node):
    """Open classifier markup: ``(`` plus bold."""
    self.out.append('(\\textbf{')

def depart_classifier(self, node):
    self.out.append('})\n')

def visit_colspec(self, node):
    # column geometry bookkeeping is delegated to the active table
    self.active_table.visit_colspec(node)

def depart_colspec(self, node):
    pass

def visit_comment(self, node):
    """Write the comment as LaTeX comment lines; children are skipped."""
    # precede every line with a comment sign, wrap in newlines
    commented = node.astext().replace('\n', '\n% ')
    self.out.append('\n%% %s\n' % commented)
    raise nodes.SkipNode

def depart_comment(self, node):
    pass

def visit_compound(self, node):
    pass

def depart_compound(self, node):
    pass

def visit_contact(self, node):
    """Render the 'contact' bibliographic field."""
    self.visit_docinfo_item(node, 'contact')

def depart_contact(self, node):
    self.depart_docinfo_item(node)

def visit_container(self, node):
    pass

def depart_container(self, node):
    pass

def visit_copyright(self, node):
    """Render the 'copyright' bibliographic field."""
    self.visit_docinfo_item(node, 'copyright')

def depart_copyright(self, node):
    self.depart_docinfo_item(node)

def visit_date(self, node):
    """Render the 'date' bibliographic field."""
    self.visit_docinfo_item(node, 'date')

def depart_date(self, node):
    self.depart_docinfo_item(node)

def visit_decoration(self, node):
    # header and footer: nothing to emit here
    pass

def depart_decoration(self, node):
    pass

def visit_definition(self, node):
    pass

def depart_definition(self, node):
    self.out.append('\n')

def visit_definition_list(self, node):
    self.out.append('%\n\\begin{description}\n')

def depart_definition_list(self, node):
    self.out.append('\\end{description}\n')

def visit_definition_list_item(self, node):
    pass

def depart_definition_list_item(self, node):
    pass

def visit_description(self, node):
    self.out.append(' ')

def depart_description(self, node):
    pass
def visit_docinfo(self, node):
    """Collect docinfo output separately for later placement."""
    self.push_output_collector(self.docinfo)

def depart_docinfo(self, node):
    """Close docinfo collection and wrap it in a centered tabularx."""
    self.pop_output_collector()
    # Some items (e.g. author) end up at other places
    if not self.docinfo:
        return
    # tabularx: automatic width of columns, no page breaks allowed.
    self.requirements['tabularx'] = r'\usepackage{tabularx}'
    self.fallbacks['_providelength'] = PreambleCmds.providelength
    self.fallbacks['docinfo'] = PreambleCmds.docinfo
    #
    self.docinfo.insert(0, '\n% Docinfo\n'
                        '\\begin{center}\n'
                        '\\begin{tabularx}{\\DUdocinfowidth}{lX}\n')
    self.docinfo.append('\\end{tabularx}\n'
                        '\\end{center}\n')
def visit_docinfo_item(self, node, name):
    """Render a docinfo field named `name`.

    With `use_latex_docinfo`, author-related fields and the date are
    routed to the title data (author stack / date) and the node's
    children are skipped; otherwise a two-column table row is opened
    (closed in depart_docinfo_item via the context stack).
    """
    if name == 'author':
        self.pdfauthor.append(self.attval(node.astext()))
    if self.use_latex_docinfo:
        if name in ('author', 'organization', 'contact', 'address'):
            # We attach these to the last author.  If any of them precedes
            # the first author, put them in a separate "author" group
            # (in lack of better semantics).
            if name == 'author' or not self.author_stack:
                self.author_stack.append([])
            if name == 'address':   # newlines are meaningful
                self.insert_newline = 1
                text = self.encode(node.astext())
                self.insert_newline = False
            else:
                text = self.attval(node.astext())
            self.author_stack[-1].append(text)
            raise nodes.SkipNode
        elif name == 'date':
            self.date.append(self.attval(node.astext()))
            raise nodes.SkipNode
    self.out.append('\\textbf{%s}: &\n\t' % self.language_label(name))
    if name == 'address':
        self.insert_newline = 1
        self.out.append('{\\raggedright\n')
        self.context.append(' } \\\\\n')
    else:
        self.context.append(' \\\\\n')

def depart_docinfo_item(self, node):
    self.out.append(self.context.pop())
    # for address we did set insert_newline
    self.insert_newline = False

def visit_doctest_block(self, node):
    # doctest blocks are rendered exactly like literal blocks
    self.visit_literal_block(node)

def depart_doctest_block(self, node):
    self.depart_literal_block(node)
def visit_document(self, node):
    """Record label definitions for the document title (if any)."""
    # titled document?
    if (self.use_latex_docinfo or len(node) and
        isinstance(node[0], nodes.title)):
        self.title_labels += self.ids_to_labels(node)

def depart_document(self, node):
    """Finalize the document parts after the tree walk:
    requirement/fallback lists, PDF setup, \\title data,
    bibliography, and a fake ToC if minitoc needs one.
    """
    # Complete header with information gained from walkabout
    # a) conditional requirements (before style sheet)
    self.requirements = self.requirements.sortedvalues()
    # b) conditional fallback definitions (after style sheet)
    self.fallbacks = self.fallbacks.sortedvalues()
    # c) PDF properties
    self.pdfsetup.append(PreambleCmds.linking % (self.colorlinks,
                                                 self.hyperlink_color,
                                                 self.hyperlink_color))
    if self.pdfauthor:
        authors = self.author_separator.join(self.pdfauthor)
        self.pdfinfo.append('  pdfauthor={%s}' % authors)
    if self.pdfinfo:
        self.pdfsetup += [r'\hypersetup{'] + self.pdfinfo + ['}']
    # Complete body
    # a) document title (part 'body_prefix'):
    # NOTE: Docutils puts author/date into docinfo, so normally
    #       we do not want LaTeX author/date handling (via \maketitle).
    #       To deactivate it, we add \title, \author, \date,
    #       even if the arguments are empty strings.
    if self.title or self.author_stack or self.date:
        authors = ['\\\\\n'.join(author_entry)
                   for author_entry in self.author_stack]
        title = [''.join(self.title)] + self.title_labels
        if self.subtitle:
            title += [r'\\ % subtitle',
                      r'\large{%s}' % ''.join(self.subtitle)
                     ] + self.subtitle_labels
        self.body_pre_docinfo.append(PreambleCmds.documenttitle % (
            '%\n  '.join(title),
            ' \\and\n'.join(authors),
            ', '.join(self.date)))
    # b) bibliography
    # TODO insertion point of bibliography should be configurable.
    if self._use_latex_citations and len(self._bibitems)>0:
        if not self.bibtex:
            # hand-written thebibliography: width by the widest label
            widest_label = ''
            for bi in self._bibitems:
                if len(widest_label)<len(bi[0]):
                    widest_label = bi[0]
            self.out.append('\n\\begin{thebibliography}{%s}\n' %
                            widest_label)
            for bi in self._bibitems:
                # cite_key: underscores must not be escaped
                cite_key = bi[0].replace(r'\_','_')
                self.out.append('\\bibitem[%s]{%s}{%s}\n' %
                                (bi[0], cite_key, bi[1]))
            self.out.append('\\end{thebibliography}\n')
        else:
            self.out.append('\n\\bibliographystyle{%s}\n' %
                            self.bibtex[0])
            self.out.append('\\bibliography{%s}\n' % self.bibtex[1])
    # c) make sure to generate a toc file if needed for local contents:
    if 'minitoc' in self.requirements and not self.has_latex_toc:
        self.out.append('\n\\faketableofcontents % for local ToCs\n')
def visit_emphasis(self, node):
    """Open an \\emph group; apply inline class styling if present."""
    self.out.append('\\emph{')
    if node['classes']:
        self.visit_inline(node)

def depart_emphasis(self, node):
    if node['classes']:
        self.depart_inline(node)
    self.out.append('}')
def visit_entry(self, node):
    """Start a table cell.

    Emits ``&`` cell separators (accounting for cells spanned from
    previous rows), opens \\multirow/\\multicolumn for spanning
    cells, and bold markup for header/stub cells.  Closing tokens
    are pushed on the context stack for depart_entry.
    """
    self.active_table.visit_entry()
    # cell separation
    # BUG: the following fails, with more than one multirow
    # starting in the second column (or later) see
    # ../../../test/functional/input/data/latex.txt
    if self.active_table.get_entry_number() == 1:
        # if the first row is a multirow, this actually is the second row.
        # this gets hairy if rowspans follow each other.
        if self.active_table.get_rowspan(0):
            count = 0
            while self.active_table.get_rowspan(count):
                count += 1
                self.out.append(' & ')
            self.active_table.visit_entry() # increment cell count
    else:
        self.out.append(' & ')
    # multirow, multicolumn
    # IN WORK BUG TODO HACK continues here
    # multirow in LaTeX simply will enlarge the cell over several rows
    # (the following n if n is positive, the former if negative).
    if 'morerows' in node and 'morecols' in node:
        raise NotImplementedError('Cells that '
            'span multiple rows *and* columns are not supported, sorry.')
    if 'morerows' in node:
        self.requirements['multirow'] = r'\usepackage{multirow}'
        count = node['morerows'] + 1
        self.active_table.set_rowspan(
            self.active_table.get_entry_number()-1,count)
        self.out.append('\\multirow{%d}{%s}{%%' %
                        (count,self.active_table.get_column_width()))
        self.context.append('}')
    elif 'morecols' in node:
        # the vertical bar before column is missing if it is the first
        # column. the one after always.
        if self.active_table.get_entry_number() == 1:
            bar1 = self.active_table.get_vertical_bar()
        else:
            bar1 = ''
        count = node['morecols'] + 1
        self.out.append('\\multicolumn{%d}{%sl%s}{' %
                        (count, bar1, self.active_table.get_vertical_bar()))
        self.context.append('}')
    else:
        self.context.append('')
    # header / not header
    if isinstance(node.parent.parent, nodes.thead):
        self.out.append('\\textbf{%')
        self.context.append('}')
    elif self.active_table.is_stub_column():
        self.out.append('\\textbf{')
        self.context.append('}')
    else:
        self.context.append('')

def depart_entry(self, node):
    """Close a table cell (pops the two tokens pushed by visit_entry)."""
    self.out.append(self.context.pop()) # header / not header
    self.out.append(self.context.pop()) # multirow/column
    # if following row is spanned from above.
    if self.active_table.get_rowspan(self.active_table.get_entry_number()):
        self.out.append(' & ')
        self.active_table.visit_entry() # increment cell count

def visit_row(self, node):
    self.active_table.visit_row()

def depart_row(self, node):
    # the table object produces the row terminator (and \hline/\cline)
    self.out.extend(self.active_table.depart_row())
    def visit_enumerated_list(self, node):
        """Open a custom LaTeX list environment for an enumerated list."""
        # We create our own enumeration list environment.
        # This allows to set the style and starting value
        # and unlimited nesting.
        # Map Docutils enumeration types to LaTeX counter formats:
        enum_style = {'arabic':'arabic',
                'loweralpha':'alph',
                'upperalpha':'Alph',
                'lowerroman':'roman',
                'upperroman':'Roman' }
        enum_suffix = ''
        if 'suffix' in node:
            enum_suffix = node['suffix']
        enum_prefix = ''
        if 'prefix' in node:
            enum_prefix = node['prefix']
        if self.compound_enumerators:
            # Prepend section number and/or the counters of enclosing
            # enumerations ("1.2.a" style numbering).
            pref = ''
            if self.section_prefix_for_enumerators and self.section_level:
                for i in range(self.section_level):
                    pref += '%d.' % self._section_number[i]
                pref = pref[:-1] + self.section_enumerator_separator
                enum_prefix += pref
            for ctype, cname in self._enumeration_counters:
                enum_prefix += '\\%s{%s}.' % (ctype, cname)
        enum_type = 'arabic'
        if 'enumtype' in node:
            enum_type = node['enumtype']
        if enum_type in enum_style:
            enum_type = enum_style[enum_type]
        # One LaTeX counter per nesting level, named listcnt0, listcnt1, ...
        counter_name = 'listcnt%d' % len(self._enumeration_counters)
        self._enumeration_counters.append((enum_type, counter_name))
        # If we haven't used this counter name before, then create a
        # new counter; otherwise, reset & reuse the old counter.
        if len(self._enumeration_counters) > self._max_enumeration_counters:
            self._max_enumeration_counters = len(self._enumeration_counters)
            self.out.append('\\newcounter{%s}\n' % counter_name)
        else:
            self.out.append('\\setcounter{%s}{0}\n' % counter_name)
        self.out.append('\\begin{list}{%s\\%s{%s}%s}\n' %
                        (enum_prefix,enum_type,counter_name,enum_suffix))
        self.out.append('{\n')
        self.out.append('\\usecounter{%s}\n' % counter_name)
        # set start after usecounter, because it initializes to zero.
        if 'start' in node:
            self.out.append('\\addtocounter{%s}{%d}\n' %
                            (counter_name,node['start']-1))
        ## set rightmargin equal to leftmargin
        self.out.append('\\setlength{\\rightmargin}{\\leftmargin}\n')
        self.out.append('}\n')
    def depart_enumerated_list(self, node):
        """Close the list environment and drop this level's counter."""
        self.out.append('\\end{list}\n')
        self._enumeration_counters.pop()
    def visit_field(self, node):
        """Field output is produced by the child nodes (name/body)."""
        # real output is done in siblings: _argument, _body, _name
        pass
    def depart_field(self, node):
        self.out.append('\n')
        ##self.out.append('%[depart_field]\n')
    def visit_field_argument(self, node):
        self.out.append('%[visit_field_argument]\n')
    def depart_field_argument(self, node):
        self.out.append('%[depart_field_argument]\n')
    def visit_field_body(self, node):
        pass
    def depart_field_body(self, node):
        # In the docinfo table, each field row must end with a line break.
        if self.out is self.docinfo:
            self.out.append(r'\\')
    def visit_field_list(self, node):
        # Only ordinary field lists get the DUfieldlist environment;
        # bibliographic fields are rendered in the docinfo table.
        if self.out is not self.docinfo:
            self.fallbacks['fieldlist'] = PreambleCmds.fieldlist
            self.out.append('%\n\\begin{DUfieldlist}\n')
    def depart_field_list(self, node):
        if self.out is not self.docinfo:
            self.out.append('\\end{DUfieldlist}\n')
    def visit_field_name(self, node):
        if self.out is self.docinfo:
            self.out.append('\\textbf{')
        else:
            # Commands with optional args inside an optional arg must be put
            # in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
            self.out.append('\\item[{')
    def depart_field_name(self, node):
        if self.out is self.docinfo:
            self.out.append('}: &')
        else:
            self.out.append(':}]')
    def visit_figure(self, node):
        """Open a LaTeX figure float; emit labels for the node's ids."""
        self.requirements['float_settings'] = PreambleCmds.float_settings
        # ! the 'align' attribute should set "outer alignment" !
        # For "inner alignment" use LaTeX default alignment (similar to HTML)
        ## if ('align' not in node.attributes or
        ##     node.attributes['align'] == 'center'):
        ##     align = '\n\\centering'
        ##     align_end = ''
        ## else:
        ##     # TODO non vertical space for other alignments.
        ##     align = '\\begin{flush%s}' % node.attributes['align']
        ##     align_end = '\\end{flush%s}' % node.attributes['align']
        ## self.out.append( '\\begin{figure}%s\n' % align )
        ## self.context.append( '%s\\end{figure}\n' % align_end )
        self.out.append('\\begin{figure}')
        if node.get('ids'):
            self.out += ['\n'] + self.ids_to_labels(node)
    def depart_figure(self, node):
        self.out.append('\\end{figure}\n')
    def visit_footer(self, node):
        """Collect footer content into a \\DUfooter macro definition."""
        self.push_output_collector([])
        self.out.append(r'\newcommand{\DUfooter}{')
    def depart_footer(self, node):
        self.out.append('}')
        # Stash the complete macro; it is emitted with the preamble.
        self.requirements['~footer'] = ''.join(self.out)
        self.pop_output_collector()
    def visit_footnote(self, node):
        """Render a footnote, either as figure float or via \\DUfootnotetext."""
        try:
            backref = node['backrefs'][0]
        except IndexError:
            backref = node['ids'][0] # no backref, use self-ref instead
        if self.settings.figure_footnotes:
            # legacy mode: footnotes as bottom-anchored figure floats
            self.requirements['~fnt_floats'] = PreambleCmds.footnote_floats
            self.out.append('\\begin{figure}[b]')
            self.append_hypertargets(node)
            if node.get('id') == node.get('name'): # explicite label
                self.out += self.ids_to_labels(node)
        elif self.docutils_footnotes:
            self.fallbacks['footnotes'] = PreambleCmds.footnotes
            # first whitespace-separated token is the footnote number/symbol
            num,text = node.astext().split(None,1)
            if self.settings.footnote_references == 'brackets':
                num = '[%s]' % num
            self.out.append('%%\n\\DUfootnotetext{%s}{%s}{%s}{' %
                            (node['ids'][0], backref, self.encode(num)))
            if node['ids'] == node['names']:
                self.out += self.ids_to_labels(node)
            # mask newline to prevent spurious whitespace:
            self.out.append('%')
        ## else: # TODO: "real" LaTeX \footnote{}s
    def depart_footnote(self, node):
        if self.figure_footnotes:
            self.out.append('\\end{figure}\n')
        else:
            self.out.append('}\n')
    def visit_footnote_reference(self, node):
        """Emit a hyperlinked footnote mark (bracketed or superscript)."""
        href = ''
        if 'refid' in node:
            href = node['refid']
        elif 'refname' in node:
            href = self.document.nameids[node['refname']]
        # if not self.docutils_footnotes:
            # TODO: insert footnote content at (or near) this place
            # print "footnote-ref to", node['refid']
            # footnotes = (self.document.footnotes +
            #              self.document.autofootnotes +
            #              self.document.symbol_footnotes)
            # for footnote in footnotes:
            #     # print footnote['ids']
            #     if node.get('refid', '') in footnote['ids']:
            #         print 'matches', footnote['ids']
        format = self.settings.footnote_references
        if format == 'brackets':
            self.append_hypertargets(node)
            self.out.append('\\hyperlink{%s}{[' % href)
            self.context.append(']}')
        else:
            self.fallbacks['footnotes'] = PreambleCmds.footnotes
            self.out.append(r'\DUfootnotemark{%s}{%s}{' %
                            (node['ids'][0], href))
            self.context.append('}')
    def depart_footnote_reference(self, node):
        self.out.append(self.context.pop())
    # footnote/citation label
    def label_delim(self, node, bracket, superscript):
        """Append the delimiter for a footnote or citation label.

        `bracket` is used for bracketed style, `superscript` otherwise.
        """
        if isinstance(node.parent, nodes.footnote):
            if not self.figure_footnotes:
                # \DUfootnotetext already typesets the label
                raise nodes.SkipNode
            if self.settings.footnote_references == 'brackets':
                self.out.append(bracket)
            else:
                self.out.append(superscript)
        else:
            assert isinstance(node.parent, nodes.citation)
            if not self._use_latex_citations:
                self.out.append(bracket)
    def visit_label(self, node):
        """footnote or citation label: in brackets or as superscript"""
        self.label_delim(node, '[', '\\textsuperscript{')
    def depart_label(self, node):
        self.label_delim(node, ']', '}')
    # elements generated by the framework e.g. section numbers.
    def visit_generated(self, node):
        pass
    def depart_generated(self, node):
        pass
    def visit_header(self, node):
        """Collect header content into a \\DUheader macro definition."""
        self.push_output_collector([])
        self.out.append(r'\newcommand{\DUheader}{')
    def depart_header(self, node):
        self.out.append('}')
        # Stash the complete macro; it is emitted with the preamble.
        self.requirements['~header'] = ''.join(self.out)
        self.pop_output_collector()
def to_latex_length(self, length_str):
"""Convert string with rst lenght to LaTeX"""
match = re.match('(\d*\.?\d*)\s*(\S*)', length_str)
if not match:
return length_str
value, unit = match.groups()[:2]
# no unit or "DTP" points (called 'bp' in TeX):
if unit in ('', 'pt'):
length_str = '%sbp' % value
# percentage: relate to current line width
elif unit == '%':
length_str = '%.3f\\linewidth' % (float(value)/100.0)
return length_str
    def visit_image(self, node):
        """Emit \\includegraphics with size options and alignment wrappers."""
        self.requirements['graphicx'] = self.graphicx_package
        attrs = node.attributes
        # Add image URI to dependency list, assuming that it's
        # referring to a local file.
        self.settings.record_dependencies.add(attrs['uri'])
        # alignment defaults:
        if not 'align' in attrs:
            # Set default align of image in a figure to 'center'
            if isinstance(node.parent, nodes.figure):
                attrs['align'] = 'center'
            # query 'align-*' class argument
            for cls in node['classes']:
                if cls.startswith('align-'):
                    attrs['align'] = cls.split('-')[1]
        # pre- and postfix (prefix inserted in reverse order)
        pre = []
        post = []
        include_graphics_options = []
        display_style = ('block-', 'inline-')[self.is_inline(node)]
        # wrapper commands per alignment value: (prefix, suffix)
        align_codes = {
            # inline images: by default latex aligns the bottom.
            'bottom': ('', ''),
            'middle': (r'\raisebox{-0.5\height}{', '}'),
            'top': (r'\raisebox{-\height}{', '}'),
            # block level images:
            'center': (r'\noindent\makebox[\textwidth][c]{', '}'),
            'left': (r'\noindent{', r'\hfill}'),
            'right': (r'\noindent{\hfill', '}'),}
        if 'align' in attrs:
            try:
                align_code = align_codes[attrs['align']]
                pre.append(align_code[0])
                post.append(align_code[1])
            except KeyError:
                pass # TODO: warn?
        if 'height' in attrs:
            include_graphics_options.append('height=%s' %
                            self.to_latex_length(attrs['height']))
        if 'scale' in attrs:
            include_graphics_options.append('scale=%f' %
                                            (attrs['scale'] / 100.0))
        if 'width' in attrs:
            include_graphics_options.append('width=%s' %
                            self.to_latex_length(attrs['width']))
        if not self.is_inline(node):
            pre.append('\n')
            post.append('\n')
        pre.reverse()
        self.out.extend(pre)
        options = ''
        if include_graphics_options:
            options = '[%s]' % (','.join(include_graphics_options))
        self.out.append('\\includegraphics%s{%s}' % (options, attrs['uri']))
        self.out.extend(post)
    def depart_image(self, node):
        if node.get('ids'):
            self.out += self.ids_to_labels(node) + ['\n']
    def visit_interpreted(self, node):
        # @@@ Incomplete, pending a proper implementation on the
        # Parser/Reader end.
        self.visit_literal(node)
    def depart_interpreted(self, node):
        self.depart_literal(node)
    def visit_legend(self, node):
        """Figure legend, rendered with the DUlegend fallback environment."""
        self.fallbacks['legend'] = PreambleCmds.legend
        self.out.append('\\begin{DUlegend}')
    def depart_legend(self, node):
        self.out.append('\\end{DUlegend}\n')
    def visit_line(self, node):
        self.out.append('\item[] ')
    def depart_line(self, node):
        self.out.append('\n')
    def visit_line_block(self, node):
        """Open a DUlineblock; nested blocks are indented list items."""
        self.fallbacks['_providelength'] = PreambleCmds.providelength
        self.fallbacks['lineblock'] = PreambleCmds.lineblock
        if isinstance(node.parent, nodes.line_block):
            self.out.append('\\item[]\n'
                            '\\begin{DUlineblock}{\\DUlineblockindent}\n')
        else:
            self.out.append('\n\\begin{DUlineblock}{0em}\n')
    def depart_line_block(self, node):
        self.out.append('\\end{DUlineblock}\n')
    def visit_list_item(self, node):
        self.out.append('\n\\item ')
    def depart_list_item(self, node):
        pass
    def visit_literal(self, node):
        """Inline literal: typewriter face; self.literal disables encoding."""
        self.literal = True
        self.out.append('\\texttt{')
        if node['classes']:
            self.visit_inline(node)
    def depart_literal(self, node):
        self.literal = False
        if node['classes']:
            self.depart_inline(node)
        self.out.append('}')
# Literal blocks are used for '::'-prefixed literal-indented
# blocks of text, where the inline markup is not recognized,
# but are also the product of the "parsed-literal" directive,
# where the markup is respected.
#
# In both cases, we want to use a typewriter/monospaced typeface.
# For "real" literal-blocks, we can use \verbatim, while for all
# the others we must use \mbox or \alltt.
#
# We can distinguish between the two kinds by the number of
# siblings that compose this node: if it is composed by a
# single element, it's either
# * a real one,
# * a parsed-literal that does not contain any markup, or
# * a parsed-literal containing just one markup construct.
def is_plaintext(self, node):
"""Check whether a node can be typeset verbatim"""
return (len(node) == 1) and isinstance(node[0], nodes.Text)
    def visit_literal_block(self, node):
        """Render a literal block."""
        # environments and packages to typeset literal blocks
        packages = {'listing': r'\usepackage{moreverb}',
                    'lstlisting': r'\usepackage{listings}',
                    'Verbatim': r'\usepackage{fancyvrb}',
                    # 'verbatim': '',
                    'verbatimtab': r'\usepackage{moreverb}'}
        if not self.active_table.is_open():
            # no quote inside tables, to avoid vertical space between
            # table border and literal block.
            # BUG: fails if normal text preceeds the literal block.
            self.out.append('%\n\\begin{quote}')
            self.context.append('\n\\end{quote}\n')
        else:
            self.out.append('\n')
            self.context.append('\n')
        if self.literal_block_env != '' and self.is_plaintext(node):
            # plain text: use the configured verbatim environment
            self.requirements['literal_block'] = packages.get(
                self.literal_block_env, '')
            self.verbatim = True
            self.out.append('\\begin{%s}%s\n' % (self.literal_block_env,
                                                 self.literal_block_options))
        else:
            # parsed-literal (contains markup): emulate verbatim by hand
            self.literal = True
            self.insert_newline = True
            self.insert_non_breaking_blanks = True
            self.out.append('{\\ttfamily \\raggedright \\noindent\n')
    def depart_literal_block(self, node):
        if self.verbatim:
            self.out.append('\n\\end{%s}\n' % self.literal_block_env)
            self.verbatim = False
        else:
            self.out.append('\n}')
            self.insert_non_breaking_blanks = False
            self.insert_newline = False
            self.literal = False
        self.out.append(self.context.pop())
## def visit_meta(self, node):
## self.out.append('[visit_meta]\n')
# TODO: set keywords for pdf?
# But:
# The reStructuredText "meta" directive creates a "pending" node,
# which contains knowledge that the embedded "meta" node can only
# be handled by HTML-compatible writers. The "pending" node is
# resolved by the docutils.transforms.components.Filter transform,
# which checks that the calling writer supports HTML; if it doesn't,
# the "pending" node (and enclosed "meta" node) is removed from the
# document.
# --- docutils/docs/peps/pep-0258.html#transformer
## def depart_meta(self, node):
## self.out.append('[depart_meta]\n')
    def visit_option(self, node):
        if self.context[-1]:
            # this is not the first option
            self.out.append(', ')
    def depart_option(self, node):
        # flag that the first option is done.
        self.context[-1] += 1
    def visit_option_argument(self, node):
        """Append the delimiter between an option and its argument to body."""
        self.out.append(node.get('delimiter', ' '))
    def depart_option_argument(self, node):
        pass
    def visit_option_group(self, node):
        self.out.append('\n\\item[')
        # flag for first option
        self.context.append(0)
    def depart_option_group(self, node):
        self.context.pop() # the flag
        self.out.append('] ')
    def visit_option_list(self, node):
        """Option lists use the DUoptionlist fallback environment."""
        self.fallbacks['_providelength'] = PreambleCmds.providelength
        self.fallbacks['optionlist'] = PreambleCmds.optionlist
        self.out.append('%\n\\begin{DUoptionlist}\n')
    def depart_option_list(self, node):
        self.out.append('\n\\end{DUoptionlist}\n')
    def visit_option_list_item(self, node):
        pass
    def depart_option_list_item(self, node):
        pass
    def visit_option_string(self, node):
        ##self.out.append(self.starttag(node, 'span', '', CLASS='option'))
        pass
    def depart_option_string(self, node):
        ##self.out.append('</span>')
        pass
    def visit_organization(self, node):
        self.visit_docinfo_item(node, 'organization')
    def depart_organization(self, node):
        self.depart_docinfo_item(node)
    def visit_paragraph(self, node):
        """Separate paragraphs with a blank line (suppressed in some parents)."""
        # no newline if the paragraph is first in a list item
        if ((isinstance(node.parent, nodes.list_item) or
             isinstance(node.parent, nodes.description)) and
            node is node.parent[0]):
            return
        index = node.parent.index(node)
        # inside a compound: only separate from a preceding paragraph/compound
        if (isinstance(node.parent, nodes.compound) and
            index > 0 and
            not isinstance(node.parent[index - 1], nodes.paragraph) and
            not isinstance(node.parent[index - 1], nodes.compound)):
            return
        self.out.append('\n')
        if node.get('ids'):
            self.out += self.ids_to_labels(node) + ['\n']
    def depart_paragraph(self, node):
        self.out.append('\n')
    def visit_problematic(self, node):
        """Render a problematic node: red bold text linking to the message."""
        self.requirements['color'] = PreambleCmds.color
        self.out.append('%\n')
        self.append_hypertargets(node)
        self.out.append(r'\hyperlink{%s}{\textbf{\color{red}' % node['refid'])
    def depart_problematic(self, node):
        self.out.append('}}')
    def visit_raw(self, node):
        """Pass raw-latex content through without encoding; skip other formats."""
        if not 'latex' in node.get('format', '').split():
            raise nodes.SkipNode
        if node['classes']:
            self.visit_inline(node)
        # append "as-is" skipping any LaTeX-encoding
        self.verbatim = True
    def depart_raw(self, node):
        self.verbatim = False
        if node['classes']:
            self.depart_inline(node)
def has_unbalanced_braces(self, string):
"""Test whether there are unmatched '{' or '}' characters."""
level = 0
for ch in string:
if ch == '{':
level += 1
if ch == '}':
level -= 1
if level < 0:
return True
return level != 0
    def visit_reference(self, node):
        """Render external (\\href/\\url) or internal (\\hyperref) links."""
        # We need to escape #, \, and % if we use the URL in a command.
        special_chars = {ord('#'): ur'\#',
                         ord('%'): ur'\%',
                         ord('\\'): ur'\\',
                        }
        # external reference (URL)
        if 'refuri' in node:
            href = unicode(node['refuri']).translate(special_chars)
            # problematic chars double caret and unbalanced braces:
            if href.find('^^') != -1 or self.has_unbalanced_braces(href):
                self.error(
                    'External link "%s" not supported by LaTeX.\n'
                    ' (Must not contain "^^" or unbalanced braces.)' % href)
            if node['refuri'] == node.astext():
                # URL shown as-is: use \url (better line breaking)
                self.out.append(r'\url{%s}' % href)
                raise nodes.SkipNode
            self.out.append(r'\href{%s}{' % href)
            return
        # internal reference
        if 'refid' in node:
            href = node['refid']
        elif 'refname' in node:
            href = self.document.nameids[node['refname']]
        else:
            raise AssertionError('Unknown reference.')
        if not self.is_inline(node):
            self.out.append('\n')
        self.out.append('\\hyperref[%s]{' % href)
        if self._reference_label:
            # reference by section/figure number (\ref-style command)
            self.out.append('\\%s{%s}}' %
                            (self._reference_label, href.replace('#', '')))
            raise nodes.SkipNode
    def depart_reference(self, node):
        self.out.append('}')
        if not self.is_inline(node):
            self.out.append('\n')
    def visit_revision(self, node):
        self.visit_docinfo_item(node, 'revision')
    def depart_revision(self, node):
        self.depart_docinfo_item(node)
    def visit_section(self, node):
        """Track nesting level and per-level section numbers."""
        self.section_level += 1
        # Initialize counter for potential subsections:
        self._section_number.append(0)
        # Counter for this section's level (initialized by parent section):
        self._section_number[self.section_level - 1] += 1
    def depart_section(self, node):
        # Remove counter for potential subsections:
        self._section_number.pop()
        self.section_level -= 1
    def visit_sidebar(self, node):
        """Sidebars use the DUsidebar fallback command."""
        self.requirements['color'] = PreambleCmds.color
        self.fallbacks['sidebar'] = PreambleCmds.sidebar
        self.out.append('\n\\DUsidebar{\n')
    def depart_sidebar(self, node):
        self.out.append('}\n')
    # (prefix, suffix) pairs for the configurable attribution style
    attribution_formats = {'dash': ('---', ''),
                           'parentheses': ('(', ')'),
                           'parens': ('(', ')'),
                           'none': ('', '')}
    def visit_attribution(self, node):
        """Block-quote attribution, right-aligned with configured delimiters."""
        prefix, suffix = self.attribution_formats[self.settings.attribution]
        self.out.append('\n\\begin{flushright}\n')
        self.out.append(prefix)
        self.context.append(suffix)
    def depart_attribution(self, node):
        self.out.append(self.context.pop() + '\n')
        self.out.append('\\end{flushright}\n')
    def visit_status(self, node):
        self.visit_docinfo_item(node, 'status')
    def depart_status(self, node):
        self.depart_docinfo_item(node)
    def visit_strong(self, node):
        self.out.append('\\textbf{')
        if node['classes']:
            self.visit_inline(node)
    def depart_strong(self, node):
        if node['classes']:
            self.depart_inline(node)
        self.out.append('}')
    def visit_substitution_definition(self, node):
        # substitutions are resolved by the parser; nothing to output
        raise nodes.SkipNode
    def visit_substitution_reference(self, node):
        self.unimplemented_visit(node)
    def visit_subtitle(self, node):
        """Document, section, or sidebar subtitle."""
        if isinstance(node.parent, nodes.document):
            self.push_output_collector(self.subtitle)
            self.subtitle_labels += self.ids_to_labels(node, set_anchor=False)
        # section subtitle: "starred" (no number, not in ToC)
        elif isinstance(node.parent, nodes.section):
            self.out.append(r'\%s*{' %
                             self.d_class.section(self.section_level + 1))
        else:
            self.fallbacks['subtitle'] = PreambleCmds.subtitle
            self.out.append('\n\\DUsubtitle[%s]{' % node.parent.tagname)
    def depart_subtitle(self, node):
        if isinstance(node.parent, nodes.document):
            self.pop_output_collector()
        else:
            self.out.append('}\n')
    def visit_system_message(self, node):
        """Render a Docutils system message as a red admonition with backlinks."""
        self.requirements['color'] = PreambleCmds.color
        self.fallbacks['title'] = PreambleCmds.title
        node['classes'] = ['system-message']
        self.visit_admonition(node)
        self.out.append('\\DUtitle[system-message]{system-message}\n')
        self.append_hypertargets(node)
        try:
            line = ', line~%s' % node['line']
        except KeyError:
            line = ''
        self.out.append('\n\n{\color{red}%s/%s} in \\texttt{%s}%s\n' %
                         (node['type'], node['level'],
                          self.encode(node['source']), line))
        if len(node['backrefs']) == 1:
            self.out.append('\n\\hyperlink{%s}{' % node['backrefs'][0])
            self.context.append('}')
        else:
            backrefs = ['\\hyperlink{%s}{%d}' % (href, i+1)
                        for (i, href) in enumerate(node['backrefs'])]
            self.context.append('backrefs: ' + ' '.join(backrefs))
    def depart_system_message(self, node):
        self.out.append(self.context.pop())
        self.depart_admonition()
    def visit_table(self, node):
        """Open a table; nested tables fall back to 'tabular'."""
        self.requirements['table'] = PreambleCmds.table
        if self.active_table.is_open():
            self.table_stack.append(self.active_table)
            # nesting longtable does not work (e.g. 2007-04-18)
            self.active_table = Table(self,'tabular',self.settings.table_style)
        self.active_table.open()
        for cls in node['classes']:
            self.active_table.set_table_style(cls)
        if self.active_table._table_style == 'booktabs':
            self.requirements['booktabs'] = r'\usepackage{booktabs}'
        self.out.append('\n' + self.active_table.get_opening())
    def depart_table(self, node):
        self.out.append(self.active_table.get_closing() + '\n')
        self.active_table.close()
        if len(self.table_stack)>0:
            # restore the enclosing table
            self.active_table = self.table_stack.pop()
        else:
            self.active_table.set_table_style(self.settings.table_style)
        # Insert hyperlabel after (long)table, as
        # other places (beginning, caption) result in LaTeX errors.
        if node.get('ids'):
            self.out += self.ids_to_labels(node, set_anchor=False) + ['\n']
    def visit_target(self, node):
        """Emit labels for explicit targets; skip indirect ones."""
        # Skip indirect targets:
        if ('refuri' in node # external hyperlink
            or 'refid' in node # resolved internal link
            or 'refname' in node): # unresolved internal link
            ## self.out.append('%% %s\n' % node) # for debugging
            return
        self.out.append('%\n')
        # do we need an anchor (\phantomsection)?
        set_anchor = not(isinstance(node.parent, nodes.caption) or
                         isinstance(node.parent, nodes.title))
        # TODO: where else can/must we omit the \phantomsection?
        self.out += self.ids_to_labels(node, set_anchor)
    def depart_target(self, node):
        pass
    def visit_tbody(self, node):
        # BUG write preamble if not yet done (colspecs not [])
        # for tables without heads.
        if not self.active_table.get('preamble written'):
            self.visit_thead(None)
            self.depart_thead(None)
    def depart_tbody(self, node):
        pass
    def visit_term(self, node):
        """definition list term"""
        # Commands with optional args inside an optional arg must be put
        # in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
        self.out.append('\\item[{')
    def depart_term(self, node):
        # \leavevmode results in a line break if the
        # term is followed by an item list.
        self.out.append('}] \leavevmode ')
    def visit_tgroup(self, node):
        #self.out.append(self.starttag(node, 'colgroup'))
        #self.context.append('</colgroup>\n')
        pass
    def depart_tgroup(self, node):
        pass
    # recursion guard/counter for nested table heads
    _thead_depth = 0
    def thead_depth (self):
        return self._thead_depth
    def visit_thead(self, node):
        """Emit column specs (once) and caption, then the head rows."""
        self._thead_depth += 1
        if 1 == self.thead_depth():
            self.out.append('{%s}\n' % self.active_table.get_colspecs())
            self.active_table.set('preamble written',1)
        self.out.append(self.active_table.get_caption())
        self.out.extend(self.active_table.visit_thead())
    def depart_thead(self, node):
        if node is not None:
            self.out.extend(self.active_table.depart_thead())
            if self.active_table.need_recurse():
                node.walkabout(self)
        self._thead_depth -= 1
    def bookmark(self, node):
        """Return label and pdfbookmark string for titles."""
        result = ['']
        if self.settings.sectnum_xform: # "starred" section cmd
            # add to the toc and pdfbookmarks
            section_name = self.d_class.section(max(self.section_level, 1))
            section_title = self.encode(node.astext())
            result.append(r'\phantomsection')
            result.append(r'\addcontentsline{toc}{%s}{%s}' %
                          (section_name, section_title))
        result += self.ids_to_labels(node.parent, set_anchor=False)
        # join with comment-masked newlines to avoid spurious whitespace
        return '%\n '.join(result) + '%\n'
    def visit_title(self, node):
        """Append section and other titles."""
        # Document title
        if node.parent.tagname == 'document':
            self.push_output_collector(self.title)
            self.context.append('')
            self.pdfinfo.append(' pdftitle={%s},' %
                                self.encode(node.astext()))
        # Topic titles (topic, admonition, sidebar)
        elif (isinstance(node.parent, nodes.topic) or
              isinstance(node.parent, nodes.admonition) or
              isinstance(node.parent, nodes.sidebar)):
            self.fallbacks['title'] = PreambleCmds.title
            classes = ','.join(node.parent['classes'])
            if not classes:
                classes = node.tagname
            self.out.append('\\DUtitle[%s]{' % classes)
            self.context.append('}\n')
        # Table caption
        elif isinstance(node.parent, nodes.table):
            self.push_output_collector(self.active_table.caption)
            self.context.append('')
        # Section title
        else:
            self.out.append('\n\n')
            self.out.append('%' + '_' * 75)
            self.out.append('\n\n')
            #
            section_name = self.d_class.section(self.section_level)
            # number sections?
            if (self.settings.sectnum_xform # numbering by Docutils
                or (self.section_level > len(self.d_class.sections))):
                section_star = '*'
            else: # LaTeX numbered sections
                section_star = ''
            self.out.append(r'\%s%s{' % (section_name, section_star))
            # System messages heading in red:
            if ('system-messages' in node.parent['classes']):
                self.requirements['color'] = PreambleCmds.color
                self.out.append('\color{red}')
            # label and ToC entry:
            self.context.append(self.bookmark(node) + '}\n')
        # MAYBE postfix paragraph and subparagraph with \leavemode to
        # ensure floats stay in the section and text starts on a new line.
    def depart_title(self, node):
        self.out.append(self.context.pop())
        if (isinstance(node.parent, nodes.table) or
            node.parent.tagname == 'document'):
            self.pop_output_collector()
    def minitoc(self, node, title, depth):
        """Generate a local table of contents with LaTeX package minitoc"""
        section_name = self.d_class.section(self.section_level)
        # name-prefix for current section level
        minitoc_names = {'part': 'part', 'chapter': 'mini'}
        if 'chapter' not in self.d_class.sections:
            minitoc_names['section'] = 'sect'
        try:
            minitoc_name = minitoc_names[section_name]
        except KeyError: # minitoc only supports part- and toplevel
            self.warn('Skipping local ToC at %s level.\n' % section_name +
                      ' Feature not supported with option "use-latex-toc"',
                      base_node=node)
            return
        # Requirements/Setup
        self.requirements['minitoc'] = PreambleCmds.minitoc
        self.requirements['minitoc-'+minitoc_name] = (r'\do%stoc' %
                                                      minitoc_name)
        # depth: (Docutils defaults to unlimited depth)
        maxdepth = len(self.d_class.sections)
        self.requirements['minitoc-%s-depth' % minitoc_name] = (
            r'\mtcsetdepth{%stoc}{%d}' % (minitoc_name, maxdepth))
        # Process 'depth' argument (!Docutils stores a relative depth while
        # minitoc expects an absolute depth!):
        offset = {'sect': 1, 'mini': 0, 'part': 0}
        if 'chapter' in self.d_class.sections:
            offset['part'] = -1
        if depth:
            self.out.append('\\setcounter{%stocdepth}{%d}' %
                            (minitoc_name, depth + offset[minitoc_name]))
        # title:
        self.out.append('\\mtcsettitle{%stoc}{%s}\n' % (minitoc_name, title))
        # the toc-generating command:
        self.out.append('\\%stoc\n' % minitoc_name)
    def visit_topic(self, node):
        """Handle generic topics, abstracts, dedications, and ToCs."""
        # Topic nodes can be generic topic, abstract, dedication, or ToC.
        # table of contents:
        if 'contents' in node['classes']:
            self.out.append('\n')
            self.out += self.ids_to_labels(node)
            # add contents to PDF bookmarks sidebar
            if isinstance(node.next_node(), nodes.title):
                self.out.append('\n\\pdfbookmark[%d]{%s}{%s}\n' %
                                (self.section_level+1,
                                 node.next_node().astext(),
                                 node.get('ids', ['contents'])[0]
                                ))
            if self.use_latex_toc:
                title = ''
                if isinstance(node.next_node(), nodes.title):
                    # LaTeX typesets the ToC title itself
                    title = self.encode(node.pop(0).astext())
                depth = node.get('depth', 0)
                if 'local' in node['classes']:
                    self.minitoc(node, title, depth)
                    self.context.append('')
                    return
                if depth:
                    self.out.append('\\setcounter{tocdepth}{%d}\n' % depth)
                if title != 'Contents':
                    self.out.append('\\renewcommand{\\contentsname}{%s}\n' %
                                    title)
                self.out.append('\\tableofcontents\n\n')
                self.has_latex_toc = True
            else: # Docutils generated contents list
                # set flag for visit_bullet_list() and visit_title()
                self.is_toc_list = True
            self.context.append('')
        elif ('abstract' in node['classes'] and
              self.settings.use_latex_abstract):
            self.push_output_collector(self.abstract)
            self.out.append('\\begin{abstract}')
            self.context.append('\\end{abstract}\n')
            if isinstance(node.next_node(), nodes.title):
                node.pop(0) # LaTeX provides its own title
        else:
            self.fallbacks['topic'] = PreambleCmds.topic
            # special topics:
            if 'abstract' in node['classes']:
                self.fallbacks['abstract'] = PreambleCmds.abstract
                self.push_output_collector(self.abstract)
            if 'dedication' in node['classes']:
                self.fallbacks['dedication'] = PreambleCmds.dedication
                self.push_output_collector(self.dedication)
            self.out.append('\n\\DUtopic[%s]{\n' % ','.join(node['classes']))
            self.context.append('}\n')
    def depart_topic(self, node):
        self.out.append(self.context.pop())
        self.is_toc_list = False
        if ('abstract' in node['classes'] or
            'dedication' in node['classes']):
            self.pop_output_collector()
    def visit_inline(self, node): # <span>, i.e. custom roles
        """Wrap content in one \\DUrole{...}{ per class value."""
        # insert fallback definition
        self.fallbacks['inline'] = PreambleCmds.inline
        self.out += [r'\DUrole{%s}{' % cls for cls in node['classes']]
        self.context.append('}' * (len(node['classes'])))
    def depart_inline(self, node):
        self.out.append(self.context.pop())
    def visit_rubric(self, node):
        self.fallbacks['rubric'] = PreambleCmds.rubric
        self.out.append('\n\\DUrubric{')
        self.context.append('}\n')
    def depart_rubric(self, node):
        self.out.append(self.context.pop())
    def visit_transition(self, node):
        self.fallbacks['transition'] = PreambleCmds.transition
        self.out.append('\n\n')
        self.out.append('%' + '_' * 75 + '\n')
        self.out.append(r'\DUtransition')
        self.out.append('\n\n')
    def depart_transition(self, node):
        pass
    def visit_version(self, node):
        self.visit_docinfo_item(node, 'version')
    def depart_version(self, node):
        self.depart_docinfo_item(node)
    def unimplemented_visit(self, node):
        raise NotImplementedError('visiting unimplemented node type: %s' %
                                  node.__class__.__name__)
# def unknown_visit(self, node):
# def default_visit(self, node):
# vim: set ts=4 et ai :
| bancek/egradebook | src/lib/docutils/writers/latex2e/__init__.py | Python | gpl-3.0 | 111,472 | [
"VisIt"
] | 430ff96b79be70b4e0c86abebfff48c47912f2f6c11866ee29a41f45873707aa |
#!/usr/bin/env python
"""
Populates the database with the current installations of components
This script assumes that the InstalledComponentsDB, the
ComponentMonitoring service and the Notification service are installed and running
"""
__RCSID__ = "$Id$"
import sys
from datetime import datetime
from DIRAC import exit as DIRACexit
from DIRAC import S_OK, gLogger, gConfig
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.ConfigurationSystem.Client.Helpers.CSGlobals import getSetup
from DIRAC.Core.Base import Script
from DIRAC.FrameworkSystem.Client.NotificationClient import NotificationClient
from DIRAC.FrameworkSystem.Client.SystemAdministratorIntegrator \
import SystemAdministratorIntegrator
from DIRAC.FrameworkSystem.Client.ComponentMonitoringClient \
import ComponentMonitoringClient
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
global excludedHosts  # module-level 'global' is a no-op; kept to signal intent
excludedHosts = []
def setExcludedHosts( value ):
  """Callback for the -e/--exclude command-line switch.

  Splits the comma-separated host list into the module-level
  ``excludedHosts`` list. Surrounding whitespace is stripped and empty
  entries (e.g. produced by a trailing comma) are dropped, so
  ``"hostA, hostB,"`` yields ``['hostA', 'hostB']``.

  :param str value: comma-separated host names
  :return: S_OK()
  """
  global excludedHosts
  excludedHosts = [ host.strip() for host in value.split( ',' ) if host.strip() ]
  return S_OK()
Script.registerSwitch( "e:", "exclude=", "Comma separated list of hosts to be excluded from the scanning process", setExcludedHosts )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
                                     'Usage:',
                                     '  %s [option|cfgfile] ... [debug]' % Script.scriptName ] ) )
Script.parseCommandLine( ignoreErrors = False )
args = Script.getPositionalArgs()
componentType = ''
# Get my setup
mySetup = gConfig.getValue( 'DIRAC/Setup' )
# Retrieve information from all the hosts
client = SystemAdministratorIntegrator( exclude = excludedHosts )
resultAll = client.getOverallStatus()
# Retrieve user installing the component
result = getProxyInfo()
if result[ 'OK' ]:
  user = result[ 'Value' ][ 'username' ]
else:
  DIRACexit( -1 )
if not user:
  user = 'unknown'
notificationClient = NotificationClient()
for host in resultAll[ 'Value' ]:
  if not resultAll[ 'Value' ][ host ][ 'OK' ]:
    # If the host cannot be contacted, exclude it and send message
    excludedHosts.append( host )
    result = notificationClient.sendMail( Operations().getValue( 'EMail/Production', [] ), 'Unreachable host', '\ndirac-populate-component-db: Could not fill the database with the components from unreachable host %s\n' % host )
    if not result[ 'OK' ]:
      gLogger.error( 'Can not send unreachable host notification mail: %s' % result[ 'Message' ] )
if not resultAll[ 'OK' ]:
  gLogger.error( resultAll[ 'Message' ] )
  DIRACexit( -1 )
# Gather per-host information; bail out on any system-wide RPC failure.
resultHosts = client.getHostInfo()
if not resultHosts[ 'OK' ]:
  gLogger.error( resultHosts[ 'Message' ] )
  DIRACexit( -1 )
resultInfo = client.getInfo()
if not resultInfo[ 'OK' ]:
  gLogger.error( resultInfo[ 'Message' ] )
  DIRACexit( -1 )
resultMySQL = client.getMySQLStatus()
if not resultMySQL[ 'OK' ]:
  gLogger.error( resultMySQL[ 'Message' ] )
  DIRACexit( -1 )
resultAllDB = client.getDatabases()
if not resultAllDB[ 'OK' ]:
  gLogger.error( resultAllDB[ 'Message' ] )
  DIRACexit( -1 )
resultAvailableDB = client.getAvailableDatabases()
if not resultAvailableDB[ 'OK' ]:
  gLogger.error( resultAvailableDB[ 'Message' ] )
  DIRACexit( -1 )
# Build one record per installed component/database, per reachable host.
records = []
finalSet = list( set( resultAll[ 'Value' ] ) - set( excludedHosts ) )
for host in finalSet:
  hasMySQL = True
  result = resultAll[ 'Value' ][ host ]
  hostResult = resultHosts[ 'Value' ][ host ]
  infoResult = resultInfo[ 'Value' ][ host ]
  mySQLResult = resultMySQL[ 'Value' ][ host ]
  allDBResult = resultAllDB[ 'Value' ][ host ]
  availableDBResult = resultAvailableDB[ 'Value' ][ host ]
  if not result[ 'OK' ]:
    gLogger.error( 'Host %s: %s' % ( host, result[ 'Message' ] ) )
    continue
  if not hostResult[ 'OK' ]:
    gLogger.error( 'Host %s: %s' % ( host, hostResult[ 'Message' ] ) )
    continue
  if not infoResult[ 'OK' ]:
    gLogger.error( 'Host %s: %s' % ( host, infoResult[ 'Message' ] ) )
    continue
  if mySQLResult[ 'OK' ]:
    if not allDBResult[ 'OK' ]:
      gLogger.error( 'Host %s: %s' % ( host, allDBResult[ 'Message' ] ) )
      continue
    if not availableDBResult[ 'OK' ]:
      gLogger.error( 'Host %s: %s' % ( host, availableDBResult[ 'Message' ] ) )
      continue
  else:
    # No MySQL on this host: skip the database scan below.
    hasMySQL = False
  setup = infoResult[ 'Value' ][ 'Setup' ]
  if setup != mySetup:
    continue
  cpu = hostResult[ 'Value' ][ 'CPUModel' ].strip()
  rDict = result[ 'Value' ]
  # Components other than databases
  for compType in rDict:
    if componentType and componentType != compType:
      continue
    for system in rDict[ compType ]:
      components = rDict[ compType ][ system ].keys()
      components.sort()
      for component in components:
        record = { 'Installation': {}, 'Component': {}, 'Host': {} }
        if rDict[ compType ][ system ][ component ][ 'Installed' ] and \
            component != 'ComponentMonitoring':
          runitStatus = \
              str( rDict[ compType ][ system ][ component ][ 'RunitStatus' ] )
          if runitStatus != 'Unknown':
            module = \
                str( rDict[ compType ][ system ][ component ][ 'Module' ] )
            record[ 'Component' ][ 'System' ] = system
            record[ 'Component' ][ 'Module' ] = module
            # Transform 'Services' into 'service', 'Agents' into 'agent' ...
            record[ 'Component' ][ 'Type' ] = compType.lower()[ :-1 ]
            record[ 'Host' ][ 'HostName' ] = host
            record[ 'Host' ][ 'CPU' ] = cpu
            record[ 'Installation' ][ 'Instance' ] = component
            record[ 'Installation' ][ 'InstallationTime' ] = datetime.utcnow()
            record[ 'Installation' ][ 'InstalledBy' ] = user
            records.append( record )
  # Databases
  csClient = CSAPI()
  cfg = csClient.getCurrentCFG()[ 'Value' ]
  if hasMySQL:
    allDB = allDBResult[ 'Value' ]
    availableDB = availableDBResult[ 'Value' ]
    for db in allDB:
      # Check for DIRAC only databases
      if db in availableDB.keys() and db != 'InstalledComponentsDB':
        # Check for 'installed' databases
        isSection = cfg.isSection \
            ( 'Systems/' + availableDB[ db ][ 'System' ] + '/' +
              cfg.getOption( 'DIRAC/Setups/' + setup + '/' +
                             availableDB[ db ][ 'System' ] ) + '/Databases/' + db +
              '/' )
        if isSection:
          record = { 'Installation': {}, 'Component': {}, 'Host': {} }
          record[ 'Component' ][ 'System' ] = availableDB[ db ][ 'System' ]
          record[ 'Component' ][ 'Module' ] = db
          record[ 'Component' ][ 'Type' ] = 'DB'
          record[ 'Host' ][ 'HostName' ] = host
          record[ 'Host' ][ 'CPU' ] = cpu
          record[ 'Installation' ][ 'Instance' ] = db
          record[ 'Installation' ][ 'InstallationTime' ] = datetime.utcnow()
          record[ 'Installation' ][ 'InstalledBy' ] = user
          records.append( record )
monitoringClient = ComponentMonitoringClient()
# Add the installations to the database
for record in records:
  result = monitoringClient.addInstallation \
      ( record[ 'Installation' ], record[ 'Component' ], record[ 'Host' ], True )
  if not result[ 'OK' ]:
    gLogger.error( result[ 'Message' ] )
| vmendez/DIRAC | FrameworkSystem/scripts/dirac-populate-component-db.py | Python | gpl-3.0 | 7,273 | [
"DIRAC"
] | 95c8a9e818e0e08a386b6ec3b3b626f65c0c3047f658dee5a6fb78498e73f4cc |
import io
import os
import sys
from aussieaddonscommon import session
from aussieaddonscommon import utils
from aussieaddonscommon.exceptions import AussieAddonsException
from pycaption import SRTWriter
from pycaption import WebVTTReader
import resources.lib.classes as classes
import resources.lib.comm as comm
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
def play(url):
    """Resolve a plugin URL to a playable stream and hand it to Kodi.

    Parses the serialized Program from *url*, fetches the stream URL,
    optionally configures inputstream.adaptive, attaches converted
    SRT subtitles when available, and calls setResolvedUrl.
    Any failure is reported through utils.handle_error.
    """
    try:
        # Remove cookies.dat for Kodi < 17.0 - causes issues with playback
        addon = xbmcaddon.Addon()
        cookies_dat = xbmc.translatePath('special://home/cache/cookies.dat')
        if os.path.isfile(cookies_dat):
            os.remove(cookies_dat)
        p = classes.Program()
        p.parse_kodi_url(url)
        stream_data = comm.get_stream_url(p.get_house_number(), p.get_url())
        stream_url = stream_data.get('stream_url')
        if not stream_url:
            utils.log('Not Playable: {0}'.format(repr(stream_data)))
            raise AussieAddonsException(
                'Not available: {0}\n{1}'.format(stream_data.get('msg'),
                                                 stream_data.get(
                                                     'availability')))
        use_ia = addon.getSetting('USE_IA') == 'true'
        if use_ia:
            # inputstream.adaptive playback requires drmhelper unless the
            # user opted to ignore the DRM check.
            if addon.getSetting('IGNORE_DRM') == 'false':
                try:
                    import drmhelper
                    if not drmhelper.check_inputstream(drm=False):
                        return
                except ImportError:
                    utils.log("Failed to import drmhelper")
                    utils.dialog_message(
                        'DRM Helper is needed for inputstream.adaptive '
                        'playback. Disable "Use inputstream.adaptive for '
                        'playback" in settings or install drmhelper. For '
                        'more information, please visit: '
                        'http://aussieaddons.com/drm')
                    return
        # Everything after '|' in the stream URL is the HTTP header blob.
        hdrs = stream_url[stream_url.find('|') + 1:]
        listitem = xbmcgui.ListItem(label=p.get_list_title(),
                                    path=stream_url)
        thumb = p.get_thumb()
        listitem.setArt({'icon': thumb,
                         'thumb': thumb})
        if use_ia:
            listitem.setProperty('inputstreamaddon', 'inputstream.adaptive')
            listitem.setProperty('inputstream.adaptive.manifest_type', 'hls')
            listitem.setProperty('inputstream.adaptive.stream_headers', hdrs)
            listitem.setProperty('inputstream.adaptive.license_key',
                                 stream_url)
        listitem.setInfo('video', p.get_kodi_list_item())
        # Add subtitles if available
        if p.is_captions():
            captions_url = stream_data.get('captions_url')
            profile = xbmcaddon.Addon().getAddonInfo('profile')
            path = xbmc.translatePath(profile)
            if not os.path.isdir(path):
                os.makedirs(path)
            caption_file = os.path.join(path, 'subtitles.eng.srt')
            if os.path.isfile(caption_file):
                os.remove(caption_file)
            try:
                sess = session.Session()
                webvtt_data = sess.get(captions_url).text
                if webvtt_data:
                    # Convert WebVTT captions to SRT, which Kodi supports.
                    with io.BytesIO() as buf:
                        webvtt_captions = WebVTTReader().read(webvtt_data)
                        srt_captions = SRTWriter().write(webvtt_captions)
                        srt_unicode = srt_captions.encode('utf-8')
                        buf.write(srt_unicode)
                        with io.open(caption_file, "wb") as f:
                            f.write(buf.getvalue())
                if hasattr(listitem, 'setSubtitles'):
                    listitem.setSubtitles([caption_file])
            except Exception as e:
                # Best effort: playback proceeds without subtitles.
                utils.log(
                    'Subtitles not available for this program: {0}'.format(e))
        if hasattr(listitem, 'addStreamInfo'):
            listitem.addStreamInfo('audio', p.get_kodi_audio_stream_info())
            listitem.addStreamInfo('video', p.get_kodi_video_stream_info())
        xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, listitem=listitem)
    except Exception:
        utils.handle_error('Unable to play video')
| andybotting/xbmc-addon-abc-iview | resources/lib/play.py | Python | gpl-3.0 | 4,342 | [
"VisIt"
] | e2fea17879ef65a121d9b8ce28decd2848c2bc3f4436df2a608f32fde3dc5c27 |
# Copyright (c) 2012, James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..core.sparse_gp_mpi import SparseGP_MPI
from .. import likelihoods
from .. import kern
from ..inference.latent_function_inference import VarDTC
from GPy.core.parameterization.variational import NormalPosterior
class SparseGPRegression(SparseGP_MPI):
    """
    Gaussian Process model for regression

    This is a thin wrapper around the SparseGP class, with a set of sensible defalts

    :param X: input observations
    :param X_variance: input uncertainties, one per input X
    :param Y: observed values
    :param kernel: a GPy kernel, defaults to rbf+white
    :param Z: inducing inputs (optional, see note)
    :type Z: np.ndarray (num_inducing x input_dim) | None
    :param num_inducing: number of inducing points (ignored if Z is passed, see note)
    :type num_inducing: int
    :rtype: model object

    .. Note:: If no Z array is passed, num_inducing (default 10) points are selected from the data. Other wise num_inducing is ignored
    .. Note:: Multiple independent outputs are allowed using columns of Y
    """

    def __init__(self, X, Y, kernel=None, Z=None, num_inducing=10, X_variance=None, mean_function=None, normalizer=None, mpi_comm=None, name='sparse_gp'):
        num_data, input_dim = X.shape

        # kern defaults to rbf (plus white for stability)
        if kernel is None:
            kernel = kern.RBF(input_dim)# + kern.white(input_dim, variance=1e-3)

        # Z defaults to a subset of the data (random sample without replacement)
        if Z is None:
            i = np.random.permutation(num_data)[:min(num_inducing, num_data)]
            Z = X.view(np.ndarray)[i].copy()
        else:
            assert Z.shape[1] == input_dim

        likelihood = likelihoods.Gaussian()
        # Wrap X with its uncertainties when input variances are given.
        if not (X_variance is None):
            X = NormalPosterior(X,X_variance)

        # Minibatch inference when running under MPI, plain VarDTC otherwise.
        if mpi_comm is not None:
            from ..inference.latent_function_inference.var_dtc_parallel import VarDTC_minibatch
            infr = VarDTC_minibatch(mpi_comm=mpi_comm)
        else:
            infr = VarDTC()

        SparseGP_MPI.__init__(self, X, Y, Z, kernel, likelihood, mean_function=mean_function,
                              inference_method=infr, normalizer=normalizer, mpi_comm=mpi_comm, name=name)

    def parameters_changed(self):
        # The minibatch (MPI) inference needs a dedicated gradient update path.
        from ..inference.latent_function_inference.var_dtc_parallel import update_gradients_sparsegp,VarDTC_minibatch
        if isinstance(self.inference_method,VarDTC_minibatch):
            update_gradients_sparsegp(self, mpi_comm=self.mpi_comm)
        else:
            super(SparseGPRegression, self).parameters_changed()
| befelix/GPy | GPy/models/sparse_gp_regression.py | Python | bsd-3-clause | 2,664 | [
"Gaussian"
] | 5b368d8c3c32b2e23cba86b593edbdb882169563fdcd97d8307b3524566e670e |
"""
Job Base Class
This class provides generic job definition functionality suitable for any VO.
Helper functions are documented with example usage for the DIRAC API. An example
script (for a simple executable) would be::
from DIRAC.Interfaces.API.Dirac import Dirac
from DIRAC.Interfaces.API.Job import Job
j = Job()
j.setCPUTime(500)
j.setExecutable('/bin/echo hello')
j.setExecutable('yourPythonScript.py')
j.setExecutable('/bin/echo hello again')
j.setName('MyJobName')
dirac = Dirac()
jobID = dirac.submitJob(j)
print 'Submission Result: ',jobID
Note that several executables can be provided and wil be executed sequentially.
"""
import re
import os
import shlex
from io import StringIO
from urllib.parse import quote
from DIRAC import S_OK, gLogger
from DIRAC.Core.Base.API import API
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Core.Workflow.Parameter import Parameter
from DIRAC.Core.Workflow.Workflow import Workflow
from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
from DIRAC.Core.Utilities.Subprocess import systemCall
from DIRAC.Core.Utilities.List import uniqueElements
from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getCESiteMapping
from DIRAC.Interfaces.API.Dirac import Dirac
from DIRAC.Workflow.Utilities.Utils import getStepDefinition, addStepToWorkflow
COMPONENT_NAME = "/Interfaces/API/Job"
class Job(API):
"""DIRAC jobs"""
#############################################################################
    def __init__(self, script=None, stdout="std.out", stderr="std.err"):
        """Instantiates the Workflow object and some default parameters.

        :param script: optional path to an existing workflow XML to load
        :param str stdout: name of the job's standard output file
        :param str stderr: name of the job's standard error file
        """
        super(Job, self).__init__()

        self.stepCount = 0
        self.owner = "NotSpecified"
        self.name = "Name"
        self.type = "User"
        self.priority = 1
        # Derive the VO from the group of the current proxy, when available.
        vo = ""
        ret = getProxyInfo(disableVOMS=True)
        if ret["OK"] and "group" in ret["Value"]:
            vo = getVOForGroup(ret["Value"]["group"])
        self.group = vo
        self.site = None
        self.stdout = stdout
        self.stderr = stderr
        self.logLevel = "INFO"
        self.executable = "dirac-jobexec"  # to be clarified
        self.addToInputSandbox = []
        self.addToOutputSandbox = []
        self.addToInputData = []
        # #Add member to handle Parametric jobs
        self.numberOfParameters = 0
        self.parameterSeqs = {}
        self.wfArguments = {}
        self.parametricWFArguments = {}

        # loading the function that will be used to determine the platform (it can be VO specific)
        res = ObjectLoader().loadObject("ConfigurationSystem.Client.Helpers.Resources", "getDIRACPlatforms")
        if not res["OK"]:
            self.log.fatal(res["Message"])
        self.getDIRACPlatforms = res["Value"]

        self.script = script
        if not script:
            self.workflow = Workflow()
            self.__setJobDefaults()
        else:
            self.workflow = Workflow(script)
#############################################################################
    def setExecutable(self, executable, arguments="", logFile="", modulesList=None, parameters=None, paramValues=None):
        """Helper function.

        Specify executable script to run with optional arguments and log file
        for standard output.

        These can be either:

        - Submission of a python or shell script to DIRAC
           - Can be inline scripts e.g. C{'/bin/ls'}
           - Scripts as executables e.g. python or shell script file

        Example usage:

        >>> job = Job()
        >>> job.setExecutable('myScript.py')

        :param str executable: Executable
        :param str arguments: Optional arguments to executable
        :param str logFile: Optional log file name
        :param list modulesList: Optional list of modules (to be used mostly when extending this method)
        :param parameters: Optional list of parameters (to be used mostly when extending this method)
        :type parameters: python:list of tuples
        :param paramValues: Optional list of parameters values (to be used mostly when extending this method)
        :type parameters: python:list of tuples
        """
        kwargs = {"executable": executable, "arguments": arguments, "logFile": logFile}
        if not isinstance(executable, str) or not isinstance(arguments, str) or not isinstance(logFile, str):
            return self._reportError("Expected strings for executable and arguments", **kwargs)

        # A locally-existing executable is shipped in the input sandbox.
        if os.path.exists(executable):
            self.log.verbose("Found script executable file %s" % (executable))
            self.addToInputSandbox.append(executable)
            logName = "%s.log" % (os.path.basename(executable))
        else:
            self.log.warn("The executable code could not be found locally")
            logName = "CodeOutput.log"

        self.stepCount += 1
        stepName = "RunScriptStep%s" % (self.stepCount)

        # User-supplied log name wins; otherwise prefix with the step counter.
        if logFile:
            if isinstance(logFile, str):
                logName = str(logFile)
        else:
            logName = "Script%s_%s" % (self.stepCount, logName)

        if not modulesList:
            modulesList = ["Script"]
        if not parameters:
            parameters = [
                ("executable", "string", "", "Executable Script"),
                ("arguments", "string", "", "Arguments for executable Script"),
                ("applicationLog", "string", "", "Log file name"),
            ]

        step = getStepDefinition("ScriptStep%s" % (self.stepCount), modulesList, parametersList=parameters)
        self.addToOutputSandbox.append(logName)

        stepInstance = addStepToWorkflow(self.workflow, step, stepName)

        stepInstance.setValue("applicationLog", logName)
        stepInstance.setValue("executable", executable)
        if arguments:
            # If arguments are expressed in terms of parameters, pass them to Workflow
            # These arguments will be resolved on the server side for each parametric job
            if re.search(r"%\(.*\)s", arguments) or re.search("%n", arguments):
                self.parametricWFArguments["arguments"] = arguments
            else:
                stepInstance.setValue("arguments", arguments)
        if paramValues:
            for param, value in paramValues:
                stepInstance.setValue(param, value)

        return S_OK(stepInstance)
#############################################################################
def setName(self, jobName):
"""Helper function.
A name for the job can be specified if desired. This will appear
in the JobName field of the monitoring webpage. If nothing is
specified a default value will appear.
Example usage:
>>> job=Job()
>>> job.setName("myJobName")
:param str jobName: Name of job
"""
kwargs = {"jobname": jobName}
if not isinstance(jobName, str):
return self._reportError("Expected strings for job name", **kwargs)
self.workflow.setName(jobName)
self._addParameter(self.workflow, "JobName", "JDL", jobName, "User specified name")
return S_OK()
#############################################################################
def setInputSandbox(self, files):
"""Helper function.
Specify input sandbox files less than 10MB in size. If over 10MB, files
or a directory may be uploaded to Grid storage, see C{dirac.uploadSandbox()}.
Paths to the options file and (if required) 'lib/' directory of the DLLs
are specified here. Default is local directory.
Executables may be placed in the lib/ directory if desired. The lib/ directory
is transferred to the Grid Worker Node before the job executes.
Files / directories can be specified using the `*` character e.g. `*.txt` these
are resolved correctly before job execution on the WN.
Example usage:
>>> job = Job()
>>> job.setInputSandbox(['DaVinci.opts'])
:param files: Input sandbox files, can specify full path
:type files: Single string or list of strings ['','']
"""
if isinstance(files, list) and files:
resolvedFiles = self._resolveInputSandbox(files)
fileList = ";".join(resolvedFiles)
description = "Input sandbox file list"
self._addParameter(self.workflow, "InputSandbox", "JDL", fileList, description)
# self.sandboxFiles=resolvedFiles
elif isinstance(files, str):
resolvedFiles = self._resolveInputSandbox([files])
fileList = ";".join(resolvedFiles)
description = "Input sandbox file"
# self.sandboxFiles = [files]
self._addParameter(self.workflow, "InputSandbox", "JDL", fileList, description)
else:
kwargs = {"files": files}
return self._reportError("Expected file string or list of files for input sandbox contents", **kwargs)
return S_OK()
#############################################################################
def setOutputSandbox(self, files):
"""Helper function.
Specify output sandbox files. If specified files are over 10MB, these
may be uploaded to Grid storage with a notification returned in the
output sandbox.
Example usage:
>>> job = Job()
>>> job.setOutputSandbox(['DaVinci_v19r12.log','DVNTuples.root'])
:param files: Output sandbox files
:type files: Single string or list of strings ['','']
"""
if isinstance(files, list) and files:
fileList = ";".join(files)
description = "Output sandbox file list"
self._addParameter(self.workflow, "OutputSandbox", "JDL", fileList, description)
elif isinstance(files, str):
description = "Output sandbox file"
self._addParameter(self.workflow, "OutputSandbox", "JDL", files, description)
else:
kwargs = {"files": files}
return self._reportError("Expected file string or list of files for output sandbox contents", **kwargs)
return S_OK()
#############################################################################
def setInputData(self, lfns):
"""Helper function.
Specify input data by Logical File Name (LFN).
Example usage:
>>> job = Job()
>>> job.setInputData(['/lhcb/production/DC04/v2/DST/00000742_00003493_10.dst'])
:param lfns: Logical File Names
:type lfns: Single LFN string or list of LFNs
"""
if isinstance(lfns, list) and lfns:
for i, _ in enumerate(lfns):
lfns[i] = lfns[i].replace("LFN:", "")
inputData = ["LFN:" + x for x in lfns]
inputDataStr = ";".join(inputData)
description = "List of input data specified by LFNs"
self._addParameter(self.workflow, "InputData", "JDL", inputDataStr, description)
elif isinstance(lfns, str): # single LFN
description = "Input data specified by LFN"
self._addParameter(self.workflow, "InputData", "JDL", lfns, description)
else:
kwargs = {"lfns": lfns}
return self._reportError("Expected lfn string or list of lfns for input data", **kwargs)
return S_OK()
#############################################################################
    def setParameterSequence(self, name, parameterList, addToWorkflow=False):
        """Function to define a sequence of values for parametric jobs.

        :param str name: sequence parameter name
        :param list parameterList: list of parameter values
        :param bool addToWorkflow: flag to add parameter to the workflow on the fly, if str, then
                                   use as the workflow parameter
        :return: S_OK/S_ERROR
        """
        # All sequences must have the same length; the first one fixes it.
        if self.numberOfParameters == 0:
            self.numberOfParameters = len(parameterList)
        elif self.numberOfParameters != len(parameterList):
            return self._reportError("Parameter sequences of different length", name="setParameterSequence")

        self.parameterSeqs[name] = parameterList
        if addToWorkflow:
            # A string value names the workflow parameter; True reuses the sequence name.
            if isinstance(addToWorkflow, str):
                self.wfArguments[name] = addToWorkflow
            else:
                self.wfArguments[name] = name
        return S_OK()
#############################################################################
    def setInputDataPolicy(self, policy, dataScheduling=True):
        """Helper function.

        Specify a job input data policy, this takes precedence over any site specific or
        global settings.

        Possible values for policy are 'Download' or 'Protocol' (case-insensitive). This
        requires that the module locations are defined for the VO in the CS.

        Example usage:

        >>> job = Job()
        >>> job.setInputDataPolicy('download')

        :param str policy: 'Download' or 'Protocol' (case-insensitive)
        :param bool dataScheduling: when False (Download only), jobs run anywhere
                                    and fetch their input data
        """
        kwargs = {"policy": policy, "dataScheduling": dataScheduling}
        csSection = "InputDataPolicy"
        possible = ["Download", "Protocol"]
        finalPolicy = ""
        # Canonicalize the user-supplied policy (case-insensitive match).
        for value in possible:
            if policy.lower() == value.lower():
                finalPolicy = value

        if not finalPolicy:
            return self._reportError(
                "Expected one of %s for input data policy" % (", ".join(possible)), __name__, **kwargs
            )

        # The actual policy module path comes from the Operations CS section.
        jobPolicy = Operations().getValue("%s/%s" % (csSection, finalPolicy), "")
        if not jobPolicy:
            return self._reportError(
                "Could not get value for Operations option %s/%s" % (csSection, finalPolicy), __name__, **kwargs
            )

        description = "User specified input data policy"
        self._addParameter(self.workflow, "InputDataPolicy", "JDL", jobPolicy, description)

        if not dataScheduling and policy.lower() == "download":
            self.log.verbose("Scheduling by input data is disabled, jobs will run anywhere and download input data")
            self._addParameter(
                self.workflow, "DisableDataScheduling", "JDL", "True", "Disable scheduling by input data"
            )

        # Bypassing data scheduling only makes sense with the Download policy.
        if not dataScheduling and policy.lower() != "download":
            self.log.error('Expected policy to be "download" for bypassing data scheduling')
            return self._reportError(
                'Expected policy to be "download" for bypassing data scheduling', __name__, **kwargs
            )

        return S_OK()
#############################################################################
def setOutputData(self, lfns, outputSE=None, outputPath=""):
"""Helper function.
For specifying output data to be registered in Grid storage. If a list
of OutputSEs are specified the job wrapper will try each in turn until
successful. If the OutputPath is specified this will appear only after
/ <VO> / user / <initial> / <username>
directory.
The output data can be LFNs or local file names.
If they are LFNs they should be pre-prended by "LFN:",
otherwise they will be interpreted as local files to be found.
If local files are specified, then specifying the outputPath may become necessary,
because if it's not specified then it will be constructed starting from the user name.
Example usage:
>>> job = Job()
>>> job.setOutputData(['DVNtuple.root'])
:param lfns: Output data file or files.
:type lfns: Single string or list of strings ['','']
:param outputSE: Optional parameter to specify the Storage Element to store data or files, e.g. CERN-tape
:type outputSE: string or python:list
:param outputPath: Optional parameter to specify part of the path in the storage (see above)
:type outputPath: string
"""
if outputSE is None:
outputSE = []
kwargs = {"lfns": lfns, "OutputSE": outputSE, "OutputPath": outputPath}
if isinstance(lfns, list) and lfns:
outputDataStr = ";".join(lfns)
description = "List of output data files"
self._addParameter(self.workflow, "OutputData", "JDL", outputDataStr, description)
elif isinstance(lfns, str):
description = "Output data file"
self._addParameter(self.workflow, "OutputData", "JDL", lfns, description)
else:
return self._reportError("Expected file name string or list of file names for output data", **kwargs)
if outputSE:
description = "User specified Output SE"
if isinstance(outputSE, str):
outputSE = [outputSE]
elif not isinstance(outputSE, list):
return self._reportError("Expected string or list for OutputSE", **kwargs)
outputSE = ";".join(oSE.strip() for oSE in outputSE)
self._addParameter(self.workflow, "OutputSE", "JDL", outputSE, description)
if outputPath:
description = "User specified Output Path"
if not isinstance(outputPath, str):
return self._reportError("Expected string for OutputPath", **kwargs)
# Remove leading "/" that might cause problems with os.path.join
# This will prevent to set OutputPath outside the Home of the User
while outputPath[0] == "/":
outputPath = outputPath[1:]
self._addParameter(self.workflow, "OutputPath", "JDL", outputPath, description)
return S_OK()
#############################################################################
    def setPlatform(self, platform):
        """Developer function: sets the target platform, e.g. Linux_x86_64_glibc-2.17.

        This platform is in the form of what it is returned by the dirac-platform script
        (or dirac-architecture if your extension provides it).

        :param str platform: a platform string, or 'any' (case-insensitive) to skip validation
        """
        kwargs = {"platform": platform}

        if not isinstance(platform, str):
            return self._reportError("Expected string for platform", **kwargs)

        # 'any' bypasses validation and sets no Platform JDL parameter.
        if not platform.lower() == "any":
            availablePlatforms = self.getDIRACPlatforms()
            if not availablePlatforms["OK"]:
                return self._reportError("Can't check for platform", **kwargs)
            if platform in availablePlatforms["Value"]:
                self._addParameter(self.workflow, "Platform", "JDL", platform, "Platform ( Operating System )")
            else:
                return self._reportError("Invalid platform", **kwargs)

        return S_OK()
#############################################################################
    def setCPUTime(self, timeInSecs):
        """Helper function.

        Example usage:

        >>> job = Job()
        >>> job.setCPUTime(5000)

        :param timeInSecs: CPU time
        :type timeInSecs: int
        """
        kwargs = {"timeInSecs": timeInSecs}
        if not isinstance(timeInSecs, int):
            try:
                timeInSecs = int(timeInSecs)
            except ValueError:
                # Strings containing "{{" are workflow templates resolved
                # later, so they are deliberately let through unconverted.
                if not re.search("{{", timeInSecs):
                    return self._reportError("Expected numerical string or int for CPU time in seconds", **kwargs)
        description = "CPU time in secs"
        self._addParameter(self.workflow, "CPUTime", "JDL", timeInSecs, description)
        return S_OK()
#############################################################################
    def setDestination(self, destination):
        """Helper function.

        Can specify a desired destination site or sites for job.  This can be useful
        for debugging purposes but often limits the possible candidate sites
        and overall system response time.

        Example usage:

        >>> job = Job()
        >>> job.setDestination('LCG.CERN.ch')

        :param destination: site string
        :type destination: str or python:list
        :return: S_OK/S_ERROR
        """
        kwargs = {"destination": destination}
        # A comma-separated string is normalized into a list of site names.
        if isinstance(destination, str):
            destination = destination.replace(" ", "").split(",")
            description = "User specified destination site"
        else:
            description = "List of sites selected by user"
        if isinstance(destination, list):
            # 'any' entries (case-insensitive) are not validated against the CS.
            sites = set(site for site in destination if site.lower() != "any")
            if sites:
                result = self._checkSiteIsValid(sites)
                if not result["OK"]:
                    return self._reportError("%s is not a valid destination site" % (destination), **kwargs)
            destSites = ";".join(destination)
            self._addParameter(self.workflow, "Site", "JDL", destSites, description)
        else:
            return self._reportError("Invalid destination site, expected string or list", **kwargs)
        return S_OK()
#############################################################################
    def setNumberOfProcessors(self, numberOfProcessors=None, minNumberOfProcessors=None, maxNumberOfProcessors=None):
        """Helper function.

        Example usage:

        >>> job = Job()
        >>> job.setNumberOfProcessors(numberOfProcessors=2)

        means that the job needs 2 processors

        >>> job = Job()
        >>> job.setNumberOfProcessors(minNumberOfProcessors=4, maxNumberOfProcessors=8)

        means that the job needs at least 4 processors, and that will use at most 8 processors

        >>> job = Job()
        >>> job.setNumberOfProcessors(minNumberOfProcessors=2)

        means that the job needs at least 2 processors, and that will use all the processors available

        >>> job = Job()
        >>> job.setNumberOfProcessors(minNumberOfProcessors=1)

        means that the job can run in SP mode, and that will use all the processors available
        (so the job could run MP, but also SP)

        >>> job = Job()
        >>> job.setNumberOfProcessors(maxNumberOfProcessors=4)

        is equivalent to

        >>> job.setNumberOfProcessors(minNumberOfProcessors=1, maxNumberOfProcessors=4)

        and it means that the job can run in SP mode, and that will use at most 4 processors
        (so the job could run MP, but also SP)

        >>> job = Job()
        >>> job.setNumberOfProcessors(minNumberOfProcessors=6, maxNumberOfProcessors=4)

        is a non-sense, and will lead to consider that the job can run exactly on 4 processors

        >>> job = Job()
        >>> job.setNumberOfProcessors(numberOfProcessors=3, maxNumberOfProcessors=4)

        will lead to ignore the second parameter

        >>> job = Job()
        >>> job.setNumberOfProcessors(numberOfProcessors=3, minNumberOfProcessors=2)

        will lead to ignore the second parameter

        :param int processors: number of processors required by the job (exact number, unless a min/max are set)
        :param int minNumberOfProcessors: optional min number of processors the job applications can use
        :param int maxNumberOfProcessors: optional max number of processors the job applications can use
        :return: S_OK/S_ERROR
        """
        # Case 1: an exact processor count was requested (min may bump it up).
        if numberOfProcessors:
            if not minNumberOfProcessors:
                nProc = numberOfProcessors
            else:
                nProc = max(numberOfProcessors, minNumberOfProcessors)
            if nProc > 1:
                self._addParameter(
                    self.workflow, "NumberOfProcessors", "JDL", nProc, "Exact number of processors requested"
                )
                self._addParameter(
                    self.workflow,
                    "MaxNumberOfProcessors",
                    "JDL",
                    nProc,
                    "Max Number of processors the job applications may use",
                )
            return S_OK()

        # Normalize: a max alone implies min=1; min >= max collapses to max.
        if maxNumberOfProcessors and not minNumberOfProcessors:
            minNumberOfProcessors = 1
        if minNumberOfProcessors and maxNumberOfProcessors and minNumberOfProcessors >= maxNumberOfProcessors:
            minNumberOfProcessors = maxNumberOfProcessors

        # Case 2: min == max > 1 is again an exact request.
        if (
            minNumberOfProcessors
            and maxNumberOfProcessors
            and minNumberOfProcessors == maxNumberOfProcessors
            and minNumberOfProcessors > 1
        ):
            self._addParameter(
                self.workflow,
                "NumberOfProcessors",
                "JDL",
                minNumberOfProcessors,
                "Exact number of processors requested",
            )
            self._addParameter(
                self.workflow,
                "MaxNumberOfProcessors",
                "JDL",
                minNumberOfProcessors,
                "Max Number of processors the job applications may use",
            )
            return S_OK()

        # Case 3: a genuine min/max range.
        # By this point there should be a min
        self._addParameter(
            self.workflow,
            "MinNumberOfProcessors",
            "JDL",
            minNumberOfProcessors,
            "Min Number of processors the job applications may use",
        )
        # If not set, will be "all"
        if maxNumberOfProcessors:
            self._addParameter(
                self.workflow,
                "MaxNumberOfProcessors",
                "JDL",
                maxNumberOfProcessors,
                "Max Number of processors the job applications may use",
            )

        return S_OK()
#############################################################################
def setDestinationCE(self, ceName, diracSite=None):
"""Developer function.
Allows to direct a job to a particular Grid CE.
"""
kwargs = {"ceName": ceName}
if not diracSite:
res = getCESiteMapping(ceName)
if not res["OK"]:
return self._reportError(res["Message"], **kwargs)
if not res["Value"]:
return self._reportError("No DIRAC site name found for CE %s" % (ceName), **kwargs)
diracSite = res["Value"][ceName]
self.setDestination(diracSite)
self._addJDLParameter("GridCE", ceName)
return S_OK()
#############################################################################
def setBannedSites(self, sites):
"""Helper function.
Can specify a desired destination site for job. This can be useful
for debugging purposes but often limits the possible candidate sites
and overall system response time.
Example usage:
>>> job = Job()
>>> job.setBannedSites(['LCG.GRIDKA.de','LCG.CNAF.it'])
:param sites: single site string or list
:type sites: str or python:list
"""
if isinstance(sites, list) and sites:
bannedSites = ";".join(sites)
description = "List of sites excluded by user"
self._addParameter(self.workflow, "BannedSites", "JDL", bannedSites, description)
elif isinstance(sites, str):
description = "Site excluded by user"
self._addParameter(self.workflow, "BannedSites", "JDL", sites, description)
else:
kwargs = {"sites": sites}
return self._reportError("Expected site string or list of sites", **kwargs)
return S_OK()
#############################################################################
def setOwner(self, ownerProvided):
"""Developer function.
Normally users should always specify their immutable DIRAC nickname.
"""
if not isinstance(ownerProvided, str):
return self._reportError("Expected string for owner", **{"ownerProvided": ownerProvided})
self._addParameter(self.workflow, "Owner", "JDL", ownerProvided, "User specified ID")
return S_OK()
#############################################################################
def setOwnerGroup(self, ownerGroup):
"""Developer function.
Allows to force expected owner group of proxy.
"""
if not isinstance(ownerGroup, str):
return self._reportError("Expected string for job owner group", **{"ownerGroup": ownerGroup})
self._addParameter(self.workflow, "OwnerGroup", "JDL", ownerGroup, "User specified owner group.")
return S_OK()
#############################################################################
def setOwnerDN(self, ownerDN):
"""Developer function.
Allows to force expected owner DN of proxy.
"""
if not isinstance(ownerDN, str):
return self._reportError("Expected string for job owner DN", **{"ownerGroup": ownerDN})
self._addParameter(self.workflow, "OwnerDN", "JDL", ownerDN, "User specified owner DN.")
return S_OK()
#############################################################################
def setType(self, jobType):
"""Developer function.
Specify job type for testing purposes.
"""
if not isinstance(jobType, str):
return self._reportError("Expected string for job type", **{"jobType": jobType})
self._addParameter(self.workflow, "JobType", "JDL", jobType, "User specified type")
self.type = jobType
return S_OK()
#############################################################################
def setTag(self, tags):
"""Set the Tags job requirements
Example usage:
>>> job = Job()
>>> job.setTag( ['WholeNode','8GBMemory'] )
:param tags: single tag string or a list of tags
:type tags: str or python:list
"""
if isinstance(tags, str):
tagValue = tags
elif isinstance(tags, list):
tagValue = ";".join(tags)
else:
return self._reportError("Expected string or list for job tags", tags=tags)
self._addParameter(self.workflow, "Tags", "JDL", tagValue, "User specified job tags")
return S_OK()
#############################################################################
def setJobGroup(self, jobGroup):
"""Helper function.
Allows to group certain jobs according to an ID.
Example usage:
>>> job = Job()
>>> job.setJobGroup('Bs2JPsiPhi')
:param jobGroup: JobGroup name
:type jobGroup: string
"""
if not isinstance(jobGroup, str):
return self._reportError("Expected string for job group name", **{"jobGroup": jobGroup})
description = "User specified job group"
self._addParameter(self.workflow, "JobGroup", "JDL", jobGroup, description)
return S_OK()
#############################################################################
def setLogLevel(self, logLevel):
"""Helper function.
Optionally specify a DIRAC logging level for the job, e.g.
ALWAYS, INFO, VERBOSE, WARN, DEBUG
by default this is set to the info level.
Example usage:
>>> job = Job()
>>> job.setLogLevel('debug')
:param logLevel: Logging level
:type logLevel: string
"""
kwargs = {"logLevel": logLevel}
if isinstance(logLevel, str):
if logLevel.upper() in gLogger.getAllPossibleLevels():
description = "User specified logging level"
self.logLevel = logLevel
self._addParameter(self.workflow, "LogLevel", "JDL", logLevel, description)
else:
return self._reportError('Error Level "%s" not valid' % logLevel, **kwargs)
else:
return self._reportError("Expected string for logging level", **kwargs)
return S_OK()
#############################################################################
def setConfigArgs(self, cfgString):
"""Developer function. Allow to pass arbitrary settings to the payload
configuration service environment.
"""
if not isinstance(cfgString, str):
return self._reportError("Expected string for DIRAC Job Config Args", **{"cfgString": cfgString})
description = "User specified cfg settings"
self._addParameter(self.workflow, "JobConfigArgs", "JDL", cfgString, description)
return S_OK()
#############################################################################
def setExecutionEnv(self, environmentDict):
"""Helper function.
Optionally specify a dictionary of key, value pairs to be set before
the job executes e.g. {'MYVAR':3}
The standard application environment variables are always set so this
is intended for user variables only.
Example usage:
>>> job = Job()
>>> job.setExecutionEnv({'<MYVARIABLE>':'<VALUE>'})
:param environmentDict: Environment variables
:type environmentDict: dictionary
"""
kwargs = {"environmentDict": environmentDict}
if not isinstance(environmentDict, dict):
return self._reportError("Expected dictionary of environment variables", **kwargs)
if environmentDict:
environment = []
for var, val in environmentDict.items():
try:
environment.append("=".join([str(var), quote(str(val))]))
except Exception:
return self._reportError("Expected string for environment variable key value pairs", **kwargs)
envStr = ";".join(environment)
description = "Env vars specified by user"
self._addParameter(self.workflow, "ExecutionEnvironment", "JDL", envStr, description)
return S_OK()
#############################################################################
def execute(self):
"""Developer function. Executes the job locally."""
self.workflow.createCode()
self.workflow.execute()
#############################################################################
def _getParameters(self):
"""Developer function.
Method to return the workflow parameters.
"""
wfParams = {}
params = self.workflow.parameters
for par in params:
wfParams[par.getName()] = par.getValue()
return wfParams
#############################################################################
def __setJobDefaults(self):
"""Set job default values. Note that the system configuration is set to "ANY"."""
self._addParameter(self.workflow, "JobType", "JDL", self.type, "Job Type")
self._addParameter(self.workflow, "Priority", "JDL", self.priority, "User Job Priority")
self._addParameter(self.workflow, "JobGroup", "JDL", self.group, "Name of the JobGroup")
self._addParameter(self.workflow, "JobName", "JDL", self.name, "Name of Job")
self._addParameter(self.workflow, "StdOutput", "JDL", self.stdout, "Standard output file")
self._addParameter(self.workflow, "StdError", "JDL", self.stderr, "Standard error file")
self._addParameter(self.workflow, "InputData", "JDL", "", "Default null input data value")
self._addParameter(self.workflow, "LogLevel", "JDL", self.logLevel, "Job Logging Level")
self._addParameter(self.workflow, "arguments", "string", "", "Arguments to executable Step")
# Those 2 below are need for on-site resolution
self._addParameter(
self.workflow, "ParametricInputData", "string", "", "Default null parametric input data value"
)
self._addParameter(
self.workflow, "ParametricInputSandbox", "string", "", "Default null parametric input sandbox value"
)
#############################################################################
@staticmethod
def _addParameter(wObject, name, ptype, value, description, io="input"):
    """Internal Function

    Adds a parameter to the object.

    :param wObject: workflow-like object exposing addParameter
    :param str io: "input" or "output"; anything else raises TypeError
    """
    try:
        inBool, outBool = {"input": (True, False), "output": (False, True)}[io]
    except KeyError:
        raise TypeError("I/O flag is either input or output")
    par = Parameter(name, value, ptype, "", "", inBool, outBool, description)
    wObject.addParameter(Parameter(parameter=par))
############################################################################
def _resolveInputSandbox(self, inputSandbox):
"""Internal function.
Resolves wildcards for input sandbox files. This is currently linux
specific and should be modified.
"""
resolvedIS = []
for i in inputSandbox:
if not re.search(r"\*", i):
if not os.path.isdir(i):
resolvedIS.append(i)
for name in inputSandbox:
if re.search(r"\*", name): # escape the star character...
cmd = "ls -d " + name
output = systemCall(10, shlex.split(cmd))
if not output["OK"]:
self.log.error("Could not perform: ", cmd)
elif output["Value"][0]:
self.log.error(" Failed getting the files ", output["Value"][2])
else:
files = output["Value"][1].split()
for check in files:
if os.path.isfile(check):
self.log.verbose("Found file " + check + " appending to Input Sandbox")
resolvedIS.append(check)
if os.path.isdir(check):
if re.search("/$", check): # users can specify e.g. /my/dir/lib/
check = check[:-1]
tarName = os.path.basename(check)
directory = os.path.dirname(check) # if just the directory this is null
if directory:
cmd = "tar cfz " + tarName + ".tar.gz " + " -C " + directory + " " + tarName
else:
cmd = "tar cfz " + tarName + ".tar.gz " + tarName
output = systemCall(60, shlex.split(cmd))
if not output["OK"]:
self.log.error("Could not perform: %s" % (cmd))
resolvedIS.append(tarName + ".tar.gz")
self.log.verbose(
"Found directory " + check + ", appending " + check + ".tar.gz to Input Sandbox"
)
if os.path.isdir(name):
self.log.verbose(
"Found specified directory " + name + ", appending " + name + ".tar.gz to Input Sandbox"
)
if re.search("/$", name): # users can specify e.g. /my/dir/lib/
name = name[:-1]
tarName = os.path.basename(name)
directory = os.path.dirname(name) # if just the directory this is null
if directory:
cmd = "tar cfz " + tarName + ".tar.gz " + " -C " + directory + " " + tarName
else:
cmd = "tar cfz " + tarName + ".tar.gz " + tarName
output = systemCall(60, shlex.split(cmd))
if not output["OK"]:
self.log.error("Could not perform: %s" % (cmd))
else:
resolvedIS.append(tarName + ".tar.gz")
return resolvedIS
#############################################################################
def _toXML(self):
"""Returns an XML representation of itself as a Job."""
return self.workflow.toXML()
def _handleParameterSequences(self, paramsDict, arguments):
for pName in self.parameterSeqs:
if pName in paramsDict:
if pName == "InputSandbox":
if isinstance(paramsDict[pName]["value"], list):
paramsDict[pName]["value"].append("%%(%s)s" % pName)
elif isinstance(paramsDict[pName]["value"], str):
if paramsDict[pName]["value"]:
paramsDict[pName]["value"] += ";%%(%s)s" % pName
else:
paramsDict[pName]["value"] = "%%(%s)s" % pName
elif "jdl" in paramsDict[pName]["type"].lower():
# If a parameter with the same name as the sequence name already exists
# and is a list, then extend it by the sequence value. If it is not a
# list, then replace it by the sequence value
if isinstance(paramsDict[pName]["value"], list):
currentParams = paramsDict[pName]["value"]
tmpList = []
pData = self.parameterSeqs[pName]
if isinstance(pData[0], list):
for pElement in pData:
tmpList.append(currentParams + pElement)
else:
for pElement in pData:
tmpList.append(currentParams + [pElement])
self.parameterSeqs[pName] = tmpList
paramsDict[pName]["value"] = "%%(%s)s" % pName
else:
paramsDict[pName] = {}
paramsDict[pName]["type"] = "JDL"
paramsDict[pName]["value"] = "%%(%s)s" % pName
paramsDict["Parameters.%s" % pName] = {}
paramsDict["Parameters.%s" % pName]["value"] = self.parameterSeqs[pName]
paramsDict["Parameters.%s" % pName]["type"] = "JDL"
if pName in self.wfArguments:
arguments.append(" -p %s=%%(%s)s" % (self.wfArguments[pName], pName))
return paramsDict, arguments
#############################################################################
def _toJDL(self, xmlFile="", jobDescriptionObject=None):
"""Creates a JDL representation of itself as a Job.
Example usage:
>>> job = Job()
>>> job._toJDL()
:param xmlFile: location of the XML file
:type xmlFile: str
:param jobDescriptionObject: if not None, it must be a StringIO object
:type jobDescriptionObject: StringIO
:returns: JDL (str)
"""
# Check if we have to do old bootstrap...
classadJob = ClassAd("[]")
paramsDict = {}
params = self.workflow.parameters # ParameterCollection object
paramList = params
for param in paramList:
paramsDict[param.getName()] = {"type": param.getType(), "value": param.getValue()}
arguments = []
scriptName = "jobDescription.xml"
if jobDescriptionObject is None:
# if we are here it's because there's a real file, on disk, that is named 'jobDescription.xml'
# Messy but need to account for xml file being in /tmp/guid dir
if self.script:
if os.path.exists(self.script):
scriptName = os.path.abspath(self.script)
self.log.verbose("Found script name %s" % scriptName)
else:
self.log.warn("File not found", self.script)
else:
if xmlFile:
if os.path.exists(xmlFile):
self.log.verbose("Found XML File %s" % xmlFile)
scriptName = xmlFile
else:
self.log.warn("File not found", xmlFile)
else:
if os.path.exists("jobDescription.xml"):
scriptName = os.path.abspath("jobDescription.xml")
self.log.verbose("Found script name %s" % scriptName)
else:
self.log.warn("Job description XML file not found")
self.addToInputSandbox.append(scriptName)
elif isinstance(jobDescriptionObject, StringIO):
self.log.verbose("jobDescription is passed in as a StringIO object")
else:
self.log.error("Where's the job description?")
arguments.append(os.path.basename(scriptName))
if "LogLevel" in paramsDict:
if paramsDict["LogLevel"]["value"]:
arguments.append("-o LogLevel=%s" % (paramsDict["LogLevel"]["value"]))
else:
self.log.warn("Job LogLevel defined with null value")
if "DIRACSetup" in paramsDict:
if paramsDict["DIRACSetup"]["value"]:
arguments.append("-o DIRAC/Setup=%s" % (paramsDict["DIRACSetup"]["value"]))
else:
self.log.warn("Job DIRACSetup defined with null value")
if "JobConfigArgs" in paramsDict:
if paramsDict["JobConfigArgs"]["value"]:
arguments.append("--cfg %s" % (paramsDict["JobConfigArgs"]["value"]))
else:
self.log.warn("JobConfigArgs defined with null value")
if self.parametricWFArguments:
for name, value in self.parametricWFArguments.items():
arguments.append("-p %s='%s'" % (name, value))
classadJob.insertAttributeString("Executable", self.executable)
self.addToOutputSandbox.append(self.stderr)
self.addToOutputSandbox.append(self.stdout)
# Extract i/o sandbox parameters from steps and any input data parameters
# to do when introducing step-level api...
# To add any additional files to input and output sandboxes
if self.addToInputSandbox:
extraFiles = ";".join(self.addToInputSandbox)
if "InputSandbox" in paramsDict:
currentFiles = paramsDict["InputSandbox"]["value"]
finalInputSandbox = currentFiles + ";" + extraFiles
uniqueInputSandbox = uniqueElements(finalInputSandbox.split(";"))
paramsDict["InputSandbox"]["value"] = ";".join(uniqueInputSandbox)
self.log.verbose("Final unique Input Sandbox %s" % (";".join(uniqueInputSandbox)))
else:
paramsDict["InputSandbox"] = {}
paramsDict["InputSandbox"]["value"] = extraFiles
paramsDict["InputSandbox"]["type"] = "JDL"
if self.addToOutputSandbox:
extraFiles = ";".join(self.addToOutputSandbox)
if "OutputSandbox" in paramsDict:
currentFiles = paramsDict["OutputSandbox"]["value"]
finalOutputSandbox = currentFiles + ";" + extraFiles
uniqueOutputSandbox = uniqueElements(finalOutputSandbox.split(";"))
paramsDict["OutputSandbox"]["value"] = ";".join(uniqueOutputSandbox)
self.log.verbose("Final unique Output Sandbox %s" % (";".join(uniqueOutputSandbox)))
else:
paramsDict["OutputSandbox"] = {}
paramsDict["OutputSandbox"]["value"] = extraFiles
paramsDict["OutputSandbox"]["type"] = "JDL"
if self.addToInputData:
extraFiles = ";".join(self.addToInputData)
if "InputData" in paramsDict:
currentFiles = paramsDict["InputData"]["value"]
finalInputData = extraFiles
if currentFiles:
finalInputData = currentFiles + ";" + extraFiles
uniqueInputData = uniqueElements(finalInputData.split(";"))
paramsDict["InputData"]["value"] = ";".join(uniqueInputData)
self.log.verbose("Final unique Input Data %s" % (";".join(uniqueInputData)))
else:
paramsDict["InputData"] = {}
paramsDict["InputData"]["value"] = extraFiles
paramsDict["InputData"]["type"] = "JDL"
# Handle parameter sequences
if self.numberOfParameters > 0:
paramsDict, arguments = self._handleParameterSequences(paramsDict, arguments)
classadJob.insertAttributeString("Arguments", " ".join(arguments))
# Add any JDL parameters to classad obeying lists with ';' rule
for name, props in paramsDict.items():
ptype = props["type"]
value = props["value"]
if isinstance(value, str) and re.search(";", value):
value = value.split(";")
if name.lower() == "requirements" and ptype == "JDL":
self.log.verbose("Found existing requirements: %s" % (value))
if re.search("^JDL", ptype):
if isinstance(value, list):
if isinstance(value[0], list):
classadJob.insertAttributeVectorStringList(name, value)
else:
classadJob.insertAttributeVectorInt(name, value)
elif isinstance(value, str) and value:
classadJob.insertAttributeInt(name, value)
elif isinstance(value, (int, float)):
classadJob.insertAttributeInt(name, value)
if self.numberOfParameters > 0:
classadJob.insertAttributeInt("Parameters", self.numberOfParameters)
for fToBeRemoved in [scriptName, self.stdout, self.stderr]:
try:
self.addToInputSandbox.remove(fToBeRemoved)
except ValueError:
pass
jdl = classadJob.asJDL()
start = jdl.find("[")
end = jdl.rfind("]")
return jdl[(start + 1) : (end - 1)]
#############################################################################
def _setParamValue(self, name, value):
"""Internal Function. Sets a parameter value, used for production."""
return self.workflow.setValue(name, value)
#############################################################################
def _addJDLParameter(self, name, value):
"""Developer function, add an arbitrary JDL parameter."""
self._addParameter(self.workflow, name, "JDL", value, "Optional JDL parameter added")
return self.workflow.setValue(name, value)
#############################################################################
def runLocal(self, dirac=None):
"""The dirac (API) object is for local submission."""
if dirac is None:
dirac = Dirac()
return dirac.submitJob(self, mode="local")
| DIRACGrid/DIRAC | src/DIRAC/Interfaces/API/Job.py | Python | gpl-3.0 | 50,982 | [
"DIRAC"
] | 4bc178237b348d8f73362fed71ea133b439892d35fd02b8407f3c5cb03c91e53 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import petl as etl
from petl.compat import text_type
from petl.util.base import Table
def fromtabix(filename, reference=None, start=None, stop=None, region=None,
              header=None):
    """
    Extract rows from a tabix indexed file, e.g.::

        >>> import petl as etl
        >>> # activate bio extensions
        ... import petlx.bio
        >>> table1 = etl.fromtabix('fixture/test.bed.gz',
        ...                        region='Pf3D7_02_v3')
        >>> table1
        +---------------+----------+----------+-----------------------------+
        | #chrom        | start    | end      | region                      |
        +===============+==========+==========+=============================+
        | 'Pf3D7_02_v3' | '0'      | '23100'  | 'SubtelomericRepeat'        |
        +---------------+----------+----------+-----------------------------+
        | 'Pf3D7_02_v3' | '23100'  | '105800' | 'SubtelomericHypervariable' |
        +---------------+----------+----------+-----------------------------+
        | 'Pf3D7_02_v3' | '105800' | '447300' | 'Core'                      |
        +---------------+----------+----------+-----------------------------+
        | 'Pf3D7_02_v3' | '447300' | '450450' | 'Centromere'                |
        +---------------+----------+----------+-----------------------------+
        | 'Pf3D7_02_v3' | '450450' | '862500' | 'Core'                      |
        +---------------+----------+----------+-----------------------------+
        ...

        >>> table2 = etl.fromtabix('fixture/test.bed.gz',
        ...                        region='Pf3D7_02_v3:110000-120000')
        >>> table2
        +---------------+----------+----------+--------+
        | #chrom        | start    | end      | region |
        +===============+==========+==========+========+
        | 'Pf3D7_02_v3' | '105800' | '447300' | 'Core' |
        +---------------+----------+----------+--------+

    :param filename: path to the bgzipped, tabix-indexed file
    :param reference: reference sequence name to restrict rows to
    :param start: start position of the query interval
    :param stop: stop position of the query interval
    :param region: samtools-style region string, e.g. ``'chr2:100-200'``
    :param header: optional explicit header row; when omitted the last
        line of the file's own header is used
    """
    return TabixView(filename, reference, start, stop, region, header)


# Register the function as an extension on the petl namespace.
etl.fromtabix = fromtabix
class TabixView(Table):
    """petl table view over a tabix-indexed file.

    Rows are fetched lazily: the underlying file is opened on each
    iteration and always closed when iteration finishes or fails.
    """

    def __init__(self, filename, reference=None, start=None, stop=None,
                 region=None, header=None):
        self.filename = filename
        self.reference = reference
        self.start = start
        self.stop = stop
        self.region = region
        self.header = header

    def __iter__(self):
        from pysam import Tabixfile, asTuple
        f = Tabixfile(self.filename, mode='r')
        # The original wrapped this in a redundant bare ``except: raise``;
        # try/finally alone already guarantees the file is closed.
        try:
            # header row
            if self.header is not None:
                yield self.header
            else:
                # assume last header line has fields
                h = list(f.header)
                if len(h) > 0:
                    header_line = text_type(h[-1], encoding='ascii')
                    yield tuple(header_line.split('\t'))

            # data rows
            for row in f.fetch(reference=self.reference, start=self.start,
                               end=self.stop, region=self.region,
                               parser=asTuple()):
                yield tuple(row)

        finally:
            f.close()
| alimanfoo/petlx | petlx/bio/tabix.py | Python | mit | 3,291 | [
"pysam"
] | 883849e8c9917972c5f81c115c394b010365a81dd7bb11a501f8c78145bfe6e2 |
########################################################################
# $HeadURL$
########################################################################
""" ProxyManagementAPI has the functions to "talk" to the ProxyManagement service
"""
__RCSID__ = "$Id$"
import os
import datetime
import types
from DIRAC.Core.Utilities import Time, ThreadSafe, DIRACSingleton
from DIRAC.Core.Utilities.DictCache import DictCache
from DIRAC.Core.Security import Locations, CS, File, Properties
from DIRAC.Core.Security.X509Chain import X509Chain, g_X509ChainType
from DIRAC.Core.Security.X509Request import X509Request
from DIRAC.Core.Security.VOMS import VOMS
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC import S_OK, S_ERROR, gLogger
# Module-level synchronizers used as decorators to serialise access to the
# corresponding ProxyManagerClient caches.
gUsersSync = ThreadSafe.Synchronizer()
gProxiesSync = ThreadSafe.Synchronizer()
gVOMSProxiesSync = ThreadSafe.Synchronizer()
class ProxyManagerClient:
__metaclass__ = DIRACSingleton.DIRACSingleton
def __init__( self ):
  """Initialise the per-purpose caches (users, proxies, VOMS proxies,
  pilot proxies) and a file cache whose purge callback removes the
  temporary proxy files from disk."""
  self.__usersCache = DictCache()
  self.__proxiesCache = DictCache()
  self.__vomsProxiesCache = DictCache()
  self.__pilotProxiesCache = DictCache()
  # Entries are temp file paths; deleted from disk when purged.
  self.__filesCache = DictCache( self.__deleteTemporalFile )
def __deleteTemporalFile( self, filename ):
  """Best-effort removal of a temporary proxy file (files-cache purge callback).

  :param filename: path of the file to delete
  """
  try:
    os.unlink( filename )
  except OSError:
    # Narrowed from a bare except: only ignore filesystem errors
    # (file already gone, permissions); let real bugs propagate.
    pass
def clearCaches( self ):
  """Flush every user/proxy cache held by this client."""
  for cache in ( self.__usersCache, self.__proxiesCache,
                 self.__vomsProxiesCache, self.__pilotProxiesCache ):
    cache.purgeAll()
def __getSecondsLeftToExpiration( self, expiration, utc = True ):
  """Seconds from now until `expiration` (a datetime), using the UTC
  clock by default or the local clock when utc is False."""
  now = datetime.datetime.utcnow() if utc else datetime.datetime.now()
  delta = expiration - now
  return delta.days * 86400 + delta.seconds
def __refreshUserCache( self, validSeconds = 0 ):
  """Repopulate the users cache from the ProxyManager service.

  :param validSeconds: only consider stored proxies valid for at least
                       this many more seconds
  :return: S_OK/S_ERROR
  """
  rpcClient = RPCClient( "Framework/ProxyManager", timeout = 120 )
  retVal = rpcClient.getRegisteredUsers( validSeconds )
  if not retVal[ 'OK' ]:
    return retVal
  data = retVal[ 'Value' ]
  #Update the cache
  for record in data:
    # Cache key is the (DN, group) pair; entry lifetime tracks the proxy.
    cacheKey = ( record[ 'DN' ], record[ 'group' ] )
    self.__usersCache.add( cacheKey,
                           self.__getSecondsLeftToExpiration( record[ 'expirationtime' ] ),
                           record )
  return S_OK()
@gUsersSync
def userHasProxy( self, userDN, userGroup, validSeconds = 0 ):
  """
  Check if a user(DN-group) has a proxy in the proxy management
    - Updates internal cache if needed to minimize queries to the
        service
  """
  key = ( userDN, userGroup )
  if self.__usersCache.exists( key, validSeconds ):
    return S_OK( True )
  #Get list of users from the DB with proxys at least 300 seconds
  gLogger.verbose( "Updating list of users in proxy management" )
  refreshResult = self.__refreshUserCache( validSeconds )
  if not refreshResult[ 'OK' ]:
    return refreshResult
  return S_OK( self.__usersCache.exists( key, validSeconds ) )
@gUsersSync
def getUserPersistence( self, userDN, userGroup, validSeconds = 0 ):
  """
  Return the persistency flag stored for a user (DN, group).
    - Updates internal cache if needed to minimize queries to the
        service
  (The previous docstring was copy-pasted from userHasProxy.)
  """
  cacheKey = ( userDN, userGroup )
  userData = self.__usersCache.get( cacheKey, validSeconds )
  if userData:
    if userData[ 'persistent' ]:
      # A cached positive answer is trusted without refresh.
      return S_OK( True )
  #Get list of users from the DB with proxys at least 300 seconds
  gLogger.verbose( "Updating list of users in proxy management" )
  retVal = self.__refreshUserCache( validSeconds )
  if not retVal[ 'OK' ]:
    return retVal
  userData = self.__usersCache.get( cacheKey, validSeconds )
  if userData:
    return S_OK( userData[ 'persistent' ] )
  # Unknown user: report non-persistent.
  return S_OK( False )
def setPersistency( self, userDN, userGroup, persistent ):
  """
  Set the persistency for user/group

  :param userDN: user certificate DN
  :param userGroup: DIRAC group
  :param persistent: truthy value enabling persistency
  :return: S_OK/S_ERROR
  """
  # bool() guarantees a real boolean over the wire; replaces the previous
  # hand-rolled True/False assignment with identical semantics.
  persistentFlag = bool( persistent )
  rpcClient = RPCClient( "Framework/ProxyManager", timeout = 120 )
  retVal = rpcClient.setPersistency( userDN, userGroup, persistentFlag )
  if not retVal[ 'OK' ]:
    return retVal
  #Update internal persistency cache
  cacheKey = ( userDN, userGroup )
  record = self.__usersCache.get( cacheKey, 0 )
  if record:
    record[ 'persistent' ] = persistentFlag
    self.__usersCache.add( cacheKey,
                           self.__getSecondsLeftToExpiration( record[ 'expirationtime' ] ),
                           record )
  return retVal
def uploadProxy( self, proxy = False, diracGroup = False, chainToConnect = False, restrictLifeTime = 0 ):
  """
  Upload a proxy to the proxy management service using delegation

  :param proxy: X509Chain object, a path to a proxy file, or False to
                auto-discover the proxy location
  :param diracGroup: DIRAC group to embed in the delegated proxy
  :param chainToConnect: chain used for the service connection
                         (defaults to the uploaded chain)
  :param restrictLifeTime: cap (seconds) on the delegated chain lifetime
  :return: S_OK/S_ERROR; S_OK may carry the registered proxies info
  """
  #Discover proxy location
  if type( proxy ) == g_X509ChainType:
    chain = proxy
    proxyLocation = ""
  else:
    if not proxy:
      proxyLocation = Locations.getProxyLocation()
      if not proxyLocation:
        return S_ERROR( "Can't find a valid proxy" )
    elif type( proxy ) in ( types.StringType, types.UnicodeType ):
      # Python-2 string types: a path to the proxy file.
      proxyLocation = proxy
    else:
      return S_ERROR( "Can't find a valid proxy" )
    chain = X509Chain()
    result = chain.loadProxyFromFile( proxyLocation )
    if not result[ 'OK' ]:
      return S_ERROR( "Can't load %s: %s " % ( proxyLocation, result[ 'Message' ] ) )
  if not chainToConnect:
    chainToConnect = chain
  #Make sure it's valid
  if chain.hasExpired()[ 'Value' ]:
    return S_ERROR( "Proxy %s has expired" % proxyLocation )
  #rpcClient = RPCClient( "Framework/ProxyManager", proxyChain = chainToConnect )
  rpcClient = RPCClient( "Framework/ProxyManager", timeout = 120 )
  #Get a delegation request
  result = rpcClient.requestDelegationUpload( chain.getRemainingSecs()['Value'], diracGroup )
  if not result[ 'OK' ]:
    return result
  #Check if the delegation has been granted
  if 'Value' not in result or not result[ 'Value' ]:
    # Empty request means the service already holds a suitable proxy.
    if 'proxies' in result:
      return S_OK( result[ 'proxies' ] )
    else:
      return S_OK()
  reqDict = result[ 'Value' ]
  #Generate delegated chain
  # 60-second safety margin below the parent chain's remaining lifetime.
  chainLifeTime = chain.getRemainingSecs()[ 'Value' ] - 60
  if restrictLifeTime and restrictLifeTime < chainLifeTime:
    chainLifeTime = restrictLifeTime
  retVal = chain.generateChainFromRequestString( reqDict[ 'request' ],
                                                 lifetime = chainLifeTime,
                                                 diracGroup = diracGroup )
  if not retVal[ 'OK' ]:
    return retVal
  #Upload!
  result = rpcClient.completeDelegationUpload( reqDict[ 'id' ], retVal[ 'Value' ] )
  if not result[ 'OK' ]:
    return result
  if 'proxies' in result:
    return S_OK( result[ 'proxies' ] )
  return S_OK()
@gProxiesSync
def downloadProxy( self, userDN, userGroup, limited = False, requiredTimeLeft = 1200,
                   cacheTime = 43200, proxyToConnect = False, token = False ):
  """
  Get a proxy Chain from the proxy management

  :param limited: request a limited (job-payload) proxy
  :param requiredTimeLeft: minimum remaining validity (seconds) for a
                           cached chain to be reused
  :param cacheTime: extra lifetime (seconds) requested beyond requiredTimeLeft
  :param proxyToConnect: optional chain used for the service connection
  :param token: optional token authorising the retrieval
  :return: S_OK(X509Chain)/S_ERROR
  """
  cacheKey = ( userDN, userGroup )
  if self.__proxiesCache.exists( cacheKey, requiredTimeLeft ):
    return S_OK( self.__proxiesCache.get( cacheKey ) )
  # Generate a fresh key pair and certificate request for delegation.
  req = X509Request()
  req.generateProxyRequest( limited = limited )
  if proxyToConnect:
    rpcClient = RPCClient( "Framework/ProxyManager", proxyChain = proxyToConnect, timeout = 120 )
  else:
    rpcClient = RPCClient( "Framework/ProxyManager", timeout = 120 )
  if token:
    retVal = rpcClient.getProxyWithToken( userDN, userGroup, req.dumpRequest()['Value'],
                                          long( cacheTime + requiredTimeLeft ), token )
  else:
    retVal = rpcClient.getProxy( userDN, userGroup, req.dumpRequest()['Value'],
                                 long( cacheTime + requiredTimeLeft ) )
  if not retVal[ 'OK' ]:
    return retVal
  chain = X509Chain( keyObj = req.getPKey() )
  retVal = chain.loadChainFromString( retVal[ 'Value' ] )
  if not retVal[ 'OK' ]:
    return retVal
  self.__proxiesCache.add( cacheKey, chain.getRemainingSecs()['Value'], chain )
  return S_OK( chain )
def downloadProxyToFile( self, userDN, userGroup, limited = False, requiredTimeLeft = 1200,
                         cacheTime = 43200, filePath = False, proxyToConnect = False, token = False ):
  """
  Get a proxy Chain from the proxy management and write it to file
  """
  result = self.downloadProxy( userDN, userGroup, limited, requiredTimeLeft, cacheTime, proxyToConnect, token )
  if not result[ 'OK' ]:
    return result
  chain = result[ 'Value' ]
  dumpResult = self.dumpProxyToFile( chain, filePath )
  if not dumpResult[ 'OK' ]:
    return dumpResult
  # Expose the chain alongside the file location in the returned structure.
  dumpResult[ 'chain' ] = chain
  return dumpResult
@gVOMSProxiesSync
def downloadVOMSProxy( self, userDN, userGroup, limited = False, requiredTimeLeft = 1200,
                       cacheTime = 43200, requiredVOMSAttribute = False, proxyToConnect = False, token = False ):
  """
  Download a proxy if needed and transform it into a VOMS one

  :param requiredVOMSAttribute: VOMS attribute to request for the proxy
  :param token: optional token authorising the retrieval
  :return: S_OK(X509Chain)/S_ERROR
  """
  # The VOMS attribute and limited flag are part of the cache key, so
  # different attribute requests do not collide.
  cacheKey = ( userDN, userGroup, requiredVOMSAttribute, limited )
  if self.__vomsProxiesCache.exists( cacheKey, requiredTimeLeft ):
    return S_OK( self.__vomsProxiesCache.get( cacheKey ) )
  req = X509Request()
  req.generateProxyRequest( limited = limited )
  if proxyToConnect:
    rpcClient = RPCClient( "Framework/ProxyManager", proxyChain = proxyToConnect, timeout = 120 )
  else:
    rpcClient = RPCClient( "Framework/ProxyManager", timeout = 120 )
  if token:
    retVal = rpcClient.getVOMSProxyWithToken( userDN, userGroup, req.dumpRequest()['Value'],
                                              long( cacheTime + requiredTimeLeft ), token, requiredVOMSAttribute )
  else:
    retVal = rpcClient.getVOMSProxy( userDN, userGroup, req.dumpRequest()['Value'],
                                     long( cacheTime + requiredTimeLeft ), requiredVOMSAttribute )
  if not retVal[ 'OK' ]:
    return retVal
  chain = X509Chain( keyObj = req.getPKey() )
  retVal = chain.loadChainFromString( retVal[ 'Value' ] )
  if not retVal[ 'OK' ]:
    return retVal
  self.__vomsProxiesCache.add( cacheKey, chain.getRemainingSecs()['Value'], chain )
  return S_OK( chain )
def downloadVOMSProxyToFile( self, userDN, userGroup, limited = False, requiredTimeLeft = 1200, cacheTime = 43200,
                             requiredVOMSAttribute = False, filePath = False, proxyToConnect = False, token = False ):
  """
  Download a proxy if needed, transform it into a VOMS one and write it to file
  """
  result = self.downloadVOMSProxy( userDN, userGroup, limited, requiredTimeLeft, cacheTime,
                                   requiredVOMSAttribute, proxyToConnect, token )
  if not result[ 'OK' ]:
    return result
  chain = result[ 'Value' ]
  dumpResult = self.dumpProxyToFile( chain, filePath )
  if not dumpResult[ 'OK' ]:
    return dumpResult
  # Expose the chain alongside the file location in the returned structure.
  dumpResult[ 'chain' ] = chain
  return dumpResult
def getPilotProxyFromDIRACGroup( self, userDN, userGroup, requiredTimeLeft = 43200, proxyToConnect = False ):
  """
  Download a pilot proxy with VOMS extensions depending on the group
  """
  #Assign VOMS attribute
  vomsAttr = CS.getVOMSAttributeForGroup( userGroup )
  if vomsAttr:
    return self.downloadVOMSProxy( userDN, userGroup, limited = False, requiredTimeLeft = requiredTimeLeft,
                                   requiredVOMSAttribute = vomsAttr, proxyToConnect = proxyToConnect )
  gLogger.verbose( "No voms attribute assigned to group %s when requested pilot proxy" % userGroup )
  return self.downloadProxy( userDN, userGroup, limited = False, requiredTimeLeft = requiredTimeLeft,
                             proxyToConnect = proxyToConnect )
def getPilotProxyFromVOMSGroup( self, userDN, vomsAttr, requiredTimeLeft = 43200, proxyToConnect = False ):
    """
    Download a pilot proxy with VOMS extensions depending on the group

    Every DIRAC group mapped to the given VOMS attribute is tried in turn
    until one download succeeds.

    :param userDN: DN of the pilot owner
    :param vomsAttr: VOMS attribute the proxy must carry
    :param requiredTimeLeft: minimum remaining lifetime in seconds (default 12 h)
    :param proxyToConnect: proxy chain used to connect to the ProxyManager service
    :return: S_OK( X509Chain ) of the first successful group, or the last S_ERROR
    """
    groups = CS.getGroupsWithVOMSAttribute( vomsAttr )
    if not groups:
        return S_ERROR( "No group found that has %s as voms attrs" % vomsAttr )
    for userGroup in groups:
        result = self.downloadVOMSProxy( userDN,
                                         userGroup,
                                         limited = False,
                                         requiredTimeLeft = requiredTimeLeft,
                                         requiredVOMSAttribute = vomsAttr,
                                         proxyToConnect = proxyToConnect )
        if result['OK']:
            return result
    # Every candidate group failed: propagate the last error
    return result
def getPayloadProxyFromDIRACGroup( self, userDN, userGroup, requiredTimeLeft, token = False, proxyToConnect = False ):
    """
    Download a payload proxy with VOMS extensions depending on the group

    Same logic as getPilotProxyFromDIRACGroup but the proxy is limited
    (payload proxies cannot be further delegated) and a retrieval token
    may be supplied.

    :param userDN: DN of the payload owner
    :param userGroup: DIRAC group whose VOMS mapping decides the extensions
    :param requiredTimeLeft: minimum remaining lifetime in seconds
    :param token: token authorizing the retrieval
    :param proxyToConnect: proxy chain used to connect to the ProxyManager service
    :return: S_OK( X509Chain ) or S_ERROR
    """
    #Assign VOMS attribute
    vomsAttr = CS.getVOMSAttributeForGroup( userGroup )
    if not vomsAttr:
        # Group has no VOMS mapping: fall back to a plain limited proxy
        gLogger.verbose( "No voms attribute assigned to group %s when requested payload proxy" % userGroup )
        return self.downloadProxy( userDN, userGroup, limited = True, requiredTimeLeft = requiredTimeLeft,
                                   proxyToConnect = proxyToConnect, token = token )
    else:
        return self.downloadVOMSProxy( userDN, userGroup, limited = True, requiredTimeLeft = requiredTimeLeft,
                                       requiredVOMSAttribute = vomsAttr, proxyToConnect = proxyToConnect,
                                       token = token )
def getPayloadProxyFromVOMSGroup( self, userDN, vomsAttr, token, requiredTimeLeft, proxyToConnect = False ):
    """
    Download a payload proxy with VOMS extensions depending on the VOMS attr

    Unlike the pilot variant, only the FIRST DIRAC group mapped to the
    VOMS attribute is used -- no fallback over the other groups.

    :param userDN: DN of the payload owner
    :param vomsAttr: VOMS attribute the proxy must carry
    :param token: token authorizing the retrieval
    :param requiredTimeLeft: minimum remaining lifetime in seconds
    :param proxyToConnect: proxy chain used to connect to the ProxyManager service
    :return: S_OK( X509Chain ) or S_ERROR
    """
    groups = CS.getGroupsWithVOMSAttribute( vomsAttr )
    if not groups:
        return S_ERROR( "No group found that has %s as voms attrs" % vomsAttr )
    userGroup = groups[0]
    return self.downloadVOMSProxy( userDN,
                                   userGroup,
                                   limited = True,
                                   requiredTimeLeft = requiredTimeLeft,
                                   requiredVOMSAttribute = vomsAttr,
                                   proxyToConnect = proxyToConnect,
                                   token = token )
def dumpProxyToFile( self, chain, destinationFile = False, requiredTimeLeft = 600 ):
    """
    Dump a proxy to a file. It's cached so multiple calls won't generate extra files

    :param chain: X509Chain to dump
    :param destinationFile: target path; False lets the chain choose a temporary file
    :param requiredTimeLeft: minimum residual cache validity (seconds) for reusing
                             a previously dumped file
    :return: S_OK( filename ) or S_ERROR
    """
    result = chain.hash()
    if not result[ 'OK' ]:
        return result
    # Renamed from 'hash' to avoid shadowing the Python builtin
    proxyHash = result[ 'Value' ]
    if self.__filesCache.exists( proxyHash, requiredTimeLeft ):
        filepath = self.__filesCache.get( proxyHash )
        # The entry may have expired between exists() and get(), or the file
        # may have been deleted externally -- only reuse a real, present file
        if filepath and os.path.isfile( filepath ):
            return S_OK( filepath )
        self.__filesCache.delete( proxyHash )
    retVal = chain.dumpAllToFile( destinationFile )
    if not retVal[ 'OK' ]:
        return retVal
    filename = retVal[ 'Value' ]
    # Cache the dumped file for as long as the proxy itself remains valid
    self.__filesCache.add( proxyHash, chain.getRemainingSecs()['Value'], filename )
    return S_OK( filename )
def deleteGeneratedProxyFile( self, chain ):
    """
    Delete a file generated by a dump

    :param chain: X509Chain whose dumped file should be removed from the cache
    :return: S_OK()
    """
    # NOTE(review): dumpProxyToFile keys the cache by chain.hash()['Value'],
    # but here the chain object itself is passed as the key -- confirm the
    # cache accepts both, otherwise this entry is never found.
    self.__filesCache.delete( chain )
    return S_OK()
def requestToken( self, requesterDN, requesterGroup, numUses = 1 ):
    """
    Request a number of tokens. usesList must be a list of integers and each integer is the number of uses a token
    must have

    :param requesterDN: DN of the entity requesting the token
    :param requesterGroup: DIRAC group of the requester
    :param numUses: number of times the generated token may be used
    :return: RPC result from the ProxyManager service
    """
    rpcClient = RPCClient( "Framework/ProxyManager", timeout = 120 )
    return rpcClient.generateToken( requesterDN, requesterGroup, numUses )
def renewProxy( self, proxyToBeRenewed = False, minLifeTime = 3600, newProxyLifeTime = 43200, proxyToConnect = False ):
    """
    Renew a proxy using the ProxyManager

    Arguments:
        proxyToBeRenewed : proxy to renew
        minLifeTime : if proxy life time is less than this, renew. Skip otherwise
        newProxyLifeTime : life time of new proxy
        proxyToConnect : proxy to use for connecting to the service
    """
    retVal = File.multiProxyArgument( proxyToBeRenewed )
    # NOTE(review): this tests retVal[ 'Value' ] rather than retVal[ 'OK' ];
    # confirm multiProxyArgument always carries a 'Value' key on failure.
    if not retVal[ 'Value' ]:
        return retVal
    proxyToRenewDict = retVal[ 'Value' ]
    # Skip the renewal entirely if the proxy still has enough lifetime
    secs = proxyToRenewDict[ 'chain' ].getRemainingSecs()[ 'Value' ]
    if secs > minLifeTime:
        File.deleteMultiProxy( proxyToRenewDict )
        return S_OK()
    if not proxyToConnect:
        proxyToConnectDict = { 'chain': False, 'tempFile': False }
    else:
        retVal = File.multiProxyArgument( proxyToConnect )
        if not retVal[ 'Value' ]:
            File.deleteMultiProxy( proxyToRenewDict )
            return retVal
        proxyToConnectDict = retVal[ 'Value' ]
    # Identify the owner and DIRAC group from the chain being renewed
    userDN = proxyToRenewDict[ 'chain' ].getIssuerCert()[ 'Value' ].getSubjectDN()[ 'Value' ]
    retVal = proxyToRenewDict[ 'chain' ].getDIRACGroup()
    if not retVal[ 'OK' ]:
        File.deleteMultiProxy( proxyToRenewDict )
        File.deleteMultiProxy( proxyToConnectDict )
        return retVal
    userGroup = retVal[ 'Value' ]
    limited = proxyToRenewDict[ 'chain' ].isLimitedProxy()[ 'Value' ]
    # Preserve the VOMS extensions (if any) of the original proxy
    voms = VOMS()
    retVal = voms.getVOMSAttributes( proxyToRenewDict[ 'chain' ] )
    if not retVal[ 'OK' ]:
        File.deleteMultiProxy( proxyToRenewDict )
        File.deleteMultiProxy( proxyToConnectDict )
        return retVal
    vomsAttrs = retVal[ 'Value' ]
    if vomsAttrs:
        retVal = self.downloadVOMSProxy( userDN,
                                         userGroup,
                                         limited = limited,
                                         requiredTimeLeft = newProxyLifeTime,
                                         requiredVOMSAttribute = vomsAttrs[0],
                                         proxyToConnect = proxyToConnectDict[ 'chain' ] )
    else:
        retVal = self.downloadProxy( userDN,
                                     userGroup,
                                     limited = limited,
                                     requiredTimeLeft = newProxyLifeTime,
                                     proxyToConnect = proxyToConnectDict[ 'chain' ] )
    # Clean up any temporary proxy files created above
    File.deleteMultiProxy( proxyToRenewDict )
    File.deleteMultiProxy( proxyToConnectDict )
    if not retVal[ 'OK' ]:
        return retVal
    chain = retVal['Value']
    # If the caller passed a real file, rewrite it with the renewed chain
    if not proxyToRenewDict[ 'tempFile' ]:
        return chain.dumpAllToFile( proxyToRenewDict[ 'file' ] )
    return S_OK( chain )
def getDBContents( self, condDict = None ):
    """
    Get the contents of the db

    :param condDict: optional selection conditions (field -> list of values);
                     defaults to no filtering. The previous mutable default
                     argument ({}) is replaced with the None sentinel.
    :return: RPC result with the matching records, sorted by UserDN descending
    """
    if condDict is None:
        condDict = {}
    rpcClient = RPCClient( "Framework/ProxyManager", timeout = 120 )
    return rpcClient.getContents( condDict, [ [ 'UserDN', 'DESC' ] ], 0, 0 )
def getVOMSAttributes( self, chain ):
    """
    Get the voms attributes for a chain

    :param chain: X509Chain to inspect
    :return: result of VOMS().getVOMSAttributes on the chain
    """
    return VOMS().getVOMSAttributes( chain )
def getUploadedProxyLifeTime( self, DN, group ):
    """
    Get the remaining seconds for an uploaded proxy

    :param DN: user DN the proxy was uploaded for
    :param group: DIRAC group of the uploaded proxy
    :return: S_OK( seconds ) -- 0 when no matching proxy is registered
    """
    result = self.getDBContents( { 'UserDN' : [ DN ], 'UserGroup' : [ group ] } )
    if not result[ 'OK' ]:
        return result
    data = result[ 'Value' ]
    if len( data[ 'Records' ] ) == 0:
        return S_OK( 0 )
    # Column positions are looked up by name, not assumed fixed
    pNames = list( data[ 'ParameterNames' ] )
    dnPos = pNames.index( 'UserDN' )
    groupPos = pNames.index( 'UserGroup' )
    expiryPos = pNames.index( 'ExpirationTime' )
    for row in data[ 'Records' ]:
        if DN == row[ dnPos ] and group == row[ groupPos ]:
            # Remaining lifetime, floored at zero for expired proxies
            td = row[ expiryPos ] - datetime.datetime.utcnow()
            secondsLeft = td.days * 86400 + td.seconds
            return S_OK( max( 0, secondsLeft ) )
    return S_OK( 0 )
def getUserProxiesInfo( self ):
    """ Get the user proxies uploaded info

    :return: RPC result; the internal 'rpcStub' entry is removed before returning
    """
    result = RPCClient( "Framework/ProxyManager", timeout = 120 ).getUserProxiesInfo()
    if 'rpcStub' in result:
        result.pop( 'rpcStub' )
    return result
# Module-level singleton instance shared by all importers of this module
gProxyManager = ProxyManagerClient()
| Sbalbp/DIRAC | FrameworkSystem/Client/ProxyManagerClient.py | Python | gpl-3.0 | 19,823 | [
"DIRAC"
] | 344f109708710b7f3835f03762d0d534c53bbbd56c82e674aa895cd48cfa4a30 |
# -*- coding: utf-8 -*-
#
# network_params.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
pynest microcircuit parameters
------------------------------
Network parameters for the microcircuit.
Hendrik Rothe, Hannah Bos, Sacha van Albada; May 2016
'''
import numpy as np
def get_mean_delays(mean_delay_exc, mean_delay_inh, number_of_pop):
    """ Creates matrix containing the delay of all connections.

    Arguments
    ---------
    mean_delay_exc
        Delay of the excitatory connections.
    mean_delay_inh
        Delay of the inhibitory connections.
    number_of_pop
        Number of populations.

    Returns
    -------
    mean_delays
        Matrix specifying the mean delay of all connections.

    """
    # Populations alternate E, I, E, I, ...; the delay depends only on the
    # source population (columns), so build one row and replicate it.
    source_delays = np.empty(number_of_pop)
    source_delays[0::2] = mean_delay_exc
    source_delays[1::2] = mean_delay_inh
    return np.tile(source_delays, (number_of_pop, 1))
def get_std_delays(std_delay_exc, std_delay_inh, number_of_pop):
    """ Creates matrix containing the standard deviations of all delays.

    Arguments
    ---------
    std_delay_exc
        Standard deviation of excitatory delays.
    std_delay_inh
        Standard deviation of inhibitory delays.
    number_of_pop
        Number of populations in the microcircuit.

    Returns
    -------
    std_delays
        Matrix specifying the standard deviation of all delays.

    """
    # Fill with the inhibitory value, then overwrite the excitatory
    # (even-indexed) source columns.
    std_delays = np.full((number_of_pop, number_of_pop), std_delay_inh,
                         dtype=float)
    std_delays[:, 0::2] = std_delay_exc
    return std_delays
def get_mean_PSP_matrix(PSP_e, g, number_of_pop):
    """ Creates a matrix of the mean evoked postsynaptic potential.

    The function creates a matrix of the mean evoked postsynaptic
    potentials between the recurrent connections of the microcircuit.
    The weight of the connection from L4E to L23E is doubled.

    Arguments
    ---------
    PSP_e
        Mean evoked potential.
    g
        Relative strength of the inhibitory to excitatory connection.
    number_of_pop
        Number of populations in the microcircuit.

    Returns
    -------
    weights
        Matrix of the weights for the recurrent connections.

    """
    inh = PSP_e * g
    # One row per target population: even source columns are excitatory,
    # odd ones inhibitory.
    source_profile = [PSP_e if col % 2 == 0 else inh
                      for col in range(number_of_pop)]
    weights = np.tile(source_profile, (number_of_pop, 1))
    # The L4E (column 2) -> L23E (row 0) connection is twice as strong.
    weights[0, 2] = PSP_e * 2
    return weights
def get_std_PSP_matrix(PSP_rel, number_of_pop):
    """ Relative standard deviation matrix of postsynaptic potential created.

    The relative standard deviation matrix of the evoked postsynaptic potential
    for the recurrent connections of the microcircuit is created.

    Arguments
    ---------
    PSP_rel
        Relative standard deviation of the evoked postsynaptic potential.
    number_of_pop
        Number of populations in the microcircuit.

    Returns
    -------
    std_mat
        Matrix of the standard deviation of postsynaptic potentials.

    """
    # np.full expresses the constant matrix directly instead of allocating
    # zeros and overwriting every element; dtype=float matches the previous
    # behaviour even for integer PSP_rel.
    return np.full((number_of_pop, number_of_pop), PSP_rel, dtype=float)
# Main parameter dictionary for the 8-population microcircuit (layers 2/3,
# 4, 5 and 6, each with one excitatory and one inhibitory population).
net_dict = {
    # Neuron model.
    'neuron_model': 'iaf_psc_exp',
    # The default recording device is the spike_detector. If you also
    # want to record the membrane potentials of the neurons, add
    # 'voltmeter' to the list.
    'rec_dev': ['spike_detector'],
    # Names of the simulated populations.
    'populations': ['L23E', 'L23I', 'L4E', 'L4I', 'L5E', 'L5I', 'L6E', 'L6I'],
    # Number of neurons in the different populations. The order of the
    # elements corresponds to the names of the variable 'populations'.
    'N_full': np.array([20683, 5834, 21915, 5479, 4850, 1065, 14395, 2948]),
    # Mean rates of the different populations in the non-scaled version
    # of the microcircuit. Necessary for the scaling of the network.
    # The order corresponds to the order in 'populations'.
    'full_mean_rates':
        np.array([0.971, 2.868, 4.746, 5.396, 8.142, 9.078, 0.991, 7.523]),
    # Connection probabilities. The first index corresponds to the targets
    # and the second to the sources.
    'conn_probs':
        np.array(
            [[0.1009, 0.1689, 0.0437, 0.0818, 0.0323, 0., 0.0076, 0.],
             [0.1346, 0.1371, 0.0316, 0.0515, 0.0755, 0., 0.0042, 0.],
             [0.0077, 0.0059, 0.0497, 0.135, 0.0067, 0.0003, 0.0453, 0.],
             [0.0691, 0.0029, 0.0794, 0.1597, 0.0033, 0., 0.1057, 0.],
             [0.1004, 0.0622, 0.0505, 0.0057, 0.0831, 0.3726, 0.0204, 0.],
             [0.0548, 0.0269, 0.0257, 0.0022, 0.06, 0.3158, 0.0086, 0.],
             [0.0156, 0.0066, 0.0211, 0.0166, 0.0572, 0.0197, 0.0396, 0.2252],
             [0.0364, 0.001, 0.0034, 0.0005, 0.0277, 0.008, 0.0658, 0.1443]]
            ),
    # Number of external connections to the different populations.
    # The order corresponds to the order in 'populations'.
    'K_ext': np.array([1600, 1500, 2100, 1900, 2000, 1900, 2900, 2100]),
    # Factor to scale the indegrees.
    'K_scaling': 0.1,
    # Factor to scale the number of neurons.
    'N_scaling': 0.1,
    # Mean amplitude of excitatory postsynaptic potential (in mV).
    'PSP_e': 0.15,
    # Relative standard deviation of the postsynaptic potential.
    'PSP_sd': 0.1,
    # Relative inhibitory synaptic strength (in relative units).
    'g': -4,
    # Rate of the Poissonian spike generator (in Hz).
    'bg_rate': 8.,
    # Turn Poisson input on or off (True or False).
    'poisson_input': True,
    # Delay of the Poisson generator (in ms).
    'poisson_delay': 1.5,
    # Mean delay of excitatory connections (in ms).
    'mean_delay_exc': 1.5,
    # Mean delay of inhibitory connections (in ms).
    'mean_delay_inh': 0.75,
    # Relative standard deviation of the delay of excitatory and
    # inhibitory connections (in relative units).
    'rel_std_delay': 0.5,
    # Parameters of the neurons.
    'neuron_params': {
        # Membrane potential average for the neurons (in mV).
        'V0_mean': -58.0,
        # Standard deviation of the average membrane potential (in mV).
        'V0_sd': 10.0,
        # Reset membrane potential of the neurons (in mV).
        'E_L': -65.0,
        # Threshold potential of the neurons (in mV).
        'V_th': -50.0,
        # Membrane potential after a spike (in mV).
        'V_reset': -65.0,
        # Membrane capacitance (in pF).
        'C_m': 250.0,
        # Membrane time constant (in ms).
        'tau_m': 10.0,
        # Time constant of postsynaptic excitatory currents (in ms).
        'tau_syn_ex': 0.5,
        # Time constant of postsynaptic inhibitory currents (in ms).
        'tau_syn_in': 0.5,
        # Time constant of external postsynaptic excitatory current (in ms).
        # NOTE(review): 'tau_syn_E' duplicates the value of 'tau_syn_ex' --
        # confirm which consumer reads this key before removing either.
        'tau_syn_E': 0.5,
        # Refractory period of the neurons after a spike (in ms).
        't_ref': 2.0}
    }
# Derived parameter matrices, computed from the scalar entries of net_dict
# and merged back into it below.
updated_dict = {
    # PSP mean matrix.
    'PSP_mean_matrix': get_mean_PSP_matrix(
        net_dict['PSP_e'], net_dict['g'], len(net_dict['populations'])
        ),
    # PSP std matrix.
    'PSP_std_matrix': get_std_PSP_matrix(
        net_dict['PSP_sd'], len(net_dict['populations'])
        ),
    # mean delay matrix.
    'mean_delay_matrix': get_mean_delays(
        net_dict['mean_delay_exc'], net_dict['mean_delay_inh'],
        len(net_dict['populations'])
        ),
    # std delay matrix.
    'std_delay_matrix': get_std_delays(
        net_dict['mean_delay_exc'] * net_dict['rel_std_delay'],
        net_dict['mean_delay_inh'] * net_dict['rel_std_delay'],
        len(net_dict['populations'])
        ),
    }
# Merge the derived matrices into the main parameter dictionary.
net_dict.update(updated_dict)
| tobikausk/nest-simulator | pynest/examples/Potjans_2014/network_params.py | Python | gpl-2.0 | 8,234 | [
"NEURON"
] | b5089d1fb35b393014824b7e5fef980759cdb60589b160104fc667d559bc0b07 |
# -*- coding: utf-8 -*-
'''Programme to clone galaxies from a catalogue and generate a random,
unclustered catalogue to examine the effects of clustering on galaxy properties.
This is the full interface version, with a text prompt to generate a catalogue
when run as the main program. Note that this is currently set up only for flat
cosmologies (FlatLambdaCDM).
'''
import os
import numpy as np
import reflwin
from astropy.cosmology import FlatLambdaCDM
from matplotlib import pyplot
#m_min = 10.0 # Minimum apparent magnitude usable in the survey
#m_max = 19.8 # Maximum apparent magnitude detectable in the survey
#n_clone = 20 # The ratio of cloned-to-surveyed galaxies, typ. 400
#Sigma = 0.389017e11
def boolean_raw_input(text):
variable = -1
while variable != True and variable != False:
variable = raw_input(text)
if variable.lower() in {"y", "ye", "yes", "true", "sure", "oui", "ja"}:
variable = True
elif variable.lower() in {"n", "no", "false", "non", "nein"}:
variable = False
elif variable != True and variable != False:
print "Please answer with y/yes or n/no."
return variable
if __name__=="__main__":
    # ---- Interactive cosmology set-up (flat Lambda-CDM only) ----
    H0 = float(raw_input("Hubble constant in kms^-1Mpc^-1: "))
    Om0 = float(raw_input("Matter density: "))
    print "Please wait while the cosmology is generated."
    print "Cosmology finished."
    cosmo = FlatLambdaCDM(H0, Om0)
    # ---- Survey magnitude limits and coverage ----
    m_min = float(raw_input("Minimum apparent magnitude: "))
    m_max = float(raw_input("Maximum apparent magnitude: "))
    sol_ang = float(raw_input("Solid angle covered by the survey in " + \
                              "steradians: "))
    # k- and e-corrections are typed in as Python expressions in z and
    # evaluated with eval() -- acceptable only for trusted interactive input.
    k_text = raw_input("Type in Python function for k-correction in terms " + \
                       "of redshift z: ")
    e_text = raw_input("Type in Python function for e-correction in terms " + \
                       "of redshift z: ")
    k_corr = lambda z: eval(k_text)
    e_corr = lambda z: eval(e_text)
    # Conversions between absolute and apparent magnitude for a cosmology
    abs_mag = lambda z, m, univ: m - univ.distmod(z).value - k_corr(z) - \
                                 e_corr(z)
    obs_mag = lambda z, M, univ: M + univ.distmod(z).value + k_corr(z) + \
                                 e_corr(z)
    # ---- Catalogue input (single combined file or separate column files) ----
    print "The generator accepts a text file with columns delimited " + \
          "by spaces. The columns must be ordered right ascension " + \
          "(in degrees), declination (in degrees), redshift, and " + \
          "apparent magnitudes, followed by any arbitrary properties. " + \
          "Alternatively, the properties can be input as separate " + \
          "text files in column format."
    separate = boolean_raw_input("Do you wish to input separate " + \
                                 "text files? (y/n): ")
    if separate == True:
        same_loc = boolean_raw_input("Is the Python file in the same " + \
                                     "location as the data files? (y/n): ")
        if same_loc == True:
            __location__ = os.path.realpath(os.path.join(os.getcwd(), \
                                            os.path.dirname(__file__)))
            angpos = boolean_raw_input("Are there angular positions (right " + \
                                       "ascensions or declinations) to input? (y/n): ")
            if angpos == True:
                ra_loc = raw_input("Type the name of the right ascension " + \
                                   "file: ")
                ra = np.loadtxt(os.path.join(__location__,ra_loc), \
                                delimiter=' ')
                dec_loc = raw_input("Type the name of the declination file: ")
                dec = np.loadtxt(os.path.join(__location__,dec_loc), \
                                 delimiter=' ')
            elif angpos == False:
                ra = None
                dec = None
            z_loc = raw_input("Type the name of the redshift file: ")
            z = np.loadtxt(os.path.join(__location__,z_loc), delimiter=' ')
            absolute = boolean_raw_input("Are the input magnitudes " + \
                                         "absolute? (y/n): ")
            if absolute == True:
                mag_loc = raw_input("Type the name of the absolute " + \
                                    "magnitude file: ")
                mag = np.loadtxt(os.path.join(__location__,mag_loc), \
                                 delimiter=' ')
            elif absolute == False:
                mag_loc = raw_input("Type the name of the apparent " + \
                                    "magnitude file: ")
                mag = np.loadtxt(os.path.join(__location__,mag_loc), \
                                 delimiter=' ')
                # Convert apparent magnitudes to absolute, in place
                for i in range(len(mag)):
                    mag[i] = abs_mag(z[i],mag[i],cosmo)
            att_loc = raw_input("Type the name of the additional " + \
                                "properties file: ")
            att = np.loadtxt(os.path.join(__location__,att_loc), delimiter=' ')
        elif same_loc == False:
            angpos = boolean_raw_input("Are there angular positions (right " + \
                                       "ascensions or declinations) to input? (y/n): ")
            if angpos == True:
                ra_loc = raw_input("Type the full file path of the right " + \
                                   "ascension file: ")
                ra = np.loadtxt(ra_loc, delimiter=' ')
                dec_loc = raw_input("Type the full file path of the " + \
                                    "declination file: ")
                dec = np.loadtxt(dec_loc, delimiter=' ')
            elif angpos == False:
                ra = None
                dec = None
            z_loc = raw_input("Type the full file path of the redshift file: ")
            z = np.loadtxt(z_loc, delimiter=' ')
            absolute = boolean_raw_input("Are the input magnitudes " + \
                                         "absolute? (y/n): ")
            if absolute == True:
                mag_loc = raw_input("Type the full file path of the " + \
                                    "absolute magnitude file: ")
                mag = np.loadtxt(mag_loc, delimiter=' ')
            elif absolute == False:
                mag_loc = raw_input("Type the full file path of the " + \
                                    "apparent magnitude file: ")
                mag = np.loadtxt(mag_loc, delimiter=' ')
                # Convert apparent magnitudes to absolute, in place
                for i in range(len(mag)):
                    mag[i] = abs_mag(z[i],mag[i],cosmo)
            att_loc = raw_input("Type the full file path of the additional " + \
                                "properties file: ")
            att = np.loadtxt(att_loc, delimiter=' ')
    elif separate == False:
        same_loc = boolean_raw_input("Is the Python file in the same " + \
                                     "location as the data file? (y/n): ")
        if same_loc == True:
            __location__ = os.path.realpath(os.path.join(os.getcwd(), \
                                            os.path.dirname(__file__)))
            file_loc = raw_input("Type the name of the data file: ")
            data = np.loadtxt(os.path.join(__location__,file_loc), \
                              delimiter=' ')
            ra = data[:,0]
            dec = data[:,1]
            z = data[:,2]
            mag = data[:,3]
            # Columns beyond the fourth hold arbitrary per-galaxy properties
            att = data[:,4:int(data.shape[1])]
            for i in range(len(mag)):
                mag[i] = abs_mag(z[i],mag[i],cosmo)
        elif same_loc == False:
            file_loc = raw_input("Type the full file path of the data file: ")
            data = np.loadtxt(file_loc, delimiter=' ')
            ra = data[:,0]
            dec = data[:,1]
            z = data[:,2]
            mag = data[:,3]
            att = data[:,4:int(data.shape[1])]
            for i in range(len(mag)):
                mag[i] = abs_mag(z[i],mag[i],cosmo)
    # ---- Cloning / windowing options ----
    n_clone = int(raw_input("The average number of galaxy clones (20 is " + \
                            "recommended): "))
    windowed = boolean_raw_input("Apply the Farrow window to the " + \
                                 "redistribution? (y/n): ")
    if windowed == True:
        Sigma = float(raw_input("The standard deviation of the window " + \
                                "in Mpc^3: "))
    # ---- Optional survey limits (None means unlimited) ----
    zlim = boolean_raw_input("Is there a redshift limit to the " + \
                             "survey? (y/n): ")
    if zlim == True:
        zlim = float(raw_input("Input the redshift limit: "))
    elif zlim == False:
        zlim = None
    ralim = boolean_raw_input("Are there RA limits to the survey? (y/n): ")
    if ralim == True:
        ralim_lo = float(raw_input("Input the lower RA limit: "))
        ralim_hi = float(raw_input("Input the upper RA limit: "))
        ralim = (ralim_lo,ralim_hi)
    elif ralim == False:
        ralim = None
    declim = boolean_raw_input("Are there declination limits to the " + \
                               "survey? (y/n): ")
    if declim == True:
        declim_lo = float(raw_input("Input the lower declination limit: "))
        declim_hi = float(raw_input("Input the upper declination limit: "))
        declim = (declim_lo,declim_hi)
    elif declim == False:
        declim = None
    # ---- Iteration controls ----
    N = int(raw_input("Input the number of iterations to perform (20 is " + \
                      "sufficient): "))
    dz = float(raw_input("Input the separation of the redshift bins the " + \
                         " overdenstity will be measured over (0.025 is " + \
                         "recommended): "))
    take_record = boolean_raw_input("Do you wish to obtain a record of the " + \
                                    "catalogue at each iteration? Note that" + \
                                    " this can be memory intensive. (y/n): ")
# Schechter luminosity function (LF) parameters and simple evolution models.
# NOTE(review): 'cosmo' is only assigned inside the __main__ block above, so
# importing this module (rather than running it) raises NameError here.
Phistar_z0 = 9.4e-3 * cosmo.h ** 3 # The normalization density for the LF at z0
Alpha = -1.26 # The power law coefficient for the LF
Magstar_z0 = -20.7 # The characteristic luminosity for the LF at z0
# Redshift evolution of the LF normalisation and characteristic magnitude
Phistar = lambda z: Phistar_z0 * 10 ** (0.4*1.8*z)
Magstar = lambda z: Magstar_z0 + 0.7 * (z-0.1)
def truncate_by_mag(app_m, z, abs_m, min_app_mag, vmaxs=None, \
                    vmaxcut=(False, -23., -22.5)):
    '''
    Truncates a catalogue's data according to either a minimum apparent
    magnitude, or a specified magnitude window.

    An input catalogue of apparent magnitudes app_m, absolute magnitudes abs_m,
    and redshifts z have galaxies removed according to either an apparent
    minimum magnitude min_app_mag, or so that only galaxies within an absolute
    magnitude window vmaxcut remain. Optionally, the analytical V_max values
    vmaxs can be truncated as well. If no such values exist, use None.

    Parameters
    ----------
    app_m : array_like
        An array of apparent magnitude values.
    z : array_like
        An array of redshift values, where z[x] and app_m[x] refer to the same
        galaxy.
    abs_m : array_like
        An array of absolute magnitudes, where abs_m[x] and app_m[x] refer to
        the same galaxy.
    min_app_mag : float
        Minimum apparent magnitude of the catalogue.
    vmaxs : array_like or NoneType, optional
        An array of analytic V_max values, where vmaxs[x] and app_m[x] refer to
        the same galaxy. If no V_max values are available, use None.
    vmaxcut : array_like (Boolean, float, float), optional
        Absolute magnitude window the function will cut the catalogue to,
        removing all galaxies that lie outside of it, with three packed
        variables: a Boolean that is True if the window is to be cut, the lower
        limit of absolute magnitude, and the upper limit of absolute magnitude.

    Returns
    -------
    app_m_cut : array_like
        An array of the apparent magnitudes after the cut.
    z_cut : array_like
        An array of the redshifts after the cut.
    abs_m_cut : array_like
        An array of the absolute magnitudes after the cut.
    vmax_cut : array_like
        An array of the V_max values after the cut. Only produced if vmaxs is
        not None.
    '''
    app = np.asarray(app_m)
    red = np.asarray(z)
    absm = np.asarray(abs_m)
    # A single boolean "keep" mask replaces the four near-identical
    # index-collection loops of the original implementation.
    if vmaxcut[0]:
        # Keep only galaxies inside the absolute magnitude window
        keep = (absm >= vmaxcut[1]) & (absm <= vmaxcut[2])
    else:
        # Keep only galaxies at or above the survey's minimum apparent mag
        keep = app >= min_app_mag
    if vmaxs is None:
        return app[keep], red[keep], absm[keep]
    return app[keep], red[keep], absm[keep], np.asarray(vmaxs)[keep]
#-------------------------------------------------------------------------------
def schechter(magnitude, phiStar=Phistar_z0, alpha=Alpha, mStar=Magstar_z0, \
              z=False):
    '''
    Returns the value of the Schechter luminosity function specified by the
    input properties at a given absolute magnitude, in units of cubic
    megaparsecs. An evolving definition of the function can be given by
    entering a redshift z and lambda functions for phiStar and mStar.

    Parameters
    ----------
    magnitude : float
        An absolute magnitude value the Schechter function is evaluated at.
    phiStar : float or lambda_like, optional
        The normalisation number density for the Schechter function, in units
        of luminous bodies per cubic megaparsec. Can be an evolving function
        if a redshift is specified.
    alpha : float, optional
        The unitless coefficient for the power law term for the Schechter
        function.
    mStar : float or lambda_like, optional
        The characteristic luminosity for the cutoff point of the Schechter
        function, in units of magnitudes. Can be an evolving function if a
        redshift is specified.
    z : float or bool, optional
        The redshift at which the evolving Schechter function is measured. If
        the function is constant, use False.

    Returns
    -------
    float
        The value of the Schechter function at the input magnitude.
    '''
    # callable() is more robust than the original type(...) != float checks,
    # which wrongly selected the evolving branch for e.g. int parameters.
    if callable(phiStar) and callable(mStar) and isinstance(z, float):
        phi = phiStar(z)
        mchar = mStar(z)
    else:
        phi = phiStar
        mchar = mStar
    # Both branches of the original shared this expression; hoisted here.
    w = 0.4 * (mchar - magnitude)
    return 0.4 * np.log(10) * phi * 10.0**(w * (alpha + 1.0)) \
        * np.exp(-10.0**w)
def redshift_tabulator(univ, solid_Angle=4*np.pi, min_z=0.0, max_z=1.5, N=1e5):
    '''
    Tables redshifts and associated values in a reference table for finding
    V_max and V_max_dc.

    The function tables N redshift bins between min_z and max_z in a Numpy
    array. Using an AstroPy cosmology univ, values of various parameters are
    calculated at each redshift bin, giving the following columns:
    1: Redshift bin (z)
    2: Distance modulus (q), which is equivalent to both 5 * (lg(D_L) - 1) + k(z)
    + e(z), and m - M
    3: The comoving volume (V_c) over the solid angle (solid_Angle) of the
    surface at z
    4: The overdensity (Delta) of an associated catalogue at z. At generation,
    this is set to Delta(z) = 1.0.
    5: The change in volume over the redshift bin (DeltaV_c)
    6: The sum of the multiple of overdensity and change in volume (Sum_dc),
    from min_z to the current z bin.

    Parameters
    ----------
    univ : LambdaCDM
        An AstroPy cosmology.
    solid_Angle : float
        The solid angle over which the corresponding survey is taken, in
        steradians. Standard value is 4 pi (i.e. the whole sky).
    min_z : float, optional
        The minimum redshift bin for the table. Standard value is 0.0.
    max_z : float, optional
        The maximum redshift bin for the table. Standard value is 1.5.
    N : float, optional
        The number of redshift bins in the table. Standard value is 1e5.

    Returns
    -------
    table : array_like
        A (N,6) array of redshift values and corresponding dependants.
    '''
    # NOTE(review): relies on module-level k_corr/e_corr lambdas, which are
    # only defined when the script is run interactively.
    intvl = 1.0/N * (max_z - min_z) # Redshift bin interval
    table = np.zeros((int(N),6))
    for i in xrange(int(N)):
        # Bins start at min_z + intvl (the first row is one interval up)
        z = intvl + i * intvl
        table[i][0] = z                                        # z (1)
        table[i][1] = univ.distmod(z).value + \
                      k_corr(z) + e_corr(z)                    # q (2)
        table[i][2] = (solid_Angle / (4.0 * np.pi * univ.h ** 3)) * \
                      univ.comoving_volume(z).value            # V_c (3)
        table[i][3] = 1.0                                      # Delta (4)
        if i != 0:
            # Shell volume and running density-weighted cumulative volume
            table[i][4] = table[i][2] - table[i-1][2]          # DeltaV_c (5)
            table[i][5] = (table[i][4]*table[i][3]) \
                          + table[i-1][5]                      # Sum_dc (6)
        else:
            table[i][4] = 0.0                                  # DeltaV_c for min_z
            table[i][5] = 0.0                                  # Sum_dc for min_z
    return table
def v_max(table, M, lim=0.5):
    '''
    Estimates the maximum volume statistic for a galaxy of absolute magnitude M
    from a table.

    Finds the maximum volume for any given galaxy with absolute magnitude M
    to be translocated and still remain in survey parameters of observed
    magnitude, using a redshift table. An absolute redshift limit for the
    volume can be set, based on the redshift limits of the survey.

    Parameters
    ----------
    table : array_like
        A (N,6) array of redshift values and corresponding dependants.
    M : float
        The absolute magnitude of the galaxy.
    lim : float or NoneType, optional
        The redshift limit on the maximum volume. Use None if no limit is used.

    Returns
    -------
    v : float
        The maximum volume statistic for the galaxy.
    v_mx : float
        The upper volume limit for the galaxy's maximum volume.
    v_mn : float
        The lower volume limit for the galaxy's maximum volume.
    '''
    # NOTE(review): relies on module-level m_min/m_max, which are only set
    # when the script is run interactively.
    q_min = m_min - M # Minimum distance modulus
    q_max = m_max - M # Maximum distance modulus
    index_z_min = np.searchsorted(table[:, 1], q_min) # Binary search for min z
    index_z_max = np.searchsorted(table[:, 1], q_max) # Binary search for max z
    # Clamp the upper bound to the survey redshift limit when it applies.
    # BUGFIX: the original only returned inside the 'elif', so a set but
    # unexceeded limit made the function fall through and return None.
    if lim is not None and table[index_z_max][0] > lim:
        index_z_max = np.searchsorted(table[:, 0], lim)
    return table[index_z_max][2] - table[index_z_min][2], \
        table[index_z_max][2], table[index_z_min][2]
def v_max_dc(table, M, lim=0.5):
    '''
    Estimates the density corrected maximum volume statistic from Cole (2011)
    for a galaxy of absolute magnitude M from a table.

    Finds the density corrected maximum volume for any given galaxy with
    absolute magnitude M to be translocated and still remain in survey
    parameters of observed magnitude, using a redshift table. An absolute
    redshift limit for the volume can be set, based on the redshift limits of
    the survey.

    Parameters
    ----------
    table : array_like
        A (N,6) array of redshift values and corresponding dependants,
        including overdensity.
    M : float
        The absolute magnitude of the galaxy.
    lim : float or NoneType, optional
        The redshift limit on the maximum volume. Use None if no limit is used.

    Returns
    -------
    v_dc : float
        The density corrected maximum volume statistic for the galaxy.
    v_mx : float
        The upper volume limit for the galaxy's maximum volume.
    v_mn : float
        The lower volume limit for the galaxy's maximum volume.
    '''
    # NOTE(review): relies on module-level m_min/m_max, which are only set
    # when the script is run interactively.
    q_min = m_min - M # Minimum distance modulus
    q_max = m_max - M # Maximum distance modulus
    index_z_min = np.searchsorted(table[:, 1], q_min) # Binary search for min z
    index_z_max = np.searchsorted(table[:, 1], q_max) # Binary search for max z
    # Clamp the upper bound to the survey redshift limit when it applies.
    # BUGFIX: as in v_max, the original fell through and returned None when
    # lim was set but not exceeded.
    if lim is not None and table[index_z_max][0] > lim:
        index_z_max = np.searchsorted(table[:, 0], lim)
    # Density-corrected volume (Sum_dc column) plus the overdensity-weighted
    # upper and lower comoving-volume limits.
    return table[index_z_max, 5] - table[index_z_min, 5], \
        table[index_z_max][2] * table[index_z_max][3], \
        table[index_z_min][2] * table[index_z_min][3]
def lumin_func_est(z, m, table, dM=0.1, absolute=True, density_corr=True):
    '''
    Estimates the luminosity function of galaxies of magnitudes m and redshifts
    z.

    This estimates the luminosity function for a collection of galaxies, given
    as arrays of corresponding magnitudes and redshifts, over absolute magnitude
    bins of size dM and using a redshift table.

    Parameters
    ----------
    z : array_like
        An array of redshift values.
    m : array_like
        An array of magnitude values, where z[x] and m[x] refer to the same
        galaxy.
    table : array_like
        A (N,6) array of redshift values and corresponding dependants.
    dM : float, optional
        The size of the absolute magnitude bins over which luminosity function
        values are applicable.
    absolute : bool, optional
        True if the magnitudes m are absolute; False if they are apparent.
    density_corr : bool, optional
        True if the density corrected maximum volume is to be used; False if the
        classic density independent value is used.

    Returns
    -------
    M_bin : array_like
        An array of the lower limits of the luminosity bins.
    M_bincentres : array_like
        An array of the centres of the luminosity bins.
    lum_func : array_like
        The estimated luminosity function phi, a function of M_bincentre values.

    Raises
    ------
    ValueError
        If m and z are not of equal length.
    '''
    if len(m) != len(z):
        # Previously this *returned* an error string, which callers would
        # then try to unpack; raising is the correct contract.
        raise ValueError('The catalogue must consist of two ordered lists of '
                         'magnitudes m and redshifts z of equal length.')
    if absolute:
        M = np.copy(m)
    else:
        # Convert apparent magnitudes to absolute, galaxy by galaxy.
        M = np.asarray([abs_mag(z[g], m[g]) for g in range(len(m))])
    binnum = int(abs(max(M) - min(M)) / dM)
    # 1/V_max weight per galaxy: density-corrected (Cole 2011) or classic.
    if density_corr:
        inv = 1.0 / np.asarray([v_max_dc(table, Mi)[0] for Mi in M])
    else:
        inv = 1.0 / np.asarray([v_max(table, Mi)[0] for Mi in M])
    lum_func, M_bin = np.histogram(M, weights=inv, bins=binnum,
                                   range=(min(M), max(M)))
    # Normalise by the realised bin width (may differ slightly from dM).
    dM_true = M_bin[2] - M_bin[1]
    M_bincentres = (M_bin[1:] + M_bin[:-1]) / 2.0
    lum_func /= dM_true
    return M_bin, M_bincentres, lum_func
#-------------------------------------------------------------------------------
def rand_cat_populator(z, M, ra, dec, attributes, table, win=False, sig=Sigma, \
                       z_lim=0.5, ra_lim=(0.,10.), dec_lim=(-7.5,7.5)):
    '''
    Populates a random catalogue with clones from an initial catalogue (z, M,
    ra, dec, attributes), using a redshift table defined by redshift_tabulator.

    From a catalogue of galaxies of redshifts (z), absolute magnitudes (M),
    right ascension (ra), declination (dec), and arbitrary properties
    (attributes), a new random catalogue is generated by cloning the original
    galaxies. If win is set to True, the galaxies will be redistributed in V_max
    in a probability window with standard deviation sig, as used in Farrow
    (2015). If not, it will be distributed uniformly. A maximum limit in
    redshift (z_lim) can be set, in addition to upper and lower limits in right
    ascension (ra_lim) and declination (dec_lim).

    Parameters
    ----------
    z : array_like
        An array of redshift values for a catalogue of galaxies.
    M : array_like
        An array of absolute magnitude values for the catalogue of galaxies.
    ra : array_like or NoneType
        An array of right ascension values (degrees), or None.
    dec : array_like or NoneType
        An array of declination values (degrees), or None.
    attributes : array_like
        An array of arbitrary property values for the catalogue of galaxies.
    table : array_like
        A (N,6) array of redshift values and corresponding dependants.
    win : bool, optional
        True for the Farrow window; False for a uniform distribution.
    sig : float, optional
        The standard deviation of the Farrow window.
    z_lim : float or NoneType, optional
        The redshift limit of the redistribution. Use None if no limit is used.
    ra_lim : array_like (float, float) or NoneType, optional
        The right ascension limits (lower, upper) in degrees. Using None will
        redistribute in redshift alone.
    dec_lim : array_like (float, float) or NoneType, optional
        The declination limits (lower, upper) in degrees. Using None will
        redistribute in redshift alone.

    Returns
    -------
    rand_cat : array_like
        An array for a random catalogue (z, M, ra, dec, attributes).
    n_list : array_like
        An array of the number of clones of each original galaxy.
    orgz_list : array_like
        An array of the redshift of each original galaxy, one per clone.
    '''
    rand_cat = []
    n_list = []
    orgz_list = []
    ang_pos = False
    # Identity tests ('is not None') rather than '!= None': ra/dec may be
    # numpy arrays, for which '!=' is elementwise and breaks the boolean
    # 'and' with an ambiguous-truth-value error.
    if ra is not None and dec is not None and ra_lim is not None and dec_lim is not None:
        low_dec = np.radians(dec_lim[0])
        hi_dec = np.radians(dec_lim[1])
        low_ra = np.radians(ra_lim[0])
        hi_ra = np.radians(ra_lim[1])
        ang_pos = True
    if win:
        # Farrow (2015) reflected probability window around the original volume.
        random_v = lambda w, x, y, s: reflwin.rand_refl_win(w, x, y, s)
    else:
        # Uniform redistribution across the galaxy's accessible volume.
        random_v = lambda w, x, y, s: np.random.random() * (y - x)
    for i in range(len(z)):
        v, v_mx, v_mn = v_max(table, M[i], None)
        v_dc, v_mx_dc, v_mn_dc = v_max_dc(table, M[i], None)
        # Density-corrected expected clone count for this galaxy.
        n = n_clone * v / v_dc
        centre = table[np.searchsorted(table[:,0], z[i])][2]
        # Stochastically round n to an integer while preserving its mean.
        prob_n = 1 - (n % 1)
        lot_n = np.random.rand()
        if lot_n <= prob_n:
            n = np.floor(n)
        else:
            n = np.ceil(n)
        n_list.append(n)
        for j in range(int(n)):
            new_v = v_mn + random_v(centre, v_mn, v_mx, sig)
            new_z = table[np.searchsorted(table[:,2], new_v)][0]
            # Redraw until the clone falls inside the redshift limit. Guard
            # z_lim explicitly: in Python 2, 'new_z > None' is always True,
            # so the documented z_lim=None case would loop forever.
            while z_lim is not None and new_z > z_lim:
                new_v = v_mn + random_v(centre, v_mn, v_mx, sig)
                new_z = table[np.searchsorted(table[:,2], new_v)][0]
            if ang_pos:
                # NOTE(review): new_ra/new_dec are not offset by the lower
                # limits (low_ra, sin(low_dec)) -- correct only when the
                # lower limits are zero; confirm against survey footprint.
                new_ra = np.random.random() * (hi_ra - low_ra)
                new_dec = np.arcsin(np.random.random() * \
                          (np.sin(hi_dec) - np.sin(low_dec)))
                rand_cat.append([new_z, M[i], new_ra, new_dec, attributes[i]])
            else:
                rand_cat.append([new_z, M[i], attributes[i]])
            orgz_list.append(z[i])
    return np.asarray(rand_cat), n_list, orgz_list
def overdensity_iter(z, M, ra, dec, attributes, table, N, dz=0.025, win=False, \
                     sig=Sigma, z_lim=0.5, ra_lim=(0.,10.), dec_lim=(-7.5,7.5),\
                     record=False):
    '''
    Populates a random catalogue with clones from an initial catalogue using
    rand_cat_populator for N iterations, correcting the overdensity array after
    each run.

    From a catalogue of galaxies of redshifts (z), absolute magnitudes (M),
    right ascension (ra), declination (dec), and arbitrary properties
    (attributes), a new random catalogue is generated by cloning the original
    galaxies. If win is set to True, the galaxies will be redistributed in V_max
    in a probability window with standard deviation sig, as used in Farrow
    (2015). If not, it will be distributed uniformly. A maximum limit in
    redshift (z_lim) can be set, in addition to upper and lower limits in right
    ascension (ra_lim) and declination (dec_lim). This is then repeated N times,
    with the overdensity measured and recorded in a running version of table
    with each iteration, over redshift bins of size dz. The final catalogue will
    then be made uniform according to the methods in Cole (2011) and/or Farrow
    (2015).

    Parameters
    ----------
    z, M, ra, dec, attributes : array_like
        Redshifts, absolute magnitudes, right ascensions, declinations and
        arbitrary properties for the catalogue of galaxies.
    table : array_like
        A (X,6) array of redshift values and corresponding dependants.
    N : int
        The number of iterations of the generation loop.
    dz : float, optional
        The separation of redshift bins over which the overdensity is measured.
    win : bool, optional
        True for the Farrow window; False for a uniform distribution.
    sig : float, optional
        The standard deviation of the Farrow window.
    z_lim : float or NoneType, optional
        The redshift limit of the redistribution. Use None if no limit is used.
    ra_lim, dec_lim : array_like (float, float) or NoneType, optional
        The angular limits (lower, upper) of the redistribution in degrees.
        Using None will redistribute in redshift alone.
    record : bool, optional
        True for a complete record of all returns at every iteration, False if
        only the final iteration is needed (to limit memory cost).

    Returns
    -------
    run_table, run_cat, run_delta, run_zbin, run_n, run_orgz
        The final iteration's table, random catalogue, overdensities, redshift
        bin centres, per-galaxy clone counts and original redshifts.
    table_rec, cat_rec, delta_rec, zbin_rec, n_rec, orgz_rec : optional
        Per-iteration records of the above; only returned when record is True.
    '''
    run_table = table.copy()
    if record:
        table_rec = []
        delta_rec = []
        cat_rec = []
        zbin_rec = []
        n_rec = []
        orgz_rec = []
        table_rec.append(run_table)
    run_orgz = []
    # NOTE: the two helper docstrings were previously swapped (ceilstep
    # claimed to be the floor and vice versa); corrected here.
    def ceilstep(a, hibound):
        'The ceiling of a with respect to step size hibound.'
        return np.ceil(np.array(a, dtype=float) / hibound) * hibound
    def floorstep(a, lowbound):
        'The floor of a with respect to step size lowbound.'
        return np.floor(np.array(a, dtype=float) / lowbound) * lowbound
    for i in range(N):
        if record:
            run_cat, run_n, run_orgz = rand_cat_populator(z, M, ra, dec, \
                                                          attributes, \
                                                          table_rec[-1], win, \
                                                          sig, z_lim, ra_lim, \
                                                          dec_lim)
        else:
            run_cat, run_n, run_orgz = rand_cat_populator(z, M, ra, dec, \
                                                          attributes, \
                                                          run_table, win, \
                                                          sig, z_lim, ra_lim, \
                                                          dec_lim)
        z_r = run_cat[:,0]
        # Histogram original and random catalogues over identical z bins.
        minz = floorstep(min(z),dz)
        maxz = ceilstep(max(z),dz)
        binnum = int(np.round((maxz-minz)/dz))
        n_g,z_bin = np.histogram(z, bins=binnum, range=(minz,maxz))
        n_r,z_r_bin = np.histogram(z_r, bins=binnum, range=(minz,maxz))
        run_zbin = (z_bin[1:]+z_bin[:-1])/2.0
        # Overdensity delta = n_clone * N_galaxy / N_random per bin, guarding
        # against division by empty random bins.
        if 0 not in n_r and 0. not in n_r:
            run_delta = n_clone * n_g / np.asarray(n_r, dtype=float)
        else:
            run_delta = np.zeros_like(n_g, dtype=float)
            ind_nonzero = (n_r != 0)
            run_delta[ind_nonzero] = n_clone * n_g[ind_nonzero] / \
                                     np.asarray(n_r[ind_nonzero], dtype=float)
        # Write the measured overdensity back into the running table
        # (column 3) and rebuild the cumulative density-corrected volume
        # (column 5) from the per-shell volumes in column 4.
        for j in range(len(run_table)):
            j_z = run_table[j][0]
            if j_z < maxz and j_z > minz:
                k = np.searchsorted(z_bin, floorstep(j_z, dz)) - 1
                run_table[j][3] = run_delta[k]
            else:
                run_table[j][3] = 1.0
            if j != 0:
                run_table[j][5] = (run_table[j][4]*run_table[j][3]) + \
                                  run_table[j-1][5]
            else:
                run_table[j][5] = 0.0
        run_table = run_table.copy()
        if record:
            table_rec.append(run_table)
            cat_rec.append(run_cat)
            n_rec.append(run_n)
            delta_rec = np.append(delta_rec, run_delta)
            zbin_rec = np.append(zbin_rec, run_zbin)
            # BUG FIX: this line previously assigned to zbin_rec, clobbering
            # the redshift-bin record and leaving orgz_rec permanently empty.
            orgz_rec = np.append(orgz_rec, run_orgz)
    if record:
        return run_table, run_cat, run_delta, run_zbin, run_n, run_orgz, \
               table_rec, cat_rec, delta_rec, zbin_rec, n_rec, orgz_rec
    return run_table, run_cat, run_delta, run_zbin, run_n, run_orgz
#-------------------------------------------------------------------------------
if __name__=="__main__":
    # Script entry point: build the redshift lookup table, generate the random
    # clone catalogue, save both to disk, then plot the resulting N(z)
    # distributions.  Relies on module-level inputs defined earlier in the
    # file (cosmo, z, mag, ra, dec, att, N, dz, windowed, Sigma, zlim, ralim,
    # declim, take_record).
    print "Please wait while the table is generated."
    table = redshift_tabulator(cosmo)
    print "Table finished."
    print "Please wait while the random catalogue is generated."
    if take_record == True:
        # Full per-iteration history of tables/catalogues/overdensities.
        run_table, rand_cat, run_delta, run_zbin, run_n, run_orgz, table_rec, \
        cat_rec, delta_rec, zbin_rec, n_rec, orgz_rec = \
        overdensity_iter(z, mag, ra, dec, att, table, N, dz, windowed, Sigma, \
                         zlim, ralim, declim, take_record)
    elif take_record == False:
        # Memory-saving path: keep only the final iteration's outputs.
        run_table, rand_cat, run_delta, run_zbin, run_n, run_orgz = \
        overdensity_iter(z, mag, ra, dec, att, table, N, dz, windowed, Sigma, \
                         zlim, ralim, declim, take_record)
    np.savetxt("rand_cat.txt", rand_cat, delimiter= ' ')
    np.savetxt("table.txt", table, delimiter= ' ')
    print "Random catalogue generated; saved as rand_cat.txt. Available as" + \
          " variable rand_cat. Table saved as table.txt."
    def finalplot():
        # Plot normalised N(z) histograms of the random catalogue against the
        # original catalogue for a visual sanity check.
        M = 200
        n, bins = np.histogram(rand_cat[:,0], bins = M, normed=True)
        bincent = (bins[1:]+bins[:-1])/2.0
        n2, bins2 = np.histogram(z, bins = M, normed=True)
        bincent2 = (bins2[1:]+bins2[:-1])/2.0
        corrn = sum(n)
        corrn2 = sum(n2)
        pyplot.figure(1,figsize=(8.2677,11.6929))
        pyplot.subplots_adjust(left=0.2)
        pyplot.rcParams['xtick.major.pad'] = 6
        pyplot.rcParams['ytick.major.pad'] = 6
        pyplot.subplot(1,1,1)
        pyplot.xlabel(r'$z$',fontsize='48')
        pyplot.ylabel(r'$N(z)$',fontsize='48')
        pyplot.plot(bincent, n/corrn)
        pyplot.plot(bincent2, n2/corrn2)
        # NOTE(review): three legend labels but only two plot() calls above --
        # the 'Analytic Distribution' entry has no matching line; confirm.
        pyplot.legend(['Farrow (2015) Distribution', \
                       'Cole (2011) Distribution', \
                       'Analytic Distribution'], fontsize='xx-large')
        pyplot.show()
        return 0.0
    finalplot()
| jmoore06/Large-scale-galaxy-clustering-code | rand_cat_gen.py | Python | mit | 39,049 | [
"Galaxy"
] | d3381724a3a6a55ed94c49362aad8426d42cd5e14edbc4b1bb7f738e12825a4e |
import logging
import urllib
from functools import partial
from django.conf import settings
from django.core.context_processors import csrf
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect
from mitxmako.shortcuts import render_to_response, render_to_string
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from markupsafe import escape
from courseware import grades
from courseware.access import has_access
from courseware.courses import (get_courses, get_course_with_access,
get_courses_by_university, sort_by_announcement)
import courseware.tabs as tabs
from courseware.masquerade import setup_masquerade
from courseware.model_data import FieldDataCache
from .module_render import toc_for_course, get_module_for_descriptor, get_module
from courseware.models import StudentModule, StudentModuleHistory
from course_modes.models import CourseMode
from django_comment_client.utils import get_discussion_title
from student.models import UserTestGroup, CourseEnrollment
from util.cache import cache, cache_if_anonymous
from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import InvalidLocationError, ItemNotFoundError, NoPathToItem
from xmodule.modulestore.search import path_to_location
from xmodule.course_module import CourseDescriptor
import comment_client
log = logging.getLogger("mitx.courseware")
template_imports = {'urllib': urllib}
def user_groups(user):
    """
    Return the names of the UserTestGroups the given user belongs to,
    cached for one hour. Anonymous users belong to no groups.

    TODO (vshnayder): This is not used. When we have a new plan for groups, adjust appropriately.
    """
    if not user.is_authenticated():
        return []

    # TODO: Rewrite in Django
    cache_key = 'user_group_names_{user.id}'.format(user=user)
    one_hour = 60 * 60

    names = cache.get(cache_key)
    # Kill caching on dev machines -- we switch groups a lot
    if settings.DEBUG:
        names = None

    if names is None:
        names = [group.name for group in UserTestGroup.objects.filter(users=user)]
        cache.set(cache_key, names, one_hour)

    return names
@ensure_csrf_cookie
@cache_if_anonymous
def courses(request):
    """
    Render the "find courses" page. The course selection work itself is
    done in courseware.courses.
    """
    course_list = get_courses(request.user, request.META.get('HTTP_HOST'))
    course_list = sort_by_announcement(course_list)
    return render_to_response("courseware/courses.html",
                              {'courses': course_list})
def render_accordion(request, course, chapter, section, field_data_cache):
    """
    Draw the navigation accordion, opened to the current position.

    chapter and section are url_names; if they are '' or None a default
    accordion is rendered. Returns the rendered html string.
    """
    # Re-fetch the user with groups prefetched and keep that single instance.
    user = User.objects.prefetch_related("groups").get(id=request.user.id)
    request.user = user

    toc = toc_for_course(user, request, course, chapter, section, field_data_cache)

    context = dict(template_imports)
    context.update({
        'toc': toc,
        'course_id': course.id,
        'csrf': csrf(request)['csrf_token'],
        'show_timezone': course.show_timezone,
    })
    return render_to_string('courseware/accordion.html', context)
def get_current_child(xmodule):
    """
    Return the display child that xmodule.position points at.

    Returns None when xmodule has no position attribute at all, or when it
    has no children. An unset or out-of-range position falls back to the
    first child.
    """
    if not hasattr(xmodule, 'position'):
        return None

    # position is 1-indexed; an unset position means "the first child".
    index = 0 if xmodule.position is None else xmodule.position - 1

    children = xmodule.get_display_items()
    if not children:
        return None
    if not (0 <= index < len(children)):
        # Something is wrong with the stored position; default to first child.
        index = 0
    return children[index]
def redirect_to_course_position(course_module):
    """
    Return a redirect to the user's current place in the course.

    First-time visitors go to COURSE/CHAPTER/SECTION; returning visitors go
    to COURSE/CHAPTER, where the view restores the stored section and shows
    a message about reusing the saved position. When no position is stored
    for the course or chapter, the first child is selected.
    """
    url_kwargs = {'course_id': course_module.descriptor.id}

    chapter = get_current_child(course_module)
    if chapter is None:
        # oops. Something bad has happened.
        raise Http404("No chapter found when loading current position in course")
    url_kwargs['chapter'] = chapter.url_name

    if course_module.position is not None:
        return redirect(reverse('courseware_chapter', kwargs=url_kwargs))

    # No stored course position: fall through to the chapter's first section.
    section = get_current_child(chapter)
    if section is None:
        raise Http404("No section found when loading current position in course")
    url_kwargs['section'] = section.url_name
    return redirect(reverse('courseware_section', kwargs=url_kwargs))
def save_child_position(seq_module, child_name):
    """
    Record which child of seq_module the user is on.

    child_name: url_name of the child. Sets seq_module.position to the
    child's 1-indexed slot and persists it, but only when the value
    actually changed.
    """
    for slot, child in enumerate(seq_module.get_display_items(), start=1):
        if child.url_name != child_name:
            continue
        # Only save if position changed
        if slot != seq_module.position:
            seq_module.position = slot
            # Save this new position to the underlying KeyValueStore
            seq_module.save()
def check_for_active_timelimit_module(request, course_id, course):
    """
    Looks for a timing module for the given user and course that is currently active.
    If found, returns a context dict with timer-related values to enable display of time remaining.

    Returns an empty dict when the user has no active timelimit module in
    this course. Raises Http404 when an active module has no
    time_expired_redirect_url configured.
    """
    context = {}
    # TODO (cpennington): Once we can query the course structure, replace this with such a query
    timelimit_student_modules = StudentModule.objects.filter(student=request.user, course_id=course_id, module_type='timelimit')
    if timelimit_student_modules:
        for timelimit_student_module in timelimit_student_modules:
            # get the corresponding section_descriptor for the given StudentModel entry:
            module_state_key = timelimit_student_module.module_state_key
            timelimit_descriptor = modulestore().get_instance(course_id, Location(module_state_key))
            # Instantiate the module so its begun/ended state can be read.
            timelimit_module_cache = FieldDataCache.cache_for_descriptor_descendents(course.id, request.user,
                                                                                    timelimit_descriptor, depth=None)
            timelimit_module = get_module_for_descriptor(request.user, request, timelimit_descriptor,
                                                         timelimit_module_cache, course.id, position=None)
            # Only a timer that has started and not yet ended is "active".
            if timelimit_module is not None and timelimit_module.category == 'timelimit' and \
                    timelimit_module.has_begun and not timelimit_module.has_ended:
                location = timelimit_module.location
                # determine where to go when the timer expires:
                if timelimit_descriptor.time_expired_redirect_url is None:
                    raise Http404("no time_expired_redirect_url specified at this location: {} ".format(timelimit_module.location))
                context['time_expired_redirect_url'] = timelimit_descriptor.time_expired_redirect_url
                # Fetch the remaining time relative to the end time as stored in the module when it was started.
                # This value should be in milliseconds.
                remaining_time = timelimit_module.get_remaining_time_in_ms()
                context['timer_expiration_duration'] = remaining_time
                context['suppress_toplevel_navigation'] = timelimit_descriptor.suppress_toplevel_navigation
                # Where the timer bar's "return" link should lead.
                return_url = reverse('jump_to', kwargs={'course_id': course_id, 'location': location})
                context['timer_navigation_return_url'] = return_url
    return context
def update_timelimit_module(user, course_id, field_data_cache, timelimit_descriptor, timelimit_module):
    """
    Updates the state of the provided timing module, starting it if it hasn't begun.

    Returns dict with timer-related values to enable display of time remaining.
    Returns 'timer_expiration_duration' in dict if timer is still active, and not if timer has expired.

    Raises Http404 when no time_expired_redirect_url is configured, or when
    starting a timer whose descriptor has no duration.

    NOTE(review): the user, course_id and field_data_cache parameters are not
    used in this body -- presumably kept for interface symmetry with other
    module helpers; confirm before removing.
    """
    context = {}
    # determine where to go when the exam ends:
    if timelimit_descriptor.time_expired_redirect_url is None:
        raise Http404("No time_expired_redirect_url specified at this location: {} ".format(timelimit_module.location))
    context['time_expired_redirect_url'] = timelimit_descriptor.time_expired_redirect_url

    if not timelimit_module.has_ended:
        if not timelimit_module.has_begun:
            # user has not started the exam, so start it now.
            if timelimit_descriptor.duration is None:
                raise Http404("No duration specified at this location: {} ".format(timelimit_module.location))
            # The user may have an accommodation that has been granted to them.
            # This accommodation information should already be stored in the module's state.
            timelimit_module.begin(timelimit_descriptor.duration)

        # the exam has been started, either because the student is returning to the
        # exam page, or because they have just visited it.  Fetch the remaining time relative to the
        # end time as stored in the module when it was started.
        context['timer_expiration_duration'] = timelimit_module.get_remaining_time_in_ms()
    # also use the timed module to determine whether top-level navigation is visible:
    context['suppress_toplevel_navigation'] = timelimit_descriptor.suppress_toplevel_navigation
    return context
def chat_settings(course, user):
    """
    Build the settings dict needed to connect to the Jabber chat server
    and course room, or return None (with a warning logged) when
    JABBER_DOMAIN is not configured.
    """
    domain = getattr(settings, "JABBER_DOMAIN", None)
    if domain is None:
        log.warning('You must set JABBER_DOMAIN in the settings to '
                    'enable the chat widget')
        return None

    jid = "{USER}@{DOMAIN}".format(USER=user.username, DOMAIN=domain)
    return {
        'domain': domain,

        # Jabber doesn't like slashes, so replace with dashes
        'room': "{ID}_class".format(ID=course.id.replace('/', '-')),

        'username': jid,

        # TODO: clearly this needs to be something other than the username
        # should also be something that's not necessarily tied to a
        # particular course
        'password': jid,
    }
@login_required
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def index(request, course_id, chapter=None, section=None,
          position=None):
    """
    Displays courseware accordion and associated content.  If course, chapter,
    and section are all specified, renders the page, or returns an error if they
    are invalid.

    If section is not specified, displays the accordion opened to the right chapter.

    If neither chapter or section are specified, redirects to user's most recent
    chapter, or the first chapter if this is the user's first visit.

    Arguments:

     - request    : HTTP request
     - course_id  : course id (str: ORG/course/URL_NAME)
     - chapter    : chapter url_name (str)
     - section    : section url_name (str)
     - position   : position in module, eg of <sequential> module (str)

    Returns:

     - HTTPresponse
    """
    user = User.objects.prefetch_related("groups").get(id=request.user.id)
    request.user = user  # keep just one instance of User
    course = get_course_with_access(user, course_id, 'load', depth=2)
    staff_access = has_access(user, course, 'staff')
    registered = registered_for_course(course, user)
    if not registered:
        # TODO (vshnayder): do course instructors need to be registered to see course?
        log.debug('User %s tried to view course %s but is not enrolled' % (user, course.location.url()))
        return redirect(reverse('about_course', args=[course.id]))

    masq = setup_masquerade(request, staff_access)

    try:
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, user, course, depth=2)

        course_module = get_module_for_descriptor(user, request, course, field_data_cache, course.id)
        if course_module is None:
            log.warning('If you see this, something went wrong: if we got this'
                        ' far, should have gotten a course module for this user')
            return redirect(reverse('about_course', args=[course.id]))

        if chapter is None:
            return redirect_to_course_position(course_module)

        context = {
            'csrf': csrf(request)['csrf_token'],
            'accordion': render_accordion(request, course, chapter, section, field_data_cache),
            'COURSE_TITLE': course.display_name_with_default,
            'course': course,
            'init': '',
            'content': '',
            'staff_access': staff_access,
            'masquerade': masq,
            'xqa_server': settings.MITX_FEATURES.get('USE_XQA_SERVER', 'http://xqa:server@content-qa.mitx.mit.edu/xqa'),
            #@begin:Inform the template that it is in homepage
            #@date:2013-11-02
            'is_index':'True'
            #@end
        }

        # Only show the chat if it's enabled by the course and in the
        # settings.
        show_chat = course.show_chat and settings.MITX_FEATURES['ENABLE_CHAT']
        if show_chat:
            context['chat'] = chat_settings(course, user)
            # If we couldn't load the chat settings, then don't show
            # the widget in the courseware.
            if context['chat'] is None:
                show_chat = False

        context['show_chat'] = show_chat

        chapter_descriptor = course.get_child_by(lambda m: m.url_name == chapter)
        if chapter_descriptor is not None:
            save_child_position(course_module, chapter)
        else:
            raise Http404('No chapter descriptor found with name {}'.format(chapter))

        chapter_module = course_module.get_child_by(lambda m: m.url_name == chapter)
        if chapter_module is None:
            # User may be trying to access a chapter that isn't live yet
            if masq == 'student':  # if staff is masquerading as student be kinder, don't 404
                log.debug('staff masq as student: no chapter %s' % chapter)
                return redirect(reverse('courseware', args=[course.id]))
            raise Http404

        if section is not None:
            section_descriptor = chapter_descriptor.get_child_by(lambda m: m.url_name == section)
            if section_descriptor is None:
                # Specifically asked-for section doesn't exist
                if masq == 'student':  # if staff is masquerading as student be kinder, don't 404
                    log.debug('staff masq as student: no section %s' % section)
                    return redirect(reverse('courseware', args=[course.id]))
                raise Http404

            # cdodge: this looks silly, but let's refetch the section_descriptor with depth=None
            # which will prefetch the children more efficiently than doing a recursive load
            section_descriptor = modulestore().get_instance(course.id, section_descriptor.location, depth=None)

            # Load all descendants of the section, because we're going to display its
            # html, which in general will need all of its children
            section_field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
                course_id, user, section_descriptor, depth=None)
            section_module = get_module(request.user, request,
                                        section_descriptor.location,
                                        section_field_data_cache, course_id, position, depth=None)
            if section_module is None:
                # User may be trying to be clever and access something
                # they don't have access to.
                raise Http404

            # Save where we are in the chapter
            save_child_position(chapter_module, section)

            # check here if this section *is* a timed module.
            if section_module.category == 'timelimit':
                # BUG FIX: previously this passed the undefined name
                # 'student_module_cache' (NameError for any timed section);
                # the section's own field data cache is the intended argument.
                timer_context = update_timelimit_module(user, course_id, section_field_data_cache,
                                                        section_descriptor, section_module)
                if 'timer_expiration_duration' in timer_context:
                    context.update(timer_context)
                else:
                    # if there is no expiration defined, then we know the timer has expired:
                    return HttpResponseRedirect(timer_context['time_expired_redirect_url'])
            else:
                # check here if this page is within a course that has an active timed module running.  If so, then
                # add in the appropriate timer information to the rendering context:
                context.update(check_for_active_timelimit_module(request, course_id, course))

            context['content'] = section_module.runtime.render(section_module, None, 'student_view').content
        else:
            # section is none, so display a message
            prev_section = get_current_child(chapter_module)
            if prev_section is None:
                # Something went wrong -- perhaps this chapter has no sections visible to the user
                raise Http404
            prev_section_url = reverse('courseware_section', kwargs={'course_id': course_id,
                                                                     'chapter': chapter_descriptor.url_name,
                                                                     'section': prev_section.url_name})
            context['content'] = render_to_string('courseware/welcome-back.html',
                                                  {'course': course,
                                                   'chapter_module': chapter_module,
                                                   'prev_section': prev_section,
                                                   'prev_section_url': prev_section_url})

        result = render_to_response('courseware/courseware.html', context)
    except Exception as e:
        if isinstance(e, Http404):
            # let it propagate
            raise

        # In production, don't want to let a 500 out for any reason
        if settings.DEBUG:
            raise
        else:
            log.exception("Error in index view: user={user}, course={course},"
                          " chapter={chapter} section={section}"
                          "position={position}".format(
                              user=user,
                              course=course,
                              chapter=chapter,
                              section=section,
                              position=position
                          ))
            try:
                result = render_to_response('courseware/courseware-error.html',
                                            {'staff_access': staff_access,
                                             'course': course})
            except:
                # Let the exception propagate, relying on global config to at
                # at least return a nice error message
                log.exception("Error while rendering courseware-error page")
                raise

    return result
@ensure_csrf_cookie
def jump_to_id(request, course_id, module_id):
    """
    Shorter jump-to entry point taking just the id of the element, which
    is assumed to be unique within the course_id namespace. Delegates to
    jump_to with the first matching item's full location.
    """
    course_location = CourseDescriptor.id_to_location(course_id)
    matches = modulestore().get_items(
        ['i4x', course_location.org, course_location.course, None, module_id],
        course_id=course_id
    )
    referer = request.META.get("HTTP_REFERER", "")

    if not matches:
        raise Http404("Could not find id = {0} in course_id = {1}. Referer = {2}".
                      format(module_id, course_id, referer))
    if len(matches) > 1:
        log.warning("Multiple items found with id = {0} in course_id = {1}. Referer = {2}. Using first found {3}...".
                    format(module_id, course_id, referer, matches[0].location.url()))

    return jump_to(request, course_id, matches[0].location.url())
@ensure_csrf_cookie
def jump_to(request, course_id, location):
    """
    Show the page that contains a specific location.

    404s when the location is invalid or not in any class. Otherwise
    redirects into the index view, which decides what this user may see.
    """
    # Complain if the location isn't valid
    try:
        location = Location(location)
    except InvalidLocationError:
        raise Http404("Invalid location")

    # Complain if there's no data for this location
    try:
        (course_id, chapter, section, position) = path_to_location(modulestore(), course_id, location)
    except ItemNotFoundError:
        raise Http404("No data at this location: {0}".format(location))
    except NoPathToItem:
        raise Http404("This location is not in any class: {0}".format(location))

    # Redirect to the most specific view the resolved path supports,
    # relying on index to do all error handling and access control.
    if chapter is None:
        return redirect('courseware', course_id=course_id)
    if section is None:
        return redirect('courseware_chapter', course_id=course_id, chapter=chapter)
    if position is None:
        return redirect('courseware_section', course_id=course_id, chapter=chapter, section=section)
    return redirect('courseware_position', course_id=course_id, chapter=chapter, section=section, position=position)
@ensure_csrf_cookie
def course_info(request, course_id):
    """
    Display the course's info.html, or 404 if there is no such course.

    Assumes the course_id is in a valid format.
    """
    course = get_course_with_access(request.user, course_id, 'load')
    is_staff = has_access(request.user, course, 'staff')
    # Allow staff members to toggle masquerade-as-student on the info page.
    masquerade = setup_masquerade(request, is_staff)
    context = {
        'request': request,
        'course_id': course_id,
        'cache': None,
        'course': course,
        'staff_access': is_staff,
        'masquerade': masquerade,
    }
    return render_to_response('courseware/info.html', context)
@ensure_csrf_cookie
def static_tab(request, course_id, tab_slug):
    """
    Display the courses tab with the given name.

    Assumes the course_id is in a valid format.

    Raises Http404 when the tab slug is unknown or the tab contents cannot
    be loaded.
    """
    course = get_course_with_access(request.user, course_id, 'load')
    tab = tabs.get_static_tab_by_slug(course, tab_slug)
    if tab is None:
        raise Http404
    contents = tabs.get_static_tab_contents(
        request,
        course,
        tab
    )
    if contents is None:
        raise Http404
    staff_access = has_access(request.user, course, 'staff')
    return render_to_response('courseware/static_tab.html',
                              {'course': course,
                               'tab': tab,
                               'tab_contents': contents,
                               'staff_access': staff_access, })
# TODO arjun: remove when custom tabs in place, see courseware/syllabus.py
@ensure_csrf_cookie
def syllabus(request, course_id):
    """
    Display the course's syllabus.html, or 404 if there is no such course.

    Assumes the course_id is in a valid format.
    """
    course = get_course_with_access(request.user, course_id, 'load')
    context = {
        'course': course,
        'staff_access': has_access(request.user, course, 'staff'),
    }
    return render_to_response('courseware/syllabus.html', context)
def registered_for_course(course, user):
    """
    Return True if user is registered for course, else False.
    """
    # Missing or anonymous users are never considered registered.
    if user is None or not user.is_authenticated():
        return False
    return CourseEnrollment.is_enrolled(user, course.id)
@ensure_csrf_cookie
@cache_if_anonymous
def course_about(request, course_id):
    """
    Display the "about" page for a course.

    Raises Http404 when the external marketing site is enabled, since that
    site serves the about page instead.
    """
    if settings.MITX_FEATURES.get('ENABLE_MKTG_SITE', False):
        raise Http404
    course = get_course_with_access(request.user, course_id, 'see_exists')
    registered = registered_for_course(course, request.user)
    # Users who can already load the course are sent to the info page;
    # everyone else stays on the about page.
    if has_access(request.user, course, 'load'):
        course_target = reverse('info', args=[course.id])
    else:
        course_target = reverse('about_course', args=[course.id])
    show_courseware_link = (has_access(request.user, course, 'load') or
                            settings.MITX_FEATURES.get('ENABLE_LMS_MIGRATION'))
    return render_to_response('courseware/course_about.html',
                              {'course': course,
                               'registered': registered,
                               'course_target': course_target,
                               'show_courseware_link': show_courseware_link})
@ensure_csrf_cookie
@cache_if_anonymous
#@begin:View of the course
#@date:2013-11-02
def cabout(request, course_id):
    """
    Display the custom "cabout" view of a course.

    NOTE(review): near-verbatim copy of course_about() that only renders a
    different template ('courseware/cabout.html'); consider sharing the
    common context-building logic.
    """
    if settings.MITX_FEATURES.get('ENABLE_MKTG_SITE', False):
        raise Http404
    course = get_course_with_access(request.user, course_id, 'see_exists')
    registered = registered_for_course(course, request.user)
    if has_access(request.user, course, 'load'):
        course_target = reverse('info', args=[course.id])
    else:
        course_target = reverse('about_course', args=[course.id])
    show_courseware_link = (has_access(request.user, course, 'load') or
                            settings.MITX_FEATURES.get('ENABLE_LMS_MIGRATION'))
    return render_to_response('courseware/cabout.html',
                              {
                                  'course': course,
                                  'registered': registered,
                                  'course_target': course_target,
                                  'show_courseware_link': show_courseware_link})
#@end
@ensure_csrf_cookie
@cache_if_anonymous
def mktg_course_about(request, course_id):
    """
    This is the button that gets put into an iframe on the Drupal site.
    """
    try:
        course = get_course_with_access(request.user, course_id, 'see_exists')
    except (ValueError, Http404) as e:
        # If the course does not exist yet, display a "coming soon" button
        # instead of failing.
        return render_to_response('courseware/mktg_coming_soon.html',
                                  {'course_id': course_id})
    registered = registered_for_course(course, request.user)
    # Users who can already load the course go to the info page; others to
    # the about page.
    if has_access(request.user, course, 'load'):
        course_target = reverse('info', args=[course.id])
    else:
        course_target = reverse('about_course', args=[course.id])
    allow_registration = has_access(request.user, course, 'enroll')
    show_courseware_link = (has_access(request.user, course, 'load') or
                            settings.MITX_FEATURES.get('ENABLE_LMS_MIGRATION'))
    course_modes = CourseMode.modes_for_course(course.id)
    return render_to_response('courseware/mktg_course_about.html',
                              {
                                  'course': course,
                                  'registered': registered,
                                  'allow_registration': allow_registration,
                                  'course_target': course_target,
                                  'show_courseware_link': show_courseware_link,
                                  'course_modes': course_modes,
                              })
def render_notifications(request, course, notifications):
    """Render the given notifications to an HTML string."""
    return render_to_string('courseware/notifications.html', {
        'notifications': notifications,
        'get_discussion_title': partial(get_discussion_title, request=request, course=course),
        'course': course,
    })
@login_required
def news(request, course_id):
    """Display the news/notifications page for a course."""
    course = get_course_with_access(request.user, course_id, 'load')
    notifications = comment_client.get_notifications(request.user.id)
    rendered = render_notifications(request, course, notifications)
    return render_to_response('courseware/news.html', {
        'course': course,
        'content': rendered,
    })
@login_required
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def progress(request, course_id, student_id=None):
    """
    User progress. We show the grade bar and every problem score.

    Course staff are allowed to see the progress of students in their class.

    Raises Http404 when a non-staff user requests another student's
    progress, or when the requested student has no access to the course.
    """
    course = get_course_with_access(request.user, course_id, 'load', depth=None)
    staff_access = has_access(request.user, course, 'staff')

    if student_id is None or student_id == request.user.id:
        # Always allowed to see your own profile.
        student = request.user
    else:
        # Requesting access to a different student's profile requires staff.
        if not staff_access:
            raise Http404
        student = User.objects.get(id=int(student_id))

    # NOTE: To make sure impersonation by instructor works, use
    # student instead of request.user in the rest of the function.

    # The pre-fetching of groups is done to make auth checks not require an
    # additional DB lookup (this kills the Progress page in particular).
    student = User.objects.prefetch_related("groups").get(id=student.id)

    field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
        course_id, student, course, depth=None)

    courseware_summary = grades.progress_summary(student, request, course,
                                                 field_data_cache)
    grade_summary = grades.grade(student, request, course, field_data_cache)

    if courseware_summary is None:
        # The student didn't have access to the course (which the
        # instructor requested).
        raise Http404

    context = {'course': course,
               'courseware_summary': courseware_summary,
               'grade_summary': grade_summary,
               'staff_access': staff_access,
               'student': student,
               }
    # (A previous no-op `context.update()` call — update with no arguments —
    # was removed here.)
    return render_to_response('courseware/progress.html', context)
@login_required
def submission_history(request, course_id, student_username, location):
    """Render an HTML fragment (meant for inclusion elsewhere) that renders a
    history of all state changes made by this user for this problem location.

    Right now this only works for problems because that's all
    StudentModuleHistory records.
    """
    course = get_course_with_access(request.user, course_id, 'load')
    staff_access = has_access(request.user, course, 'staff')
    # Permission Denied if they don't have staff access and are trying to see
    # somebody else's submission history.
    if (student_username != request.user.username) and (not staff_access):
        raise PermissionDenied
    try:
        student = User.objects.get(username=student_username)
        student_module = StudentModule.objects.get(course_id=course_id,
                                                   module_state_key=location,
                                                   student_id=student.id)
    except User.DoesNotExist:
        return HttpResponse(escape("User {0} does not exist.".format(student_username)))
    except StudentModule.DoesNotExist:
        return HttpResponse(escape("{0} has never accessed problem {1}".format(student_username, location)))
    # '-id' orders newest entries first.
    history_entries = StudentModuleHistory.objects.filter(
        student_module=student_module
    ).order_by('-id')
    # If no history records exist, let's force a save to get history started.
    if not history_entries:
        student_module.save()
        history_entries = StudentModuleHistory.objects.filter(
            student_module=student_module
        ).order_by('-id')
    context = {
        'history_entries': history_entries,
        'username': student.username,
        'location': location,
        'course_id': course_id
    }
    return render_to_response('courseware/submission_history.html', context)
#@begin:View of the newly added page
#@date:2013-11-02
def my_course_portfolio(request, course_id, student_id=None):
    # Stub view: not implemented. NOTE(review): returning False is not a
    # valid Django response; replace with a real HttpResponse when built.
    return False
def resource_library(request, course_id, student_id=None):
    # Stub view: not implemented (placeholder return, not a Django response).
    return False
def people(request, course_id, student_id=None):
    # Stub view: not implemented (placeholder return, not a Django response).
    return False
#@end
| EduPepperPD/pepper2013 | lms/djangoapps/courseware/views.py | Python | agpl-3.0 | 33,478 | [
"VisIt"
] | e64f5b476ebee3653f63091dcb3ee6207b00200f100bc5802a64d567e914c40b |
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function
import numpy as np
#
#
#
def get_default_log_mesh_param4gto(gto, tol_in=None):
    """
    Determine default (optimal) logarithmic-mesh parameters for a pySCF
    GTO object.

    Args:
        gto: pySCF Mole-like object providing atoms, shells, exponents and
            contraction coefficients.
        tol_in: tolerance used to derive the radial/momentum cutoffs
            (defaults to 1e-7).

    Returns:
        (nr, rmin, rmax, kmax): number of radial points and the radial and
        momentum-space cutoffs for the logarithmic mesh.
    """
    rmin_gcs = 10.0
    rmax_gcs = -1.0
    akmx_gcs = -1.0
    tol = 1e-7 if tol_in is None else tol_in
    seen_species = []  # this is auxiliary to organize the loop over species
    for ia in range(gto.natm):
        # Visit each chemical species only once.
        if gto.atom_symbol(ia) in seen_species: continue
        seen_species.append(gto.atom_symbol(ia))
        for sid in gto.atom_shell_ids(ia):
            for power, coeffs in zip(gto.bas_exp(sid), gto.bas_ctr_coeff(sid)):
                for coeff in coeffs:
                    if coeff == 0.0: continue
                    # Cutoffs derived from where a Gaussian of this exponent
                    # decays to within tol.
                    rmin_gcs = min(rmin_gcs, np.sqrt(abs(np.log(1.0 - tol) / power)))
                    rmax_gcs = max(rmax_gcs, np.sqrt(abs(np.log(abs(coeff)) - np.log(tol)) / power))
                    akmx_gcs = max(akmx_gcs, np.sqrt(abs(np.log(abs(coeff)) - np.log(tol)) * 4 * power))
    # Sanity checks on the derived cutoffs (warn only).
    if rmin_gcs < 1e-9: print('rmin_gcs<1e-9', __name__)  # Last check
    if rmax_gcs > 1e+3: print('rmax_gcs>1e+3', __name__)
    if akmx_gcs > 1e+4: print('akmx_gcs>1e+4', __name__)
    return 1024, rmin_gcs, rmax_gcs, akmx_gcs
#
#
#
def get_default_log_mesh_param4ion(sp2ion):
    """Determines the default (optimal) parameters for radial orbitals
    given on equidistant grid (SIESTA .ion data).

    The docstring previously appeared *after* the import statement, so it
    was a bare string expression and never became the function's __doc__;
    it is now the first statement.

    Args:
        sp2ion: list of per-species ion dictionaries with a "paos" entry
            holding "npts", "delta" and "cutoff" lists.

    Returns:
        (nr, rmin, rmax, kmax) defaults for the logarithmic mesh.
    """
    from pyscf.nao.m_next235 import next235
    # At least 1024 points (or twice the densest equidistant grid), rounded
    # by next235 — presumably to a 2/3/5-smooth size; see pyscf.nao.m_next235.
    npts = max(max(ion["paos"]["npts"]) for ion in sp2ion)
    nr_def = next235(max(2.0 * npts, 1024.0))
    rmin_def = min(min(ion["paos"]["delta"]) for ion in sp2ion)
    rmax_def = 2.3 * max(max(ion["paos"]["cutoff"]) for ion in sp2ion)
    kmax_def = 1.0 / rmin_def / np.pi
    return nr_def, rmin_def, rmax_def, kmax_def
#
#
#
def get_default_log_mesh_param4gpaw(sp2dic):
    """Determine the default (optimal) log-mesh parameters for radial
    orbitals given on an equidistant grid (GPAW setups).

    Returns (nr, rmin, rmax, kmax) defaults for the logarithmic mesh.
    """
    setups = list(sp2dic.values())
    # First non-zero grid point of each species' radial grid, and the
    # outermost grid point of each.
    grid_starts = [s.basis.rgd.r_g[1] for s in setups]
    grid_ends = [s.basis.rgd.r_g[-1] for s in setups]
    nr_def = 1024
    rmin_def = min(grid_starts)
    rmax_def = 2.3 * max(grid_ends)
    kmax_def = 1.0 / rmin_def / np.pi
    return nr_def, rmin_def, rmax_def, kmax_def
# sp2dic = setups.setups
# print('dir(r_g) ', dir(sp2dic[sp2id[1]].basis.rgd.r_g))
# print(sp2dic[sp2id[0]].basis.rgd.r_g.size)
# print(sp2dic[sp2id[1]].basis.rgd.r_g.size)
#
#
#
def funct_log_mesh(nr, rmin, rmax, kmax=None):
    """
    Initializes log grid in real and reciprocal (momentum) spaces.
    These grids are used in James Talman's subroutines.

    Args:
        nr: number of grid points (integer > 2; numpy integers accepted).
        rmin, rmax: first and last radial grid point (rmin > 0).
        kmax: largest momentum grid point; defaults to 1/(rmin*pi).

    Returns:
        (rr, pp): radial and momentum grids of length nr, with
        rr[0] == rmin, rr[-1] == rmax and pp[-1] ~= kmax.
    """
    # Accept numpy integer types as well: the previous `type(nr)==int`
    # check rejected e.g. np.int64 values coming from numpy-based callers.
    assert isinstance(nr, (int, np.integer)) and nr > 2
    rhomin = np.log(rmin)
    rhomax = np.log(rmax)
    kmax = 1.0 / rmin / np.pi if kmax is None else kmax
    # Logarithmic shift of the momentum grid relative to the radial grid.
    kapmin = np.log(kmax) - rhomax + rhomin
    rr = np.exp(np.linspace(rhomin, rhomax, nr))
    pp = rr * (np.exp(kapmin) / rr[0])
    return rr, pp
#
#
#
class log_mesh():
    '''
    Constructor of the log grid used with NAOs (numerical atomic orbitals).

    The initialization path is dispatched on the supplied keyword args:
      gto     -- derive defaults from a pySCF GTO object
      sp2ion  -- derive defaults from SIESTA ion data
      setups  -- derive defaults from GPAW setups
      rr, pp  -- adopt explicitly given radial/momentum grids
    '''
    def __init__(self, **kw):
        # Dispatch on which keywords the caller provided.
        if 'gto' in kw: self.init_log_mesh_gto(**kw)
        elif 'sp2ion' in kw: self.init_log_mesh_ion(**kw)
        elif 'setups' in kw: self.init_log_mesh_gpaw(**kw)
        elif 'rr' in kw and 'pp' in kw: self.init_log_mesh(**kw)
        elif 'xyz_list' in kw: pass  # nothing to initialize for this path
        elif 'ao_log' in kw: pass    # nothing to initialize for this path
        else:
            print(kw.keys())
            raise RuntimeError('unknown init method')

    def init_log_mesh_gto(self, **kw):
        """ Initialize an optimal logarithmic mesh based on Gaussian orbitals from pySCF"""
        # self.gto = gto -- cannot keep the GTO object here, because python3 +
        # deepcopy in m_ao_log_hartree fails.
        gto = kw['gto']
        self.rcut_tol = kw['rcut_tol'] if 'rcut_tol' in kw else 1e-7
        nr_def, rmin_def, rmax_def, kmax_def = get_default_log_mesh_param4gto(gto, self.rcut_tol)
        # Explicit keyword arguments override the derived defaults.
        self.nr = kw['nr'] if "nr" in kw else nr_def
        self.rmin = kw['rmin'] if "rmin" in kw else rmin_def
        self.rmax = kw['rmax'] if "rmax" in kw else rmax_def
        self.kmax = kw['kmax'] if "kmax" in kw else kmax_def
        assert(self.rmin > 0.0); assert(self.kmax > 0.0); assert(self.nr > 2); assert(self.rmax > self.rmin);
        self.rr, self.pp = funct_log_mesh(self.nr, self.rmin, self.rmax, self.kmax)
        return self

    def init_log_mesh_ion(self, **kw):
        """ Initialize an optimal logarithmic mesh based on information from SIESTA ion files"""
        sp2ion = kw['sp2ion']
        self.sp2ion = sp2ion
        nr_def, rmin_def, rmax_def, kmax_def = get_default_log_mesh_param4ion(sp2ion)
        # Explicit keyword arguments override the derived defaults.
        self.nr = kw['nr'] if "nr" in kw else nr_def
        self.rmin = kw['rmin'] if "rmin" in kw else rmin_def
        self.rmax = kw['rmax'] if "rmax" in kw else rmax_def
        self.kmax = kw['kmax'] if "kmax" in kw else kmax_def
        assert(self.rmin > 0.0); assert(self.kmax > 0.0); assert(self.nr > 2); assert(self.rmax > self.rmin);
        self.rr, self.pp = funct_log_mesh(self.nr, self.rmin, self.rmax, self.kmax)
        return self

    def init_log_mesh_gpaw(self, **kw):
        """ This initializes an optimal logarithmic mesh based on setups from GPAW"""
        # self.setups = setups -- same copy problem as in m_ao_log.
        setups = kw['setups']
        nr_def, rmin_def, rmax_def, kmax_def = get_default_log_mesh_param4gpaw(setups.setups)
        # Explicit keyword arguments override the derived defaults.
        self.nr = kw['nr'] if "nr" in kw else nr_def
        self.rmin = kw['rmin'] if "rmin" in kw else rmin_def
        self.rmax = kw['rmax'] if "rmax" in kw else rmax_def
        self.kmax = kw['kmax'] if "kmax" in kw else kmax_def
        assert(self.rmin > 0.0); assert(self.kmax > 0.0); assert(self.nr > 2); assert(self.rmax > self.rmin);
        self.rr, self.pp = funct_log_mesh(self.nr, self.rmin, self.rmax, self.kmax)
        return self

    def init_log_mesh(self, **kw):
        """ Taking over the given grid rr and pp"""
        rr, pp = kw['rr'], kw['pp']
        assert(len(pp) == len(rr))
        self.rr, self.pp = rr, pp
        self.nr = len(rr)
        self.rmin = rr[0]
        self.rmax = rr[-1]
        self.kmax = pp[-1]
        return self
| gkc1000/pyscf | pyscf/nao/log_mesh.py | Python | apache-2.0 | 6,338 | [
"GPAW",
"Gaussian",
"PySCF",
"SIESTA"
] | bfa8f0a495ec8ab3ad7b05d85eed7db2f17ef9ba50b3dc6226b1f9ebc6fe40cc |
# Import the necessary modules.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# For easily grabbing several file names
import glob
# For image processing functions
import skimage.io
import skimage.exposure
import skimage.filters
import skimage.morphology
# In this script, we will compute the growth rate of a bacterial colony over
# time through high-magnification microscopy. The provided images are of
# E. coli cells growing on a hard agar substrate supplemented with a rich
# medium to grow in. The images are at a magnification of 100x and all images
# were taken at 5 minute intervals. In the coming code, we will perform some
# basic segmentation on these images and compute the growth rate.

# We'll start by looking at an image somewhere in the middle of the
# movie. At this point, there are a bunch of cells stacked next to each other
# forming a microcolony. Our goal will be to figure out some way of measuring
# how much area of the image is composed of bacterial cells. To load this
# image, we will pull from an incredibly useful image processing module called
# scikit-image. We imported this (along with some other modules) above by
# importing skimage.
plt.close('all')

# Load the example image.
im = skimage.io.imread('data/ecoli_growth/ecoli_phase_24.tif')

# Show it.
plt.figure()
plt.imshow(im, cmap=plt.cm.Greys_r)
plt.show()

# Notice that all of the cells are black and the background is a light gray.
# Remember, an image is data -- it is simply a two-dimensional array of pixel
# values. By hovering over the above image with our mouse, we can see that
# all of the cells are dark (having values of a few hundred counts) while the
# background is much higher. Note that illumination between frames
# may be slightly different, making it more difficult to draw a very strict
# boundary for the threshold of what is a cell. However, the approximate ratio
# of the cell pixel values to the background values should be constant. We
# can convert this image to a floating point image which will rescale all of
# the values from 0 to 1.0.

# Make the image a float (min/max rescaled to [0, 1]).
im_float = (im - im.min()) / (im.max() - im.min())

# How do we distinguish what is cell mass and what is background? We could
# hover over a bunch of cells to get the idea of what the pixel counts are, but
# a more useful method is to look at the image histogram. This will show us
# the frequency of a given pixel count in the image.

# Show the image histogram.
plt.figure()
plt.hist(im_float.flatten(), bins=500)
plt.xlabel('pixel value')
plt.ylabel('frequency')
plt.show()

# We can see that there are two humps in the histogram. The leftmost hump is
# most likely the cells while the largest hump is likely the background
# pixels. Let's just show every pixel that is below a trial threshold to see
# if those are really cells.
plt.figure()
plt.imshow(im_float < 0.27, cmap=plt.cm.Greys_r)
plt.show()

# We did a good job of only selecting the bacteria; however, we also got a
# bunch of the background! This is because the illumination is uneven, meaning
# that the left-most part of the image is darker than the right-most part. We
# can correct for this by doing something called a background subtraction. To
# perform this, we'll very heavily blur the image and subtract it from the
# original float image. This means that large variances in illumination will
# be removed while the smaller structures (such as the bacteria) will be
# preserved. Let's blur the image and perform the subtraction.
im_blur = skimage.filters.gaussian(im_float, sigma=30.0)
im_sub = im_float - im_blur
plt.figure()
plt.imshow(im_sub, cmap=plt.cm.Greys_r)
plt.title('background subtracted')
plt.show()

# That is much better! Let's look at the histogram again and try to choose a
# better threshold.
plt.figure()
plt.hist(im_sub.flatten(), bins=500)
plt.xlabel('pixel value')
plt.ylabel('frequency')
plt.title('background subtracted image histogram')

# We see the two peaks much more clearly now. Also, it makes sense that the
# peak of the background pixels is the most frequent as most pixels in our
# image are actually background. Let's go ahead and apply the threshold.
im_thresh = im_sub < -0.05
plt.figure()
plt.imshow(im_thresh, cmap=plt.cm.Greys_r)
plt.show()

# We did a much better job, but there are a bunch of small dots around. These
# are stray pixels in the background which are falling below our threshold.
# We can get rid of these by removing all of the objects which are less than
# 50 square pixels. There is a simple command for this packaged in the
# skimage module.
im_large = skimage.morphology.remove_small_objects(im_thresh, min_size=50)
plt.figure()
plt.imshow(im_large, cmap=plt.cm.Greys_r)
plt.show()

# That is much better! But how do we measure area? Remember, by doing the
# thresholding, we are generating a binary image with a pixel value of 1
# wherever there are 'bacterial cells' and 0 elsewhere. In order to get the
# bacterial area of the image, we can simply sum up all values of the image!
bacterial_area = np.sum(im_large)
print('Bacterial area in this frame is ' + str(bacterial_area) + ' sq pixels')
# In order to measure the growth rate of this colony, we will want to
# iterate across all images in this experiment, repeat the steps above, and
# calculate the area. We can then plot the bacterial area as a function of
# time and get a measure of the growth rate.

# We'll start by getting a list of all of the image names in the folder. We
# can do this using another python module called glob.
# NOTE(review): glob does not guarantee that the frames come back in
# numeric order; consider sorted(glob.glob(...)) to be safe.
image_names = glob.glob('data/ecoli_growth/ecoli_phase_*.tif')

# The asterisk means it will get all file names that match that pattern where
# anything can occur between `ecoli_phase_` and `.tif`. With this set of
# file names, now we can simply iterate through each file and perform the same
# set of steps!
cell_area = np.zeros(len(image_names))  # Make an empty storage vector.
for i in range(len(image_names)):
    # Load the image.
    im = skimage.io.imread(image_names[i])

    # Make the image a float.
    im_float = (im - im.min()) / (im.max() - im.min())

    # Perform the background subtraction.
    im_blur = skimage.filters.gaussian(im_float, sigma=30.0)
    im_sub = im_float - im_blur

    # Apply the threshold.
    im_thresh = im_sub < -0.05
    im_large = skimage.morphology.remove_small_objects(im_thresh, min_size=10)

    # Compute the cell area.
    cell_area[i] = np.sum(im_large)

# Let's plot the bacterial area as a function of time (5 min per frame).
time = np.arange(0, len(image_names) * 5, 5)
plt.figure()
plt.plot(time, cell_area, 'o')
plt.xlabel('time (min)')
plt.ylabel('cell area (sq. pixels)')
plt.show()
# Since we predict that the growth is exponential with time, this plot should
# be approximately linear on a log-Y scale.
plt.figure()
plt.plot(time, np.log(cell_area), 'o')
plt.xlabel('time (min)')
plt.ylabel('log(cell_area (sq. pixels))')
plt.show()

# That is impressively linear given how rough our segmentation algorithm is.
# Let's fit this trend to an exponential curve to find the doubling time. If
# we assume that this growth is exponential, then we can fit
#
#  A_t = A_0 * exp(k * t)
#  ln(A_t) = ln(A_0) + k * t.
#
# To determine the doubling time, this is a simple rearrangement of the above
# linear equation to
#
#  t_double = ln(2) / k.
#
# To do this, we'll use the NumPy polyfit function, although there are many
# different available utilities to do this.
linear_fit = np.polyfit(time, np.log(cell_area), 1)

# The output of this function is an array with the slope and intercept.
slope, intercept = linear_fit

# Now, let's compute the doubling time.
t_double = np.log(2) / slope
print('The doubling time is ' + str(t_double) + ' min.')

# Let's also plot the fit on our raw data.
plt.figure()
fit_curve = intercept + slope * time
plt.plot(time, fit_curve, 'k-', label='fit')
plt.plot(time, np.log(cell_area), 'o', label='experiment')
plt.xlabel('time (min)')
plt.ylabel('log(cell area (sq. pixels))')
plt.show()

# That is a pretty good fit! The computed value for the doubling time also
# makes some sense with our intuition of how cells grow. In class, we
# determined that our rule of thumb for E. coli growth is around twenty
# minutes whereas our fitted value is a little bit over that. There are a few
# reasons for this. First, our segmentation isn't 'perfect'. We aren't really
# segmenting most of the cells when the colony is fully developed. Secondly,
# these cells are growing in an aerobic environment sandwiched between the pad
# and the glass under the microscope. However, in just around 60 lines of
# actual code, we are able to get a pretty good measure!
| RPGroup-PBoC/gist_pboc_2017 | code/ecoli_growth_phase.py | Python | mit | 8,724 | [
"Gaussian"
] | 221827c896c9c19e85301df909b996b8a05e279e723f9938fb287bebe3b12544 |
import math
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
def grayscale(img):
    """Applies the Grayscale transform.

    This will return an image with only one color channel.
    Uses COLOR_BGR2GRAY because cv2.imread loads images in BGR order.
    """
    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
    """Applies the Canny edge detector with the given hysteresis thresholds."""
    return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
    """Applies a square kernel_size x kernel_size Gaussian blur kernel."""
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
    """
    Applies an image mask.

    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    """
    # defining a blank mask to start with
    mask = np.zeros_like(img)
    # defining a 3 channel or 1 channel color to fill the mask with depending on the input image
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255
    # filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    # returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
def draw_lines(img, lines, color=(255, 0, 0), thickness=2):
    """
    Draw each (x1, y1, x2, y2) segment in `lines` onto `img`.

    Color images are modified in place. A grayscale input cannot hold a
    color line, so it is stacked into a 3-channel image first; in that
    case the drawing only exists in the stacked copy, which is why the
    (possibly new) image is returned. The previous version discarded
    that copy, so drawing on a grayscale image silently did nothing.

    Segments with an endpoint outside the image are skipped with a
    diagnostic message. The default color is now a tuple to avoid a
    mutable default argument (callers passing lists still work).
    """
    if len(img.shape) == 2:  # grayscale image -> make a "color" image out of it
        img = np.dstack((img, img, img))
    height, width = img.shape[0], img.shape[1]
    for line in lines:
        for x1, y1, x2, y2 in line:
            if 0 <= x1 < width and 0 <= y1 < height and \
               0 <= x2 < width and 0 <= y2 < height:
                cv2.line(img, (x1, y1), (x2, y2), color, thickness)
            else:
                print('BAD LINE (%d, %d, %d, %d)' % (x1, y1, x2, y2))
    return img
def weighted_img(img, initial_img, alpha=0.8, beta=1., lamda=0.):
    """Blend two images: initial_img * alpha + img * beta + lamda."""
    return cv2.addWeighted(initial_img, alpha, img, beta, lamda)
def preprocess(img):
    """Convert the input frame to grayscale and smooth it.

    The 5x5 Gaussian blur suppresses pixel noise before edge detection.
    """
    gray = grayscale(img)
    return gaussian_blur(gray, 5)
def apply_canny(img):
    """Run the Canny edge detector with tuned hysteresis thresholds (55/140)."""
    return canny(img, 55, 140)
def select_region_of_interest(img):
    '''
    Defines the ROI trapezoid and masks the image with it. The vertex
    fractions are tuned manually for these images; change them for other
    camera setups (an image-analysis tool such as MATLAB's helps to pick
    pixel coordinates).
    '''
    h = 20
    # Trapezoid vertices: bottom-left, top-left, top-right, bottom-right.
    v1 = (0 + h, img.shape[0])
    v2 = (img.shape[1] / 3.2, img.shape[0] / 2)
    v3 = (img.shape[1] / 1.64, img.shape[0] / 2)
    v4 = (img.shape[1] / 1.28, img.shape[0])
    '''
    h = 20
    k = 1.35
    v1 = (0 + h, img.shape[0])
    v2 = (img.shape[1] / 1.85 +h, img.shape[0] / 2)
    v3 = (img.shape[1] / 1.98*k -h, img.shape[0] / 2)
    v4 = (img.shape[1] /1.35, img.shape[0])
    '''
    return region_of_interest(img, np.array([[v1, v2, v3, v4]], dtype=np.int32))
def extract_edges(img):
    """Detect Canny edges and mask them down to the region of interest."""
    edges = apply_canny(img)
    return select_region_of_interest(edges)
def detect_lines(img_canny_masked):
    """Runs the probabilistic Hough transform to detect line segments in
    the masked edge image.

    Returns the raw cv2.HoughLinesP output: an array of (x1, y1, x2, y2)
    segments (None when no segment is found).
    """
    # Accumulator resolution and vote/segment-length thresholds.
    rho_res = .1  # [pixels]
    theta_res = np.pi / 180.  # [radians]
    threshold = 7  # [# votes]
    min_line_length = 11  # [pixels]
    max_line_gap = 1  # [pixels]
    lines = cv2.HoughLinesP(img_canny_masked, rho_res, theta_res, threshold, np.array([]),
                            minLineLength=min_line_length, maxLineGap=max_line_gap)
    return lines
def fitLine(line_points):
    """Given 2 points (x1, y1, x2, y2), compute the line equation
    y = m*x + b.

    Returns:
        (m, b): slope and intercept. A vertical segment (x1 == x2) yields
        (inf, nan) instead of raising ZeroDivisionError as the previous
        version did; callers that filter on |m| (see extract_lanemarkings)
        simply discard such segments.
    """
    x1 = line_points[0]
    y1 = line_points[1]
    x2 = line_points[2]
    y2 = line_points[3]
    if x2 == x1:
        # Vertical segment: infinite slope, intercept undefined.
        return (float('inf'), float('nan'))
    m = (y2 - y1) / (x2 - x1)
    b = y1 - m * x1
    return (m, b)
def extract_lanemarkings(img_shape, lines):
    """ Given a list of lines (detected by the Probabilistic Hough transform),
    average and extrapolate them in order to come up with 2 single
    lines, corresponding to the left and right lanemarkings """
    # Slope-magnitude band used to reject nearly-horizontal and
    # nearly-vertical segments.
    slope_min = 0.8
    slope_max = 2.4
    m1 = np.array([])
    b1 = np.array([])
    m2 = np.array([])
    b2 = np.array([])
    y_min = img_shape[0]
    for line_points in lines:
        # Fit to line equation (m, b)
        (m, b) = fitLine(line_points)
        # Filter line by slope
        if abs(m) > slope_min and abs(m) < slope_max:
            # Track the highest point (smallest y) of any accepted segment.
            y_min = min(y_min, line_points[1])
            y_min = min(y_min, line_points[3])
            # Separate into left/right using the sign of the slope
            if (m > 0):
                m1 = np.append(m1, m)
                b1 = np.append(b1, b)
            else:
                m2 = np.append(m2, m)
                b2 = np.append(b2, b)
    # Average the two main lines.
    # NOTE(review): if one side has no accepted segments, np.mean of an
    # empty array yields nan and the output degenerates -- TODO confirm the
    # caller guarantees segments on both sides.
    m1 = np.mean(m1)
    b1 = np.mean(b1)
    m2 = np.mean(m2)
    b2 = np.mean(b2)
    # Compute the crossing (x,y) point of the two averaged lines.
    x_cross = (b2 - b1) / (m1 - m2)
    y_cross = m1 * x_cross + b1
    # End point of the lines: at most the crossing point
    y_end = max(y_cross, y_min)
    # Compute the (x) coordinates where each line crosses the
    # bottom edge of the image
    y1 = img_shape[0] - 1
    x1 = (y1 - b1) / m1
    y2 = img_shape[0] - 1
    x2 = (img_shape[0] - b2) / m2
    x_end1 = (y_end - b1) / m1
    x_end2 = (y_end - b2) / m2
    return np.array([[[x1, y1, x_end1, y_end]], [[x2, y2, x_end2, y_end]]]).astype(int)
def overlay_lanemarkings(img, lanemarkings):
    """Return img with the lanemarkings blended on top as thick red lines."""
    # Draw the lanemarkings on an all-black canvas of the same shape...
    canvas = np.copy(img) * 0
    draw_lines(canvas, lanemarkings, color=[255, 0, 0], thickness=10)
    # ...then alpha-blend the canvas onto the original frame.
    return weighted_img(canvas, img)
def pipeline(img_original):
    """
    Process the input image and output an annotated version of it,
    where the left and right lane markings are detected and overlaid.
    """
    # Pre-process: grayscale + Gaussian blur.
    img = preprocess(img_original)
    # Extract edges restricted to the region of interest.
    img_edges = extract_edges(img)
    # Detect raw Hough line segments.
    lines = detect_lines(img_edges)
    # Debug image with all raw segments (not returned below).
    img_lines = np.copy(img_original)
    draw_lines(img_lines, lines)
    # Extract left and right lanemarkings from the lines
    lanemarkings = extract_lanemarkings(img.shape, lines.squeeze())
    # Produce output
    img_out = overlay_lanemarkings(img_original, lanemarkings)
    # img_out = img_lines
    return img_out
# Process the numbered image sequence frame by frame and write the
# annotated result of frame k to "k.png".
i = 0
cap = cv2.VideoCapture('um_%06d.png')  # printf-style pattern: image sequence
while cap.isOpened():
    ret, img = cap.read()
    if not ret:
        # End of the sequence: cap.read() returns (False, None). The
        # previous version did not check `ret` and crashed on img.shape.
        break
    imgout = pipeline(img)
    cv2.imwrite(str(i) + '.png', imgout)
    i = i + 1
    cv2.waitKey()

# Passing a single image
'''
img=cv2.imread('um_000000.png')
imgout=pipeline(img)
cv2.imshow('output',imgout)
cv2.waitKey()
'''
| shivamsardana/Simple-Lane-Detection-System | main.py | Python | mit | 7,667 | [
"Gaussian"
] | 8de455a389630f9a63dceb3672eaabf10c03946c2142f8fa1810156412438131 |
import re
from os.path import basename
import sys
import argparse
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
# When the user supplies an invalid sort key
class InvalidSortKey(Exception):
    """Raised when a user-supplied sort key is invalid."""
    pass
def main():
    """Parse CLI arguments, sort every input FASTA file, concatenate the
    records row-wise and write the merged FASTA records to stdout."""
    args = parse_args()
    # Mapping of file basename -> sorted list of SeqRecords.
    srpf = sort_seq_files(args.seqfiles, args.delimiter, args.sortkeys)
    records = combine_seqs_inorder(
        srpf,
        args.delimiter,
        args.sortkeys,
    )
    SeqIO.write(records, sys.stdout, 'fasta')
def parse_args(argv=None):
    """Build the command-line parser and parse the arguments.

    Args:
        argv: optional list of argument strings; defaults to sys.argv[1:].
            (New optional parameter — makes the function unit-testable
            while keeping the zero-argument call backward compatible.)

    Returns:
        argparse.Namespace with seqfiles, delimiter and sortkeys.
    """
    parser = argparse.ArgumentParser(
        description='Merges multiple fasta files into one list of '
                    'concatenated single records. '
                    'Essentially creates a 2-D matrix from all the files and '
                    'their records. Then sorts all columns(files). Then sorts '
                    'all rows(sequence records). Once sorting is done, then '
                    'concatenates across the rows in ascending order. This '
                    'is useful for building genomes from segment fasta files.'
    )
    parser.add_argument(
        'seqfiles',
        nargs='+',
        help='List of sequence files to concatenate'
    )
    parser.add_argument(
        '--delimiter',
        '-d',
        default='__',
        help='The delimiter to use to split the sequence id[Default: %(default)s]'
    )
    parser.add_argument(
        '--sortkeys',
        '-k',
        default=[1, 2],
        nargs='+',
        type=int,
        # Fixed user-visible typo: "delmiter" -> "delimiter".
        help='Which columns in the sequence id to use after split using '
             'delimiter[Default: %(default)s]'
    )
    return parser.parse_args(argv)
def sort_seq_files(seqfiles, delimiter, sortkeys):
    '''
    Create a dictionary mapping each seqfile basename to its sequences.
    Sort the sequences in each item of the dictionary.
    Return dictionary with sorted seqrecords for each file.
    '''
    seqsperfile = {}
    for seqfile in seqfiles:
        seqs = list(SeqIO.parse(seqfile, 'fasta'))
        # sort_sequences (defined elsewhere in this module) sorts in place
        # using the id fields selected by delimiter/sortkeys.
        sort_sequences(seqs, delimiter, sortkeys)
        seqsperfile[basename(seqfile)] = seqs
    return seqsperfile
def combine_seqs_inorder(seqrecordsperfile, delimiter, sortkeys):
    '''
    Concatenate the records of several files position by position.

    Assumes seqrecordsperfile has each seqrecord list already sorted
    (use sort_seq_files to build such a dictionary). The record lists
    are transposed so that all first records, all second records, ...
    are grouped together; each group is sorted with sort_sequences and
    then concatenated into a single record via cat_seqrecords.

    seqrecordsperfile - dictionary mapping {name: [seqrecords],}
        probably from sort_seq_files
    delimiter - What to split each seqrecord.id on
    sortkeys - What columns after splitting to sort on in order

    return single seqrecord list with combined seqrecords

    Raises Exception when the per-file record lists differ in length.
    '''
    # This will hold the resulting concatted sequence records
    seqrecords = []
    # Gets only the seqrecords lists which should be sorted now.
    # list() also materializes the Python 3 dict view.
    perfilerecords = list(seqrecordsperfile.values())
    # Ensure all list lengths are same
    if len(set(map(len, perfilerecords))) != 1:
        # We will handle this better some time later
        raise Exception("Not all sequence record lists have same length")
    # Transpose lists so that we get one group per index across all files.
    # Bug fix: materialize as a real list -- on Python 3 map() returns an
    # iterator, so the original len(transposed)/transposed[i] usage raised
    # TypeError. This form works on both Python 2 and 3.
    transposed = [list(group) for group in zip(*perfilerecords)]
    # Sort each group in place, then cat it into one combined record
    for group in transposed:
        sort_sequences(group, delimiter, sortkeys)
        seqrecords.append(cat_seqrecords(group, delimiter, sortkeys))
    return seqrecords
def cat_seqrecords(seqrecords, delimiter, sortkeys, keepdescriptions=True):
    '''
    Concatenate the sequences of all given records, left to right, and
    return the combined Bio.SeqRecord.

    The id of the result is the first sortkeys column of each record id
    split on delimiter (the value from the last record wins; callers
    group records so those columns agree). The description is all
    original ids joined by commas, or the empty string when
    keepdescriptions is False. The alphabet of the first record is
    reused for the combined sequence.
    '''
    sequence_parts = []
    combined_id = ''
    id_list = []
    for record in seqrecords:
        fields = split_seq_id(record, delimiter)
        # sortkeys is 1-indexed, hence the -1
        combined_id = fields[sortkeys[0] - 1]
        sequence_parts.append(str(record.seq))
        if keepdescriptions:
            id_list.append(record.id)
    combined_description = ','.join(id_list) if id_list else ''
    # New record: joined sequences, alphabet borrowed from the first record
    return SeqRecord(
        Seq(
            ''.join(sequence_parts),
            seqrecords[0].seq.alphabet
        ),
        id = combined_id,
        description = combined_description,
    )
def sort_sequences(seqs, delimiter, sortkeys):
    '''
    Sort a list of Biopython seqrecords in place, ordering by the id
    fields selected by the 1-indexed sortkeys after splitting each
    seq.id with the delimiter.

    Raises InvalidSortKey when a sort key points past the available
    id fields.
    '''
    def sort_key(record):
        '''Build an uppercase compound key from the selected id fields.'''
        fields = split_seq_id(record, delimiter)
        parts = []
        for position in sortkeys:
            try:
                # sortkeys is 1-indexed, so shift down for list access
                parts.append(fields[position - 1].upper())
            except IndexError:
                raise InvalidSortKey(
                    '{0} is an invalid sort key for the identifier {1}'.format(
                        position, record.id
                    )
                )
        return ''.join(parts)
    seqs.sort(key=sort_key)
def split_seq_id(seqrecord, delimiter):
    '''
    Split seqrecord.id on delimiter (treated as a regular expression)
    and return the resulting list of id fields.
    '''
    fields = re.split(delimiter, seqrecord.id)
    return fields
| VDBWRAIR/bio_bits | bio_pieces_old/cat_sequences.py | Python | gpl-2.0 | 5,801 | [
"Biopython"
] | 343b428a800af08d66fb667ce64bd4fcac994f2e5cf0a98f3f1b8ff8be637861 |
import numpy
import chainer
from chainer import cuda
def exponential_decay_noise(xp, shape, dtype, hook, opt):
    """Time-dependent annealed Gaussian noise function from the paper:
    `Adding Gradient Noise Improves Learning for Very Deep Networks
    <https://arxiv.org/pdf/1511.06807>`_.

    Draws zero-mean Gaussian noise whose standard deviation decays with
    the optimizer step ``opt.t`` as ``sqrt(hook.eta / (1 + t) ** 0.55)``.
    """
    # Annealing schedule: variance = eta / (1 + t) ** 0.55
    annealed_variance = hook.eta / numpy.power(1 + opt.t, 0.55)
    deviation = numpy.sqrt(annealed_variance)
    sample = xp.random.normal(0, deviation, shape)
    return sample.astype(dtype)
class GradientNoise(object):
    """Optimizer/UpdateRule hook function for adding gradient noise.
    This hook function simply adds noise generated by the ``noise_func``
    to the gradient. By default it adds time-dependent annealed Gaussian
    noise to the gradient at every training step:
    .. math::
        g_t \\leftarrow g_t + N(0, \\sigma_t^2)
    where
    .. math::
        \\sigma_t^2 = \\frac{\\eta}{(1+t)^\\gamma}
    with :math:`\\eta` selected from {0.01, 0.3, 1.0} and
    :math:`\\gamma = 0.55`.
    Args:
        eta (float): Parameter that defines the scale of the noise. For
            the default noise function, it is recommended that it be either
            0.01, 0.3 or 1.0.
        noise_func (function): Noise generating function which by default
            is given by `Adding Gradient Noise Improves Learning for Very Deep
            Networks <https://arxiv.org/pdf/1511.06807>`_.
    Attributes:
        ~optimizer_hooks.GradientNoise.timing (string): Specifies
            when this hook should be called by the
            Optimizer/UpdateRule. Valid values are
            'pre' (before any updates) and 'post' (after any
            updates).
        ~optimizer_hooks.GradientNoise.call_for_each_param (bool): Specifies
            if this hook is called for each parameter (``True``)
            or only once (``False``) by an optimizer to
            which this hook is registered. This function does
            not expect users to switch the value from default one,
            which is `True`.
    .. versionadded:: 4.0.0
       The *timing* parameter.
    """
    name = 'GradientNoise'
    call_for_each_param = True
    # Runs before the update so the noisy gradient is what gets applied
    timing = 'pre'
    def __init__(self, eta, noise_func=exponential_decay_noise):
        self.eta = eta
        self.noise_func = noise_func
    def __call__(self, rule, param):
        # Parameters that did not take part in the backward pass have no
        # gradient; nothing to perturb in that case.
        g = param.grad
        if g is None:
            return
        with chainer.using_device(param.device):
            xp = param.device.xp
            # noise_func receives this hook (for eta) and the update rule
            # (for the step counter t)
            noise = self.noise_func(xp, g.shape, g.dtype, self, rule)
            if xp is cuda.cupy:
                # Fused elementwise kernel: adds in place on the GPU
                # without allocating an extra temporary
                kernel = cuda.elementwise(
                    'T noise', 'T g', 'g += noise', 'gradient_noise')
                kernel(noise, g)
            else:
                g += noise
| pfnet/chainer | chainer/optimizer_hooks/gradient_noise.py | Python | mit | 2,840 | [
"Gaussian"
] | a214ffec191518eed59e288e8f59e1d38a2b4fc495e9fa59b21ed7b78ea4add2 |
"""
Dist Plot
=========
_thumb: .2, .8
_example_title: Plot distribution.
"""
import matplotlib.pyplot as plt
import numpy as np
import arviz as az
az.style.use("arviz-darkgrid")
a = np.random.poisson(4, 1000)
b = np.random.normal(0, 1, 1000)
_, ax = plt.subplots(1, 2, figsize=(10, 4))
az.plot_dist(a, color="C1", label="Poisson", ax=ax[0])
az.plot_dist(b, color="C2", label="Gaussian", ax=ax[1])
plt.show()
| arviz-devs/arviz | examples/matplotlib/mpl_plot_dist.py | Python | apache-2.0 | 415 | [
"Gaussian"
] | 63bd23cc0e5b3a3771229373806c33e98199d45efaf19f2a3a6c735df1cb2d05 |
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import Any
from typing import cast
from typing import List
from typing import Dict
from typing import Union
from typing import Optional
#from typing import TypedDict
from xml.etree.ElementTree import Element
from kivy.core.audio import SoundLoader
from kivy.logger import Logger
from kivy.config import ConfigParser
from ORCA.ui.ShowErrorPopUp import ShowErrorPopUp
from ORCA.utils.LogError import LogError
from ORCA.utils.XML import GetXMLTextAttribute
from ORCA.utils.XML import LoadXMLFile
from ORCA.utils.TypeConvert import ToFloat
from ORCA.utils.TypeConvert import ToBool
from ORCA.utils.Platform import OS_RegisterSoundProvider
from ORCA.utils.ConfigHelpers import Config_GetDefault_Int
from ORCA.utils.FileName import cFileName
import ORCA.Globals as Globals
__all__ = ['cSound']
'''
# reserved for python 3.8
class dSoundDef(TypedDict):
"""
Little typing helper
"""
oFnSound: Optional[cFileName]
iSoundVolume: int
'''
from typing import TypeVar
dSoundDef = Dict
class cSound:
    """ Represents the Sound Object: maps logical sound names to sound
    files, tracks per-sound volume levels (0-100) and plays them through
    Kivy's SoundLoader. """
    def __init__(self) ->None:
        # Map of logical sound names to their sound file / volume pair.
        # The oFnSound entries are filled in later by LoadSoundsDescription.
        self.aSounds:Dict[str,dSoundDef] = { u'startup'     :{"oFnSound":None,"iSoundVolume":100},
                                             u'shutdown'    :{"oFnSound":None,"iSoundVolume":100},
                                             u'error'       :{"oFnSound":None,"iSoundVolume":100},
                                             u'message'     :{"oFnSound":None,"iSoundVolume":100},
                                             u'question'    :{"oFnSound":None,"iSoundVolume":100},
                                             u'notification':{"oFnSound":None,"iSoundVolume":100},
                                             u'ring'        :{"oFnSound":None,"iSoundVolume":100},
                                             u'success'     :{"oFnSound":None,"iSoundVolume":100},
                                             u'click'       :{"oFnSound":None,"iSoundVolume":100}}
        # Cache of loaded Kivy sound objects, keyed by the file name string
        self.dSoundObjects:Dict[str,Any] = {}
        self.aSoundsList:List[str] = []  # List of all available soundsets (Just their names)
        # Global mute flag; when True, PlaySound becomes a no-op
        self.bMute = False
        OS_RegisterSoundProvider()
    def Init(self) ->None:
        """ get a list of all sounds (folder names below the sounds root) """
        self.aSoundsList = Globals.oPathSoundsRoot.GetFolderList()
    def LoadSoundsDescription(self) ->None:
        """ Loads the sound description (tunes) from sounds.xml and fills
        the oFnSound entries of self.aSounds """
        try:
            Logger.debug (u'TheScreen: Loading Sounds')
            oET_Root:Element = LoadXMLFile(oFile=Globals.oFnSoundsXml)
            if oET_Root is not None:
                # Each <sound> node maps a logical name to a sound file
                for oXMLSound in oET_Root.findall('sound'):
                    uSoundName:str  = GetXMLTextAttribute(oXMLNode=oXMLSound,uTag=u'name',bMandatory=False,vDefault=u'')
                    oFnSound:cFileName = cFileName('').ImportFullPath(uFnFullName=GetXMLTextAttribute(oXMLNode=oXMLSound,uTag=u'file',bMandatory=False,vDefault=u''))
                    if uSoundName in self.aSounds:
                        self.aSounds[uSoundName]["oFnSound"]=oFnSound
                    else:
                        # NOTE(review): oFnSound is a cFileName, not a str --
                        # confirm cFileName supports '+' with str, otherwise
                        # this line raises (caught by the except below)
                        Logger.warning(u'Unknown Sound:'+oFnSound)
        except Exception as e:
            ShowErrorPopUp(uMessage=LogError(uMsg=u'TheScreen: LoadSoundDescription: can\'t load SoundDescription',oException=e))
    def ReadSoundVolumesFromConfig(self,*,oConfig:ConfigParser) -> None:
        """
        Reads the sound volumes from the given configparser
        (one 'soundvolume_<name>' option per sound, plus the global
        'sound_muteall' flag, all in the ORCA section)
        """
        for uSoundName in self.aSounds:
            self.SetSoundVolume(uSoundName=uSoundName,iValue=Config_GetDefault_Int(oConfig=oConfig, uSection=u'ORCA', uOption=u'soundvolume_' + uSoundName, uDefaultValue=u'100'))
        self.bMute = ToBool(Config_GetDefault_Int(oConfig=oConfig, uSection=u'ORCA', uOption=u'sound_muteall', uDefaultValue=u'0'))
    def SetSoundVolume(self,*,uSoundName:str,iValue:int) -> None:
        """
        Sets the volume for a give sound
        :param str uSoundName: The name of the sound
        :param int iValue: The sound volume (0-100)
        :return:
        """
        self.aSounds[uSoundName]["iSoundVolume"] = iValue
        return None
    def PlaySound(self,*,uSoundName:str, vSoundVolume:Union[float,str]=-1.0) -> bool:
        """ plays a given sound with a given volume.
        uSoundName is either a logical name from self.aSounds or a file
        name/path. vSoundVolume of -1.0 (or '') means: use the configured
        per-sound volume. Returns True on success (also when muted),
        False when playing failed. """
        iSoundVolume:int
        fVolume:float
        vVolume:Union[str,float]
        dSound:Optional[Dict[str,Union[cFileName,int]]]
        oFnSound:cFileName
        # Muted: report success without doing anything
        if self.bMute:
            return True
        try:
            dSound = self.aSounds.get(uSoundName)
            vVolume = vSoundVolume
            if dSound is not None:
                oFnSound     = dSound["oFnSound"]
                iSoundVolume = dSound["iSoundVolume"]
            else:
                # Not a logical name: treat uSoundName as a file name
                oFnSound=cFileName('').ImportFullPath(uFnFullName=uSoundName)
                iSoundVolume=100
            if oFnSound and not oFnSound.IsEmpty():
                oSound=self.dSoundObjects.get(oFnSound.string)
                # temporary disabled
                # NOTE(review): the 'or True' below forces a reload on every
                # call, bypassing the dSoundObjects cache on purpose
                if oSound is None or True:
                    oSound = SoundLoader.load(oFnSound.string)
                    self.dSoundObjects[oFnSound.string]=oSound
                if oSound:
                    # Restart the sound if it is still playing
                    if oSound.state != 'stop':
                        oSound.stop()
                    if isinstance(vSoundVolume, str):
                        if vSoundVolume!=u'':
                            vVolume = ToFloat(vSoundVolume)
                    # Explicit volume given: scale it by the per-sound volume;
                    # otherwise use the per-sound volume directly
                    if not vVolume==-1.0 and not vVolume==u'':
                        fVolume = cast(float,vVolume)*(iSoundVolume/100.0)
                    else:
                        fVolume = iSoundVolume*1.0
                    # Kivy expects volume in [0.0, 1.0]
                    fVolume = fVolume /100.0
                    oSound.volume = fVolume
                    if fVolume>0:
                        oSound.play()
            return True
        except Exception as e:
            LogError(uMsg=u'Playing sound failed:'+uSoundName,oException=e)
            return False
| thica/ORCA-Remote | src/ORCA/Sound.py | Python | gpl-3.0 | 7,387 | [
"ORCA"
] | ee5c4b6b04d8c06a8d4ac3cfa4c62939ccb9459d198b8734d8dda1e7e78b1992 |
"""Simple test for the ImageDataProbe filter.
"""
# Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
from os.path import abspath
from io import BytesIO
import copy
# Local imports.
from common import TestCase, get_example_data
class TestLabels(TestCase):
    def check(self, saved=False):
        """Does the checking, if saved is True it does not change the
        properties at first to see how those behave and only tests the
        final unpickled state."""
        script = self.script
        e = script.engine
        scene = e.current_scene
        # Navigate the pipeline: source -> module manager -> Labels module
        src = scene.children[0]
        mm = src.children[0]
        l = mm.children[1]
        if not saved:
            # 'np' is a point count here, not numpy
            np = l.visible_points.outputs[0].number_of_points
            assert np < 35 and np > 20
            # Reconfigure the Labels module; the saved/restored state is
            # checked against these values further below
            l.visible_points.enabled = True
            l.mapper.label_mode = 'label_scalars'
            l.label_format = '%.1f'
            l.number_of_labels = 45
            l.property.color = (0,0,0)
            l.property.italic = False
        np = l.visible_points.outputs[0].number_of_points
        assert np < 60 and np > 35
        assert l.visible_points.enabled == True
        assert l.visible_points.outputs[0] == \
            l.visible_points.filter.filter.output
        assert l.property.color == (0,0,0)
        assert l.property.italic == False
        assert l.mapper.label_mode == 'label_scalars'
        assert l.label_format == '%.1f'
    def test(self):
        """Standard entry point used by the common TestCase runner."""
        self.main()
    def do(self):
        """Build the visualization, then verify it survives save/restore
        and deep-copying."""
        ############################################################
        # Imports.
        from mayavi.modules.api import ScalarCutPlane
        from mayavi.modules.labels import Labels
        from mayavi.sources.vtk_xml_file_reader import VTKXMLFileReader
        ############################################################
        # Create a new scene and set up the visualization.
        s = self.new_scene()
        script = mayavi = self.script
        # Read a VTK (old style) data file.
        r = VTKXMLFileReader()
        r.initialize(get_example_data('fire_ug.vtu'))
        script.add_source(r)
        # Create the filters.
        cp = ScalarCutPlane()
        script.add_module(cp)
        # Labels attached to the cut plane's output
        l = Labels(object=cp)
        script.add_module(l)
        s.scene.isometric_view()
        self.check(saved=False)
        ############################################################
        # Test if saving a visualization and restoring it works.
        # Save visualization.
        f = BytesIO()
        f.name = abspath('test.mv2') # We simulate a file.
        script.save_visualization(f)
        f.seek(0) # So we can read this saved data.
        # Remove existing scene.
        engine = script.engine
        engine.close_scene(s)
        # Load visualization
        script.load_visualization(f)
        s = engine.current_scene
        s.scene.isometric_view()
        # Seems to be needed for the test to pass. :( Just flushes the
        # pipeline.
        s.children[0].pipeline_changed = True
        # Check.
        # Now do the check.
        self.check(saved=True)
        ############################################################
        # Test if the Mayavi2 visualization can be deep-copied.
        # Pop the source object.
        source = s.children.pop()
        # Add it back to see if that works without error.
        s.children.append(source)
        # Now do the check.
        s.scene.isometric_view()
        self.check(saved=True)
        # Now deepcopy the source and replace the existing one with
        # the copy. This basically simulates cutting/copying the
        # object from the UI via the right-click menu on the tree
        # view, and pasting the copy back.
        source1 = copy.deepcopy(source)
        s.children[0] = source1
        # Now do the check.
        s.scene.isometric_view()
        self.check(saved=True)
        # If we have come this far, we are golden!
if __name__ == "__main__":
t = TestLabels()
t.test()
| dmsurti/mayavi | integrationtests/mayavi/test_labels.py | Python | bsd-3-clause | 4,116 | [
"Mayavi",
"VTK"
] | ed2a914d15fc4fb000533c52e020fb65f5d78810d876fc5ba00684da91a5de6d |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018-2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.encoders.base import Encoder
import thorns
import cochlea
class CochleaEncoder(Encoder):
    def __init__(self, fs=100e3, anfs=(60, 25, 15),
                 min_cf=125, max_cf=4000, num_cf=2048,
                 normalizeInput=True):
        """
        The `CochleaEncoder` encodes an input signal into spike trains of
        auditory nerve fibers.
        It uses the cochlea Python package - https://github.com/mrkrd/cochlea
        Which contains state-of-the-art biophysical models, and gives realistic
        approximation of the auditory nerve activity for a collection of inner
        ear models.
        Auditory nerve fibres have spontaneous rates that vary from 0 to more
        than 120 spikes per second. Fibres with the highest rates of spontaneous
        activity have the lowest thresholds. Once the level of a sound exceeds
        the fibre's threshold the discharge rate of the fibre rises above its
        spontaneous rate. Eventually, when the sound is made sufficiently high
        in level the fibre cannot fire any faster and reaches saturated discharge
        rate. For high spontaneous rate fibres the shape the function relating
        firing rate to sound levels is sigmoidal whereas for medium and low
        spontaneous rate fibres it may deviate from this and even be straight
        over wide ranges of level. The range over which a fibre changes its
        discharge rate to signal sound level changes is called the dynamic range.
        The dynamic range is narrow for high spontaneous rate fibres and wider
        for medium and low spontaneous rate fibres.
        Source: The basic physiology of the auditory nerve.
                Alan R. Palmer, MRC Institute of Hearing Research
        The spontaneous rate (SR) of discharge varies a lot from one large
        myelinated type I Spiral Ganglion Neuron (SGN) to another and depends
        on certain molecular characteristics. Three categories of SGN have
        been described depending on their SR:
        - low-SR (less than 0.5 spike/sec), about 15% of the SGNs, all forming
          synapses on the modiolar side of the IHC.
        - high-SR (over 18 spikes/sec), about 60% of the SGNs, forming synapses
          on the tunnel pillar side of the IHC.
        - in-between, an intermediate class called medium-SR represents about
          25% of the SGN population.
        Source: http://www.cochlea.eu/en/spiral-ganglion/type-i-sgns-physiology
        The lower limit for the number of characteristic frequencies is 2048.
        A default voice band is used for characteristic frequencies.
        Source: https://en.wikipedia.org/wiki/Voice_frequency
        :param fs: (int) Sampling frequency, in Hertz.
                   Defaults to 100e3. Range is [100e3, 500e3].
        :param anfs: (int tuple) Number of auditory nerve fibers.
                     (High, Medium, Low) spontaneous rate fiber counts.
                     Defaults to (60, 25, 15).
        :param min_cf: (int) Minimum characteristic frequency.
                       Defaults to 125. Minimum value is 125.
        :param max_cf: (int) Maximum characteristic frequency.
                       Defaults to 4000. Maximum value is 20e3.
        :param normalizeInput: (bool) Whether to normalize input data range to [-1,1]
                               Defaults to True.
        """
        # Sampling frequency of the input data.
        self.fs = fs
        # Number of auditory nerve fibers.
        self.anfs = anfs
        # Minimum and maximum characteristic frequency range.
        self.min_cf = min_cf
        self.max_cf = max_cf
        # Number of characteristic frequencies.
        self.num_cf = num_cf
        # Enforce a lower limit for the number of characteristic frequencies.
        # NOTE(review): the docstring above states the lower limit is 2048,
        # but the code enforces a floor of 16 -- confirm which is intended.
        if self.num_cf < 16:
            self.num_cf = 16
        # One output bit per characteristic frequency
        self.outputWidth = self.num_cf
        # Widen the CF range so it can hold num_cf distinct frequencies
        if self.max_cf - self.min_cf < self.num_cf:
            self.max_cf = self.min_cf + self.num_cf
        self.normalizeInput = normalizeInput
    def getWidth(self):
        """
        Return the output width, in bits.
        :return outputWidth: (int) output width
        """
        return self.outputWidth
    def encodeIntoArray(self, inputData, output):
        """
        Encodes inputData and puts the encoded value into the numpy output array,
        which is a 1D array of length returned by getWidth().
        :param inputData: (np.array) Data to encode.
        :param output: (np.array) 1D array. Encoder output.
        """
        # Not implemented for this encoder; use encodeIntoNeurogram instead
        pass
    def encodeIntoNeurogram(self, inputData):
        """
        Encodes inputData and returns the encoded neurogram.
        :param inputData: (np.array) Data to encode.
        """
        if type(inputData) != np.ndarray:
            raise TypeError('Expected inputData to be a numpy array but the input '
                            'type is %s' % type(inputData))
        # The Zilany2014 model requires the data to be in dB SPL (deciBel Sound
        # Pressure Level). To do this the auditory threshold is used as the
        # reference sound pressure, i.e. p0 = 20 µPa
        # Desired level of the output signal in dB SPL set to 50
        data = cochlea.set_dbspl(inputData, 50)
        if self.normalizeInput:
            # Division by 2**15 assumes 16-bit PCM style input -- TODO confirm
            data = np.array([float(val) / pow(2, 15) for val in data])
        # Run model
        anf = cochlea.run_zilany2014(
            data,
            self.fs,
            anf_num=self.anfs,
            cf=(self.min_cf, self.max_cf, self.num_cf),
            seed=0,
            powerlaw='approximate',
            species='human',
        )
        # Accumulate spike trains
        anf_acc = thorns.accumulate(anf, keep=['cf', 'duration'])
        # Sort according to characteristic frequency
        anf_acc.sort_values('cf', ascending=False, inplace=True)
        # Create an array where each row contains a column per characteristic frequency,
        # containing a count of firings (num_cf column count)
        neurogram = thorns.spikes.trains_to_array(anf_acc, self.fs)
        # Clamp multiple spikes to 1
        # NOTE(review): (neurogram > 0) * neurogram leaves non-negative counts
        # unchanged; a true clamp to 1 would be (neurogram > 0).astype(int).
        # Confirm whether the comment or the expression reflects the intent.
        neurogram = (neurogram > 0) * neurogram
        return neurogram
    def write(self, proto):
        # Serialization is not implemented for this encoder
        pass
| nupic-community/nupic.audio | SpeechRecognition/cochlea-encoder/cochlea_encoder.py | Python | gpl-3.0 | 6,805 | [
"NEURON"
] | 60d861c4ca892169cbc7fb5ab2b6d063b1c908cbff9e7df41eb648d15017e16b |
# -*- coding: utf-8 -*-
# module pyparsing.py
#
# Copyright (c) 2003-2019 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
=============================================================================
The pyparsing module is an alternative approach to creating and
executing simple grammars, vs. the traditional lex/yacc approach, or the
use of regular expressions. With pyparsing, you don't need to learn
a new syntax for defining grammars or matching expressions - the parsing
module provides a library of classes that you use to construct the
grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
``"<salutation>, <addressee>!"``), built up using :class:`Word`,
:class:`Literal`, and :class:`And` elements
(the :class:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
and the strings are auto-converted to :class:`Literal` expressions)::
from pip._vendor.pyparsing import Word, alphas
# define grammar of a greeting
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the
self-explanatory class names, and the use of '+', '|' and '^' operators.
The :class:`ParseResults` object returned from
:class:`ParserElement.parseString` can be
accessed as a nested list, a dictionary, or an object with named
attributes.
The pyparsing module handles some of the problems that are typically
vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle
"Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
Getting Started -
-----------------
Visit the classes :class:`ParserElement` and :class:`ParseResults` to
see the base classes that most other pyparsing
classes inherit from. Use the docstrings for examples of how to:
- construct literal match expressions from :class:`Literal` and
:class:`CaselessLiteral` classes
- construct character word-group expressions using the :class:`Word`
class
- see how to create repetitive expressions using :class:`ZeroOrMore`
and :class:`OneOrMore` classes
- use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,
and :class:`'&'<Each>` operators to combine simple expressions into
more complex ones
- associate names with your parsed results using
:class:`ParserElement.setResultsName`
- access the parsed data, which is returned as a :class:`ParseResults`
object
- find some helpful expression short-cuts like :class:`delimitedList`
and :class:`oneOf`
- find more useful common expressions in the :class:`pyparsing_common`
namespace class
"""
__version__ = "2.4.2"
__versionTime__ = "29 Jul 2019 02:58 UTC"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
import pprint
import traceback
import types
from datetime import datetime
from operator import itemgetter
import itertools
from functools import wraps
try:
# Python 3
from itertools import filterfalse
except ImportError:
from itertools import ifilterfalse as filterfalse
try:
from _thread import RLock
except ImportError:
from threading import RLock
try:
# Python 3
from collections.abc import Iterable
from collections.abc import MutableMapping, Mapping
except ImportError:
# Python 2.7
from collections import Iterable
from collections import MutableMapping, Mapping
try:
from collections import OrderedDict as _OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict as _OrderedDict
except ImportError:
_OrderedDict = None
try:
from types import SimpleNamespace
except ImportError:
class SimpleNamespace: pass
# version compatibility configuration
__compat__ = SimpleNamespace()
__compat__.__doc__ = """
A cross-version compatibility configuration for pyparsing features that will be
released in a future version. By setting values in this configuration to True,
those features can be enabled in prior versions for compatibility development
and testing.
- collect_all_And_tokens - flag to enable fix for Issue #63 that fixes erroneous grouping
of results names when an And expression is nested within an Or or MatchFirst; set to
True to enable bugfix released in pyparsing 2.3.0, or False to preserve
pre-2.3.0 handling of named results
"""
__compat__.collect_all_And_tokens = True
__diag__ = SimpleNamespace()
__diag__.__doc__ = """
Diagnostic configuration (all default to False)
- warn_multiple_tokens_in_named_alternation - flag to enable warnings when a results
name is defined on a MatchFirst or Or expression with one or more And subexpressions
(only warns if __compat__.collect_all_And_tokens is False)
- warn_ungrouped_named_tokens_in_collection - flag to enable warnings when a results
name is defined on a containing expression with ungrouped subexpressions that also
have results names
- warn_name_set_on_empty_Forward - flag to enable warnings whan a Forward is defined
with a results name, but has no contents defined
- warn_on_multiple_string_args_to_oneof - flag to enable warnings whan oneOf is
incorrectly called with multiple str arguments
- enable_debug_on_named_expressions - flag to auto-enable debug on all subsequent
calls to ParserElement.setName()
"""
__diag__.warn_multiple_tokens_in_named_alternation = False
__diag__.warn_ungrouped_named_tokens_in_collection = False
__diag__.warn_name_set_on_empty_Forward = False
__diag__.warn_on_multiple_string_args_to_oneof = False
__diag__.enable_debug_on_named_expressions = False
# ~ sys.stderr.write("testing pyparsing module, version %s, %s\n" % (__version__, __versionTime__))
__all__ = ['__version__', '__versionTime__', '__author__', '__compat__', '__diag__',
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'PrecededBy', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'Char',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation', 'locatedExpr', 'withClass',
'CloseMatch', 'tokenMap', 'pyparsing_common', 'pyparsing_unicode', 'unicode_set',
'conditionAsParseAction',
]
# Version-detection and compatibility aliases so the rest of the module
# can use a single set of names on both Python 2 and Python 3.
system_version = tuple(sys.version_info)[:3]
PY_3 = system_version[0] == 3
if PY_3:
    _MAX_INT = sys.maxsize
    # Python 3: str is already unicode, so the py2 names and _ustr all
    # collapse onto the builtins
    basestring = str
    unichr = chr
    unicode = str
    _ustr = str
    # build list of single arg builtins, that can be used as parse actions
    singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
else:
    # Python 2: map the py3 names onto their py2 equivalents
    _MAX_INT = sys.maxint
    range = xrange
    def _ustr(obj):
        """Drop-in replacement for str(obj) that tries to be Unicode
        friendly. It first tries str(obj). If that fails with
        a UnicodeEncodeError, then it tries unicode(obj). It then
        < returns the unicode object | encodes it with the default
        encoding | ... >.
        """
        if isinstance(obj, unicode):
            return obj
        try:
            # If this works, then _ustr(obj) has the same behaviour as str(obj), so
            # it won't break any existing code.
            return str(obj)
        except UnicodeEncodeError:
            # Else encode it
            # NOTE: Regex is defined later in this module; this branch is only
            # executed at call time, after the module has fully loaded
            ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
            xmlcharref = Regex(r'&#\d+;')
            xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
            return xmlcharref.transformString(ret)
    # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
    singleArgBuiltins = []
    import __builtin__
    for fname in "sum len sorted reversed list tuple set any all min max".split():
        try:
            singleArgBuiltins.append(getattr(__builtin__, fname))
        except AttributeError:
            continue
_generatorType = type((y for y in range(1)))
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ('&' + s + ';' for s in "amp gt lt quot apos".split())
for from_, to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
alphas = string.ascii_uppercase + string.ascii_lowercase
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)
printables = "".join(c for c in string.printable if c not in string.whitespace)
def conditionAsParseAction(fn, message=None, fatal=False):
    """Wrap predicate *fn* as a parse action that raises when the condition
    fails.  ``message`` overrides the default failure text; ``fatal=True``
    raises ParseFatalException (stopping all parsing) instead of
    ParseException.
    """
    if message is None:
        message = "failed user-defined condition"
    raise_cls = ParseFatalException if fatal else ParseException
    # normalize the predicate's arity the same way parse actions are handled
    checked = _trim_arity(fn)
    @wraps(checked)
    def pa(s, l, t):
        if not bool(checked(s, l, t)):
            raise raise_cls(s, l, message)
    return pa
class ParseBaseException(Exception):
    """base exception class for all parsing runtime exceptions"""
    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__(self, pstr, loc=0, msg=None, elem=None):
        self.loc = loc
        if msg is None:
            # single-argument form: the first argument is the message, and
            # no parse string is available
            self.msg = pstr
            self.pstr = ""
        else:
            self.msg = msg
            self.pstr = pstr
        self.parserElement = elem
        self.args = (pstr, loc, msg)
    @classmethod
    def _from_exception(cls, pe):
        """
        internal factory method to simplify creating one type of ParseException
        from another - avoids having __init__ signature conflicts among subclasses
        """
        return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
    def __getattr__(self, aname):
        """supported attributes by name are:
        - lineno - returns the line number of the exception text
        - col - returns the column number of the exception text
        - line - returns the line containing the exception text
        """
        # these are computed lazily from loc/pstr using the module-level
        # helpers, rather than stored on the instance
        if aname == "lineno":
            return lineno(self.loc, self.pstr)
        elif aname in ("col", "column"):
            return col(self.loc, self.pstr)
        elif aname == "line":
            return line(self.loc, self.pstr)
        else:
            raise AttributeError(aname)
    def __str__(self):
        if self.pstr:
            if self.loc >= len(self.pstr):
                foundstr = ', found end of text'
            else:
                # show the single offending character; the replace() collapses
                # the doubled backslash produced by %r into one for display
                foundstr = (', found %r' % self.pstr[self.loc:self.loc + 1]).replace(r'\\', '\\')
        else:
            foundstr = ''
        return ("%s%s (at char %d), (line:%d, col:%d)" %
                (self.msg, foundstr, self.loc, self.lineno, self.column))
    def __repr__(self):
        return _ustr(self)
    def markInputline(self, markerString=">!<"):
        """Extracts the exception line from the input string, and marks
        the location of the exception with a special symbol.
        """
        line_str = self.line
        line_column = self.column - 1
        if markerString:
            # splice the marker into the failing line at the error column
            line_str = "".join((line_str[:line_column],
                                markerString, line_str[line_column:]))
        return line_str.strip()
    def __dir__(self):
        # advertise the dynamic attributes served by __getattr__ above
        return "lineno col line".split() + dir(type(self))
class ParseException(ParseBaseException):
    """
    Exception thrown when parse expressions don't match class;
    supported attributes by name are:
    - lineno - returns the line number of the exception text
    - col - returns the column number of the exception text
    - line - returns the line containing the exception text
    Example::
        try:
            Word(nums).setName("integer").parseString("ABC")
        except ParseException as pe:
            print(pe)
            print("column: {}".format(pe.col))
    prints::
        Expected integer (at char 0), (line:1, col:1)
        column: 1
    """
    @staticmethod
    def explain(exc, depth=16):
        """
        Method to take an exception and translate the Python internal traceback into a list
        of the pyparsing expressions that caused the exception to be raised.
        Parameters:
        - exc - exception raised during parsing (need not be a ParseException, in support
        of Python exceptions that might be raised in a parse action)
        - depth (default=16) - number of levels back in the stack trace to list expression
        and function names; if None, the full stack trace names will be listed; if 0, only
        the failing input line, marker, and exception string will be shown
        Returns a multi-line string listing the ParserElements and/or function names in the
        exception's stack trace.
        Note: the diagnostic output will include string representations of the expressions
        that failed to parse. These representations will be more helpful if you use `setName` to
        give identifiable names to your expressions. Otherwise they will use the default string
        forms, which may be cryptic to read.
        explain() is only supported under Python 3.
        """
        import inspect
        if depth is None:
            depth = sys.getrecursionlimit()
        ret = []
        if isinstance(exc, ParseBaseException):
            # header: failing line, caret marker at the failing column,
            # then the exception text itself
            ret.append(exc.line)
            ret.append(' ' * (exc.col - 1) + '^')
            ret.append("{0}: {1}".format(type(exc).__name__, exc))
        if depth > 0:
            # walk the innermost traceback frames (most recent last)
            callers = inspect.getinnerframes(exc.__traceback__, context=depth)
            seen = set()
            for i, ff in enumerate(callers[-depth:]):
                frm = ff[0]
                f_self = frm.f_locals.get('self', None)
                if isinstance(f_self, ParserElement):
                    # only report the actual matching entry points, and only
                    # once per expression instance
                    if frm.f_code.co_name not in ('parseImpl', '_parseNoCache'):
                        continue
                    if f_self in seen:
                        continue
                    seen.add(f_self)
                    self_type = type(f_self)
                    ret.append("{0}.{1} - {2}".format(self_type.__module__,
                                                      self_type.__name__,
                                                      f_self))
                elif f_self is not None:
                    # a non-ParserElement method frame: report the class only
                    self_type = type(f_self)
                    ret.append("{0}.{1}".format(self_type.__module__,
                                                self_type.__name__))
                else:
                    # plain function frame; skip internal wrapper/module frames
                    code = frm.f_code
                    if code.co_name in ('wrapper', '<module>'):
                        continue
                    ret.append("{0}".format(code.co_name))
                depth -= 1
                if not depth:
                    break
        return '\n'.join(ret)
class ParseFatalException(ParseBaseException):
    """user-throwable exception thrown when inconsistent parse content
    is found; stops all parsing immediately"""
    # behavior is entirely inherited; the distinct type is what signals
    # "do not backtrack" to the parsing machinery
    pass
class ParseSyntaxException(ParseFatalException):
    """just like :class:`ParseFatalException`, but thrown internally
    when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates
    that parsing is to stop immediately because an unbacktrackable
    syntax error has been found.
    """
    # distinct type only; all behavior comes from ParseFatalException
    pass
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
    """Exception thrown by :class:`ParserElement.validate` if the
    grammar could be improperly recursive.
    """
    def __init__(self, parseElementList):
        # keep the offending chain of expressions for diagnostic display
        self.parseElementTrace = parseElementList

    def __str__(self):
        return "RecursiveGrammarException: {0}".format(self.parseElementTrace)
class _ParseResultsWithOffset(object):
def __init__(self, p1, p2):
self.tup = (p1, p2)
def __getitem__(self, i):
return self.tup[i]
def __repr__(self):
return repr(self.tup[0])
def setOffset(self, i):
self.tup = (self.tup[0], i)
class ParseResults(object):
    """Structured parse results, to provide multiple means of access to
    the parsed data:
    - as a list (``len(results)``)
    - by list index (``results[0], results[1]``, etc.)
    - by attribute (``results.<resultsName>`` - see :class:`ParserElement.setResultsName`)
    Example::
        integer = Word(nums)
        date_str = (integer.setResultsName("year") + '/'
                    + integer.setResultsName("month") + '/'
                    + integer.setResultsName("day"))
        # equivalent form:
        # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
        # parseString returns a ParseResults object
        result = date_str.parseString("1999/12/31")
        def test(s, fn=repr):
            print("%s -> %s" % (s, fn(eval(s))))
        test("list(result)")
        test("result[0]")
        test("result['month']")
        test("result.day")
        test("'month' in result")
        test("'minutes' in result")
        test("result.dump()", str)
    prints::
        list(result) -> ['1999', '/', '12', '/', '31']
        result[0] -> '1999'
        result['month'] -> '12'
        result.day -> '31'
        'month' in result -> True
        'minutes' in result -> False
        result.dump() -> ['1999', '/', '12', '/', '31']
        - day: 31
        - month: 12
        - year: 1999
    """
    def __new__(cls, toklist=None, name=None, asList=True, modal=True):
        if isinstance(toklist, cls):
            # reuse an existing ParseResults as-is rather than re-wrapping it
            return toklist
        retobj = object.__new__(cls)
        # flag consumed by __init__ so a recycled instance (returned above)
        # is not re-initialized and does not lose its contents
        retobj.__doinit = True
        return retobj
    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__(self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance):
        if self.__doinit:
            self.__doinit = False
            self.__name = None
            self.__parent = None
            self.__accumNames = {}
            self.__asList = asList
            self.__modal = modal
            if toklist is None:
                toklist = []
            if isinstance(toklist, list):
                self.__toklist = toklist[:]
            elif isinstance(toklist, _generatorType):
                self.__toklist = list(toklist)
            else:
                self.__toklist = [toklist]
            self.__tokdict = dict()
        # name handling runs even for a recycled instance
        if name is not None and name:
            if not modal:
                self.__accumNames[name] = 0
            if isinstance(name, int):
                name = _ustr(name) # will always return a str, but use _ustr for consistency
            self.__name = name
            if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None, '', [])):
                if isinstance(toklist, basestring):
                    toklist = [toklist]
                if asList:
                    if isinstance(toklist, ParseResults):
                        self[name] = _ParseResultsWithOffset(ParseResults(toklist.__toklist), 0)
                    else:
                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0)
                    self[name].__name = name
                else:
                    try:
                        self[name] = toklist[0]
                    except (KeyError, TypeError, IndexError):
                        self[name] = toklist
    def __getitem__(self, i):
        if isinstance(i, (int, slice)):
            return self.__toklist[i]
        else:
            # string key: modal names return the last match, accumulating
            # names return all matches as a ParseResults
            if i not in self.__accumNames:
                return self.__tokdict[i][-1][0]
            else:
                return ParseResults([v[0] for v in self.__tokdict[i]])
    def __setitem__(self, k, v, isinstance=isinstance):
        # values carrying offsets go into the name dict; int/slice keys go
        # into the token list; plain named values get a zero offset
        if isinstance(v, _ParseResultsWithOffset):
            self.__tokdict[k] = self.__tokdict.get(k, list()) + [v]
            sub = v[0]
        elif isinstance(k, (int, slice)):
            self.__toklist[k] = v
            sub = v
        else:
            self.__tokdict[k] = self.__tokdict.get(k, list()) + [_ParseResultsWithOffset(v, 0)]
            sub = v
        if isinstance(sub, ParseResults):
            # weakly link nested results back to their container for getName()
            sub.__parent = wkref(self)
    def __delitem__(self, i):
        if isinstance(i, (int, slice)):
            mylen = len(self.__toklist)
            del self.__toklist[i]
            # convert int to slice
            if isinstance(i, int):
                if i < 0:
                    i += mylen
                i = slice(i, i + 1)
            # get removed indices
            removed = list(range(*i.indices(mylen)))
            removed.reverse()
            # fixup indices in token dictionary
            for name, occurrences in self.__tokdict.items():
                for j in removed:
                    for k, (value, position) in enumerate(occurrences):
                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
        else:
            del self.__tokdict[i]
    def __contains__(self, k):
        # membership tests against results *names*, not token values
        return k in self.__tokdict
    def __len__(self):
        return len(self.__toklist)
    def __bool__(self):
        return (not not self.__toklist)
    __nonzero__ = __bool__
    def __iter__(self):
        return iter(self.__toklist)
    def __reversed__(self):
        return iter(self.__toklist[::-1])
    def _iterkeys(self):
        if hasattr(self.__tokdict, "iterkeys"):
            return self.__tokdict.iterkeys()
        else:
            return iter(self.__tokdict)
    def _itervalues(self):
        return (self[k] for k in self._iterkeys())
    def _iteritems(self):
        return ((k, self[k]) for k in self._iterkeys())
    if PY_3:
        keys = _iterkeys
        """Returns an iterator of all named result keys."""
        values = _itervalues
        """Returns an iterator of all named result values."""
        items = _iteritems
        """Returns an iterator of all named result key-value tuples."""
    else:
        iterkeys = _iterkeys
        """Returns an iterator of all named result keys (Python 2.x only)."""
        itervalues = _itervalues
        """Returns an iterator of all named result values (Python 2.x only)."""
        iteritems = _iteritems
        """Returns an iterator of all named result key-value tuples (Python 2.x only)."""
    # NOTE(review): upstream pyparsing defines the following keys/values/items
    # methods inside the Python-2 'else' branch above; here they sit at class
    # level, where on Python 3 they shadow the PY_3 aliases assigned above and
    # call self.iterkeys()/itervalues()/iteritems(), which only exist on
    # Python 2 -- verify intended indentation against the original source.
    def keys(self):
        """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
        return list(self.iterkeys())
    def values(self):
        """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
        return list(self.itervalues())
    def items(self):
        """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
        return list(self.iteritems())
    def haskeys(self):
        """Since keys() returns an iterator, this method is helpful in bypassing
        code that looks for the existence of any defined results names."""
        return bool(self.__tokdict)
    def pop(self, *args, **kwargs):
        """
        Removes and returns item at specified index (default= ``last``).
        Supports both ``list`` and ``dict`` semantics for ``pop()``. If
        passed no argument or an integer argument, it will use ``list``
        semantics and pop tokens from the list of parsed tokens. If passed
        a non-integer argument (most likely a string), it will use ``dict``
        semantics and pop the corresponding value from any defined results
        names. A second default return value argument is supported, just as in
        ``dict.pop()``.
        Example::
            def remove_first(tokens):
                tokens.pop(0)
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
            print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
            label = Word(alphas)
            patt = label("LABEL") + OneOrMore(Word(nums))
            print(patt.parseString("AAB 123 321").dump())
            # Use pop() in a parse action to remove named result (note that corresponding value is not
            # removed from list form of results)
            def remove_LABEL(tokens):
                tokens.pop("LABEL")
                return tokens
            patt.addParseAction(remove_LABEL)
            print(patt.parseString("AAB 123 321").dump())
        prints::
            ['AAB', '123', '321']
            - LABEL: AAB
            ['AAB', '123', '321']
        """
        if not args:
            args = [-1]
        for k, v in kwargs.items():
            if k == 'default':
                args = (args[0], v)
            else:
                raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
        if (isinstance(args[0], int)
                or len(args) == 1
                or args[0] in self):
            index = args[0]
            ret = self[index]
            del self[index]
            return ret
        else:
            # dict semantics with a missing key: return the supplied default
            defaultvalue = args[1]
            return defaultvalue
    def get(self, key, defaultValue=None):
        """
        Returns named result matching the given key, or if there is no
        such name, then returns the given ``defaultValue`` or ``None`` if no
        ``defaultValue`` is specified.
        Similar to ``dict.get()``.
        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
            result = date_str.parseString("1999/12/31")
            print(result.get("year")) # -> '1999'
            print(result.get("hour", "not specified")) # -> 'not specified'
            print(result.get("hour")) # -> None
        """
        if key in self:
            return self[key]
        else:
            return defaultValue
    def insert(self, index, insStr):
        """
        Inserts new element at location index in the list of parsed tokens.
        Similar to ``list.insert()``.
        Example::
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
            # use a parse action to insert the parse location in the front of the parsed results
            def insert_locn(locn, tokens):
                tokens.insert(0, locn)
            print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
        """
        self.__toklist.insert(index, insStr)
        # fixup indices in token dictionary
        for name, occurrences in self.__tokdict.items():
            for k, (value, position) in enumerate(occurrences):
                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
    def append(self, item):
        """
        Add single element to end of ParseResults list of elements.
        Example::
            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
            # use a parse action to compute the sum of the parsed integers, and add it to the end
            def append_sum(tokens):
                tokens.append(sum(map(int, tokens)))
            print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
        """
        self.__toklist.append(item)
    def extend(self, itemseq):
        """
        Add sequence of elements to end of ParseResults list of elements.
        Example::
            patt = OneOrMore(Word(alphas))
            # use a parse action to append the reverse of the matched strings, to make a palindrome
            def make_palindrome(tokens):
                tokens.extend(reversed([t[::-1] for t in tokens]))
                return ''.join(tokens)
            print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
        """
        if isinstance(itemseq, ParseResults):
            # merge named results too, not just the token list
            self.__iadd__(itemseq)
        else:
            self.__toklist.extend(itemseq)
    def clear(self):
        """
        Clear all elements and results names.
        """
        del self.__toklist[:]
        self.__tokdict.clear()
    def __getattr__(self, name):
        # undefined results names return an empty string rather than raising
        try:
            return self[name]
        except KeyError:
            return ""
    def __add__(self, other):
        ret = self.copy()
        ret += other
        return ret
    def __iadd__(self, other):
        if other.__tokdict:
            # shift the other result's named-token offsets past our tokens
            offset = len(self.__toklist)
            addoffset = lambda a: offset if a < 0 else a + offset
            otheritems = other.__tokdict.items()
            otherdictitems = [(k, _ParseResultsWithOffset(v[0], addoffset(v[1])))
                              for k, vlist in otheritems for v in vlist]
            for k, v in otherdictitems:
                self[k] = v
                if isinstance(v[0], ParseResults):
                    v[0].__parent = wkref(self)
        self.__toklist += other.__toklist
        self.__accumNames.update(other.__accumNames)
        return self
    def __radd__(self, other):
        if isinstance(other, int) and other == 0:
            # useful for merging many ParseResults using sum() builtin
            return self.copy()
        else:
            # this may raise a TypeError - so be it
            return other + self
    def __repr__(self):
        return "(%s, %s)" % (repr(self.__toklist), repr(self.__tokdict))
    def __str__(self):
        return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
    def _asStringList(self, sep=''):
        # flatten nested ParseResults into a single list of strings
        out = []
        for item in self.__toklist:
            if out and sep:
                out.append(sep)
            if isinstance(item, ParseResults):
                out += item._asStringList()
            else:
                out.append(_ustr(item))
        return out
    def asList(self):
        """
        Returns the parse results as a nested list of matching tokens, all converted to strings.
        Example::
            patt = OneOrMore(Word(alphas))
            result = patt.parseString("sldkj lsdkj sldkj")
            # even though the result prints in string-like form, it is actually a pyparsing ParseResults
            print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
            # Use asList() to create an actual list
            result_list = result.asList()
            print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
        """
        return [res.asList() if isinstance(res, ParseResults) else res for res in self.__toklist]
    def asDict(self):
        """
        Returns the named parse results as a nested dictionary.
        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
            result = date_str.parseString('12/31/1999')
            print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
            result_dict = result.asDict()
            print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
            # even though a ParseResults supports dict-like access, sometime you just need to have a dict
            import json
            print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
            print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
        """
        if PY_3:
            item_fn = self.items
        else:
            item_fn = self.iteritems
        def toItem(obj):
            # recursively convert nested ParseResults to dicts or lists
            if isinstance(obj, ParseResults):
                if obj.haskeys():
                    return obj.asDict()
                else:
                    return [toItem(v) for v in obj]
            else:
                return obj
        return dict((k, toItem(v)) for k, v in item_fn())
    def copy(self):
        """
        Returns a new copy of a :class:`ParseResults` object.
        """
        ret = ParseResults(self.__toklist)
        ret.__tokdict = dict(self.__tokdict.items())
        ret.__parent = self.__parent
        ret.__accumNames.update(self.__accumNames)
        ret.__name = self.__name
        return ret
    def asXML(self, doctag=None, namedItemsOnly=False, indent="", formatted=True):
        """
        (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
        """
        nl = "\n"
        out = []
        # map token-list positions back to their results names
        namedItems = dict((v[1], k) for (k, vlist) in self.__tokdict.items()
                          for v in vlist)
        nextLevelIndent = indent + " "
        # collapse out indents if formatting is not desired
        if not formatted:
            indent = ""
            nextLevelIndent = ""
            nl = ""
        selfTag = None
        if doctag is not None:
            selfTag = doctag
        else:
            if self.__name:
                selfTag = self.__name
        if not selfTag:
            if namedItemsOnly:
                return ""
            else:
                selfTag = "ITEM"
        out += [nl, indent, "<", selfTag, ">"]
        for i, res in enumerate(self.__toklist):
            if isinstance(res, ParseResults):
                if i in namedItems:
                    out += [res.asXML(namedItems[i],
                                      namedItemsOnly and doctag is None,
                                      nextLevelIndent,
                                      formatted)]
                else:
                    out += [res.asXML(None,
                                      namedItemsOnly and doctag is None,
                                      nextLevelIndent,
                                      formatted)]
            else:
                # individual token, see if there is a name for it
                resTag = None
                if i in namedItems:
                    resTag = namedItems[i]
                if not resTag:
                    if namedItemsOnly:
                        continue
                    else:
                        resTag = "ITEM"
                xmlBodyText = _xml_escape(_ustr(res))
                out += [nl, nextLevelIndent, "<", resTag, ">",
                        xmlBodyText,
                        "</", resTag, ">"]
        out += [nl, indent, "</", selfTag, ">"]
        return "".join(out)
    def __lookup(self, sub):
        # reverse lookup: find the results name under which 'sub' is stored
        for k, vlist in self.__tokdict.items():
            for v, loc in vlist:
                if sub is v:
                    return k
        return None
    def getName(self):
        r"""
        Returns the results name for this token expression. Useful when several
        different expressions might match at a particular location.
        Example::
            integer = Word(nums)
            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
            house_number_expr = Suppress('#') + Word(nums, alphanums)
            user_data = (Group(house_number_expr)("house_number")
                         | Group(ssn_expr)("ssn")
                         | Group(integer)("age"))
            user_info = OneOrMore(user_data)
            result = user_info.parseString("22 111-22-3333 #221B")
            for item in result:
                print(item.getName(), ':', item[0])
        prints::
            age : 22
            ssn : 111-22-3333
            house_number : 221B
        """
        if self.__name:
            return self.__name
        elif self.__parent:
            # ask the (weakly referenced) container what it calls us
            par = self.__parent()
            if par:
                return par.__lookup(self)
            else:
                return None
        elif (len(self) == 1
              and len(self.__tokdict) == 1
              and next(iter(self.__tokdict.values()))[0][1] in (0, -1)):
            return next(iter(self.__tokdict.keys()))
        else:
            return None
    def dump(self, indent='', full=True, include_list=True, _depth=0):
        """
        Diagnostic method for listing out the contents of
        a :class:`ParseResults`. Accepts an optional ``indent`` argument so
        that this string can be embedded in a nested display of other data.
        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
            result = date_str.parseString('12/31/1999')
            print(result.dump())
        prints::
            ['12', '/', '31', '/', '1999']
            - day: 1999
            - month: 31
            - year: 12
        """
        out = []
        NL = '\n'
        if include_list:
            out.append(indent + _ustr(self.asList()))
        else:
            out.append('')
        if full:
            if self.haskeys():
                items = sorted((str(k), v) for k, v in self.items())
                for k, v in items:
                    if out:
                        out.append(NL)
                    out.append("%s%s- %s: " % (indent, (' ' * _depth), k))
                    if isinstance(v, ParseResults):
                        if v:
                            out.append(v.dump(indent=indent, full=full, include_list=include_list, _depth=_depth + 1))
                        else:
                            out.append(_ustr(v))
                    else:
                        out.append(repr(v))
            elif any(isinstance(vv, ParseResults) for vv in self):
                v = self
                for i, vv in enumerate(v):
                    if isinstance(vv, ParseResults):
                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,
                                                            (' ' * (_depth)),
                                                            i,
                                                            indent,
                                                            (' ' * (_depth + 1)),
                                                            vv.dump(indent=indent,
                                                                    full=full,
                                                                    include_list=include_list,
                                                                    _depth=_depth + 1)))
                    else:
                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,
                                                            (' ' * (_depth)),
                                                            i,
                                                            indent,
                                                            (' ' * (_depth + 1)),
                                                            _ustr(vv)))
        return "".join(out)
    def pprint(self, *args, **kwargs):
        """
        Pretty-printer for parsed results as a list, using the
        `pprint <https://docs.python.org/3/library/pprint.html>`_ module.
        Accepts additional positional or keyword args as defined for
        `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ .
        Example::
            ident = Word(alphas, alphanums)
            num = Word(nums)
            func = Forward()
            term = ident | num | Group('(' + func + ')')
            func <<= ident + Group(Optional(delimitedList(term)))
            result = func.parseString("fna a,b,(fnb c,d,200),100")
            result.pprint(width=40)
        prints::
            ['fna',
             ['a',
              'b',
              ['(', 'fnb', ['c', 'd', '200'], ')'],
              '100']]
        """
        pprint.pprint(self.asList(), *args, **kwargs)
    # add support for pickle protocol
    def __getstate__(self):
        # the weakref parent cannot be pickled; store the referent (or None)
        return (self.__toklist,
                (self.__tokdict.copy(),
                 self.__parent is not None and self.__parent() or None,
                 self.__accumNames,
                 self.__name))
    def __setstate__(self, state):
        self.__toklist = state[0]
        self.__tokdict, par, inAccumNames, self.__name = state[1]
        self.__accumNames = {}
        self.__accumNames.update(inAccumNames)
        if par is not None:
            # re-establish the weak parent link dropped during pickling
            self.__parent = wkref(par)
        else:
            self.__parent = None
    def __getnewargs__(self):
        return self.__toklist, self.__name, self.__asList, self.__modal
    def __dir__(self):
        # advertise results names as attributes (served by __getattr__)
        return dir(type(self)) + list(self.keys())
    @classmethod
    def from_dict(cls, other, name=None):
        """
        Helper classmethod to construct a ParseResults from a dict, preserving the
        name-value relations as results names. If an optional 'name' argument is
        given, a nested ParseResults will be returned
        """
        def is_iterable(obj):
            try:
                iter(obj)
            except Exception:
                return False
            else:
                # strings iterate but should be treated as scalar values
                if PY_3:
                    return not isinstance(obj, (str, bytes))
                else:
                    return not isinstance(obj, basestring)
        ret = cls([])
        for k, v in other.items():
            if isinstance(v, Mapping):
                ret += cls.from_dict(v, name=k)
            else:
                ret += cls([v], name=k, asList=is_iterable(v))
        if name is not None:
            ret = cls([ret], name=name)
        return ret
# register ParseResults as a virtual subclass of MutableMapping, so that
# isinstance(results, MutableMapping) checks succeed without inheritance
MutableMapping.register(ParseResults)
def col(loc, strg):
    """Returns current column within a string, counting newlines as line separators.
    The first column is number 1.
    Note: the default parsing behavior is to expand tabs in the input string
    before starting the parsing process. See
    :class:`ParserElement.parseString` for more
    information on parsing strings containing ``<TAB>`` s, and suggested
    methods to maintain a consistent view of the parsed string, the parse
    location, and line and column positions within the parsed string.
    """
    # a location sitting just past a newline is column 1 of the next line
    if 0 < loc < len(strg) and strg[loc - 1] == '\n':
        return 1
    return loc - strg.rfind("\n", 0, loc)
def lineno(loc, strg):
    """Returns current line number within a string, counting newlines as line separators.
    The first line is number 1.
    Note - the default parsing behavior is to expand tabs in the input string
    before starting the parsing process. See :class:`ParserElement.parseString`
    for more information on parsing strings containing ``<TAB>`` s, and
    suggested methods to maintain a consistent view of the parsed string, the
    parse location, and line and column positions within the parsed string.
    """
    # one more than the number of newlines preceding loc
    return 1 + strg.count("\n", 0, loc)
def line(loc, strg):
    """Returns the line of text containing loc within a string, counting newlines as line separators.
    """
    # slice from just past the preceding newline up to (not including) the
    # following newline, or to end-of-string if there is none
    start = strg.rfind("\n", 0, loc) + 1
    end = strg.find("\n", loc)
    return strg[start:] if end < 0 else strg[start:end]
# default debug hook fired just before an expression attempts a match at loc
def _defaultStartDebugAction(instring, loc, expr):
    print(("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % (lineno(loc, instring), col(loc, instring))))
# default debug hook fired after an expression matches successfully
def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks):
    print("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
# default debug hook fired when an expression raises during matching
def _defaultExceptionDebugAction(instring, loc, expr, exc):
    print("Exception raised:" + _ustr(exc))
def nullDebugAction(*args):
    """'Do-nothing' debug action, to suppress debugging output during parsing."""
    return None
# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
#~ 'decorator to trim function calls to match the arity of the target'
#~ def _trim_arity(func, maxargs=3):
#~ if func in singleArgBuiltins:
#~ return lambda s,l,t: func(t)
#~ limit = 0
#~ foundArity = False
#~ def wrapper(*args):
#~ nonlocal limit,foundArity
#~ while 1:
#~ try:
#~ ret = func(*args[limit:])
#~ foundArity = True
#~ return ret
#~ except TypeError:
#~ if limit == maxargs or foundArity:
#~ raise
#~ limit += 1
#~ continue
#~ return wrapper
# this version is Python 2.x-3.x cross-compatible
'decorator to trim function calls to match the arity of the target'
def _trim_arity(func, maxargs=2):
    """Wrap *func* so it can be called with the full (s, l, t) parse-action
    argument list regardless of how many trailing parameters it actually
    accepts.  The correct number of leading arguments to drop is discovered
    by trial and error on the first call and cached in ``limit`` thereafter.
    """
    if func in singleArgBuiltins:
        # builtins take exactly the token list; no probing needed
        return lambda s, l, t: func(t)
    # single-element lists, mutated from inside wrapper (Py2-compatible
    # substitute for 'nonlocal')
    limit = [0]
    foundArity = [False]
    # traceback return data structure changed in Py3.5 - normalize back to plain tuples
    if system_version[:2] >= (3, 5):
        def extract_stack(limit=0):
            # special handling for Python 3.5.0 - extra deep call stack by 1
            offset = -3 if system_version == (3, 5, 0) else -2
            frame_summary = traceback.extract_stack(limit=-offset + limit - 1)[offset]
            return [frame_summary[:2]]
        def extract_tb(tb, limit=0):
            frames = traceback.extract_tb(tb, limit=limit)
            frame_summary = frames[-1]
            return [frame_summary[:2]]
    else:
        extract_stack = traceback.extract_stack
        extract_tb = traceback.extract_tb
    # synthesize what would be returned by traceback.extract_stack at the call to
    # user's parse action 'func', so that we don't incur call penalty at parse time
    LINE_DIFF = 6
    # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
    # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
    this_line = extract_stack(limit=2)[-1]
    pa_call_line_synth = (this_line[0], this_line[1] + LINE_DIFF)
    def wrapper(*args):
        while 1:
            try:
                ret = func(*args[limit[0]:])
                foundArity[0] = True
                return ret
            except TypeError:
                # re-raise TypeErrors if they did not come from our arity testing
                if foundArity[0]:
                    raise
                else:
                    try:
                        tb = sys.exc_info()[-1]
                        # only swallow the TypeError if it was raised at our own
                        # synthesized call site, i.e. by the call itself rather
                        # than from inside func's body
                        if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
                            raise
                    finally:
                        try:
                            # break the traceback reference cycle
                            del tb
                        except NameError:
                            pass
                if limit[0] <= maxargs:
                    # probe again with one more leading argument dropped
                    limit[0] += 1
                    continue
                raise
    # copy func name to wrapper for sensible debug output
    func_name = "<parse action>"
    try:
        func_name = getattr(func, '__name__',
                            getattr(func, '__class__').__name__)
    except Exception:
        func_name = str(func)
    wrapper.__name__ = func_name
    return wrapper
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
    @staticmethod
    def setDefaultWhitespaceChars(chars):
        r"""
        Overrides the default whitespace chars
        Example::
            # default whitespace chars are space, <TAB> and newline
            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
            # change to just treat newline as significant
            ParserElement.setDefaultWhitespaceChars(" \t")
            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
        """
        # only affects ParserElements constructed after this call -- each
        # instance snapshots this set in __init__
        ParserElement.DEFAULT_WHITE_CHARS = chars
    @staticmethod
    def inlineLiteralsUsing(cls):
        """
        Set class to be used for inclusion of string literals into a parser.
        Example::
            # default literal class used is Literal
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
            # change to Suppress
            ParserElement.inlineLiteralsUsing(Suppress)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
            date_str.parseString("1999/12/31")  # -> ['1999', '12', '31']
        """
        # class used when bare strings are auto-promoted to expressions
        ParserElement._literalStringClass = cls
    def __init__(self, savelist=False):
        """Initialize common state shared by all parser expressions.
        ``savelist`` controls whether matched tokens are kept in list form
        (stored as ``saveAsList``).
        """
        self.parseAction = list()
        self.failAction = None
        # ~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
        self.strRepr = None
        self.resultsName = None
        self.saveAsList = savelist
        self.skipWhitespace = True
        # snapshot of the class-level default whitespace set at creation time
        self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
        self.copyDefaultWhiteChars = True
        self.mayReturnEmpty = False # used when checking for left-recursion
        self.keepTabs = False
        self.ignoreExprs = list()
        self.debug = False
        self.streamlined = False
        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
        self.errmsg = ""
        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
        self.debugActions = (None, None, None) # custom debug actions
        self.re = None
        self.callPreparse = True # used to avoid redundant calls to preParse
        self.callDuringTry = False
    def copy(self):
        """
        Make a copy of this :class:`ParserElement`. Useful for defining
        different parse actions for the same parsing pattern, using copies of
        the original parse element.
        Example::
            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
            integerK = integer.copy().addParseAction(lambda toks: toks[0] * 1024) + Suppress("K")
            integerM = integer.copy().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
            print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
        prints::
            [5120, 100, 655360, 268435456]
        Equivalent form of ``expr.copy()`` is just ``expr()``::
            integerM = integer().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
        """
        # shallow copy, but give the copy its own action/ignore lists so
        # later mutations don't leak back into the original
        cpy = copy.copy(self)
        cpy.parseAction = self.parseAction[:]
        cpy.ignoreExprs = self.ignoreExprs[:]
        if self.copyDefaultWhiteChars:
            # the copy re-adopts the *current* class-level default whitespace
            cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
        return cpy
def setName(self, name):
"""
Define name for this expression, makes debugging and exception messages clearer.
Example::
Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
"""
self.name = name
self.errmsg = "Expected " + self.name
if __diag__.enable_debug_on_named_expressions:
self.setDebug()
return self
    def setResultsName(self, name, listAllMatches=False):
        """
        Define name for referencing matching tokens as a nested attribute
        of the returned parse results.
        NOTE: this returns a *copy* of the original :class:`ParserElement` object;
        this is so that the client can define a basic element, such as an
        integer, and reference it in multiple places with different names.
        You can also set results names using the abbreviated syntax,
        ``expr("name")`` in place of ``expr.setResultsName("name")``
        - see :class:`__call__`.
        Example::
            date_str = (integer.setResultsName("year") + '/'
                        + integer.setResultsName("month") + '/'
                        + integer.setResultsName("day"))
            # equivalent form:
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
        """
        # delegate to the overridable implementation shared with __call__
        return self._setResultsName(name, listAllMatches)
def _setResultsName(self, name, listAllMatches=False):
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches = True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
    def setBreak(self, breakFlag=True):
        """Method to invoke the Python pdb debugger when this element is
        about to be parsed. Set ``breakFlag`` to True to enable, False to
        disable.
        """
        if breakFlag:
            # wrap the current _parse in a closure that drops into pdb first
            _parseMethod = self._parse
            def breaker(instring, loc, doActions=True, callPreParse=True):
                import pdb
                # this call to pdb.set_trace() is intentional, not a checkin error
                pdb.set_trace()
                return _parseMethod(instring, loc, doActions, callPreParse)
            # remember the original so the wrapper can be removed later
            breaker._originalParseMethod = _parseMethod
            self._parse = breaker
        else:
            # restore the original parse method if a breaker was installed
            if hasattr(self._parse, "_originalParseMethod"):
                self._parse = self._parse._originalParseMethod
        return self
    def setParseAction(self, *fns, **kwargs):
        """
        Define one or more actions to perform when successfully matching parse element definition.
        Parse action fn is a callable method with 0-3 arguments, called as ``fn(s, loc, toks)`` ,
        ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where:
        - s   = the original string being parsed (see note below)
        - loc = the location of the matching substring
        - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object
        If the functions in fns modify the tokens, they can return them as the return
        value from fn, and the modified list of tokens will replace the original.
        Otherwise, fn does not need to return any value.
        If None is passed as the parse action, all previously added parse actions for this
        expression are cleared.
        Optional keyword arguments:
        - callDuringTry = (default= ``False``) indicate if parse action should be run during lookaheads and alternate testing
        Note: the default parsing behavior is to expand tabs in the input string
        before starting the parsing process.  See :class:`parseString` for more
        information on parsing strings containing ``<TAB>`` s, and suggested
        methods to maintain a consistent view of the parsed string, the parse
        location, and line and column positions within the parsed string.
        Example::
            integer = Word(nums)
            date_str = integer + '/' + integer + '/' + integer
            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
            # use parse action to convert to ints at parse time
            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
            date_str = integer + '/' + integer + '/' + integer
            # note that integer fields are now ints, not strings
            date_str.parseString("1999/12/31")  # -> [1999, '/', 12, '/', 31]
        """
        if list(fns) == [None,]:
            # setParseAction(None) clears all previously-registered actions
            self.parseAction = []
        else:
            if not all(callable(fn) for fn in fns):
                raise TypeError("parse actions must be callable")
            # _trim_arity normalizes each action so 0-3 argument forms all work
            self.parseAction = list(map(_trim_arity, list(fns)))
        self.callDuringTry = kwargs.get("callDuringTry", False)
        return self
def addParseAction(self, *fns, **kwargs):
"""
Add one or more parse actions to expression's list of parse actions. See :class:`setParseAction`.
See examples in :class:`copy`.
"""
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def addCondition(self, *fns, **kwargs):
"""Add a boolean predicate function to expression's list of parse actions. See
:class:`setParseAction` for function call signatures. Unlike ``setParseAction``,
functions passed to ``addCondition`` need to return boolean success/fail of the condition.
Optional keyword arguments:
- message = define a custom message to be used in the raised exception
- fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
year_int = integer.copy()
year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
date_str = year_int + '/' + integer + '/' + integer
result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
"""
for fn in fns:
self.parseAction.append(conditionAsParseAction(fn, message=kwargs.get('message'),
fatal=kwargs.get('fatal', False)))
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
    def setFailAction(self, fn):
        """Define action to perform if parsing fails at this expression.
        Fail action fn is a callable function that takes the arguments
        ``fn(s, loc, expr, err)`` where:
        - s = string being parsed
        - loc = location where expression match was attempted and failed
        - expr = the parse expression that failed
        - err = the exception thrown
        The function returns no value.  It may throw :class:`ParseFatalException`
        if it is desired to stop parsing immediately."""
        # stored here; invoked from _parseNoCache when a match attempt raises
        self.failAction = fn
        return self
def _skipIgnorables(self, instring, loc):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc, dummy = e._parse(instring, loc)
exprsFound = True
except ParseException:
pass
return loc
def preParse(self, instring, loc):
if self.ignoreExprs:
loc = self._skipIgnorables(instring, loc)
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
    def parseImpl(self, instring, loc, doActions=True):
        # Default implementation: match nothing, consume nothing.
        # Subclasses override this with their actual matching logic.
        return loc, []
    def postParse(self, instring, loc, tokenlist):
        # Hook for subclasses to adjust matched tokens; default is pass-through.
        return tokenlist
# ~ @profile
    def _parseNoCache(self, instring, loc, doActions=True, callPreParse=True):
        # Un-memoized parse cycle: (optional) preParse -> parseImpl -> postParse,
        # then wrap tokens in ParseResults and run any registered parse actions.
        # Debug/fail hooks fire only when self.debug / self.failAction are set.
        TRY, MATCH, FAIL = 0, 1, 2
        debugging = (self.debug) # and doActions)
        if debugging or self.failAction:
            # ~ print ("Match", self, "at loc", loc, "(%d, %d)" % (lineno(loc, instring), col(loc, instring)))
            if self.debugActions[TRY]:
                self.debugActions[TRY](instring, loc, self)
            try:
                if callPreParse and self.callPreparse:
                    preloc = self.preParse(instring, loc)
                else:
                    preloc = loc
                tokensStart = preloc
                if self.mayIndexError or preloc >= len(instring):
                    try:
                        loc, tokens = self.parseImpl(instring, preloc, doActions)
                    except IndexError:
                        # running off the end of the input counts as a normal parse failure
                        raise ParseException(instring, len(instring), self.errmsg, self)
                else:
                    loc, tokens = self.parseImpl(instring, preloc, doActions)
            except Exception as err:
                # ~ print ("Exception raised:", err)
                if self.debugActions[FAIL]:
                    self.debugActions[FAIL](instring, tokensStart, self, err)
                if self.failAction:
                    self.failAction(instring, tokensStart, self, err)
                raise
        else:
            # fast path: no debug or fail hooks installed
            if callPreParse and self.callPreparse:
                preloc = self.preParse(instring, loc)
            else:
                preloc = loc
            tokensStart = preloc
            if self.mayIndexError or preloc >= len(instring):
                try:
                    loc, tokens = self.parseImpl(instring, preloc, doActions)
                except IndexError:
                    raise ParseException(instring, len(instring), self.errmsg, self)
            else:
                loc, tokens = self.parseImpl(instring, preloc, doActions)
        tokens = self.postParse(instring, loc, tokens)
        retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults)
        if self.parseAction and (doActions or self.callDuringTry):
            if debugging:
                try:
                    for fn in self.parseAction:
                        try:
                            tokens = fn(instring, tokensStart, retTokens)
                        except IndexError as parse_action_exc:
                            # surface IndexError raised by a parse action as a ParseException
                            exc = ParseException("exception raised in parse action")
                            exc.__cause__ = parse_action_exc
                            raise exc
                        if tokens is not None and tokens is not retTokens:
                            # a parse action returned replacement tokens - rewrap them
                            retTokens = ParseResults(tokens,
                                                      self.resultsName,
                                                      asList=self.saveAsList and isinstance(tokens, (ParseResults, list)),
                                                      modal=self.modalResults)
                except Exception as err:
                    # ~ print "Exception raised in user parse action:", err
                    if self.debugActions[FAIL]:
                        self.debugActions[FAIL](instring, tokensStart, self, err)
                    raise
            else:
                for fn in self.parseAction:
                    try:
                        tokens = fn(instring, tokensStart, retTokens)
                    except IndexError as parse_action_exc:
                        exc = ParseException("exception raised in parse action")
                        exc.__cause__ = parse_action_exc
                        raise exc
                    if tokens is not None and tokens is not retTokens:
                        retTokens = ParseResults(tokens,
                                                 self.resultsName,
                                                 asList=self.saveAsList and isinstance(tokens, (ParseResults, list)),
                                                 modal=self.modalResults)
        if debugging:
            # ~ print ("Matched", self, "->", retTokens.asList())
            if self.debugActions[MATCH]:
                self.debugActions[MATCH](instring, tokensStart, loc, self, retTokens)
        return loc, retTokens
def tryParse(self, instring, loc):
try:
return self._parse(instring, loc, doActions=False)[0]
except ParseFatalException:
raise ParseException(instring, loc, self.errmsg, self)
def canParseNext(self, instring, loc):
try:
self.tryParse(instring, loc)
except (ParseException, IndexError):
return False
else:
return True
class _UnboundedCache(object):
def __init__(self):
cache = {}
self.not_in_cache = not_in_cache = object()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
def clear(self):
cache.clear()
def cache_len(self):
return len(cache)
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
self.__len__ = types.MethodType(cache_len, self)
    if _OrderedDict is not None:
        class _FifoCache(object):
            # Bounded FIFO cache backed by OrderedDict; evicts the oldest
            # entry once more than 'size' items are stored.
            def __init__(self, size):
                self.not_in_cache = not_in_cache = object()
                cache = _OrderedDict()
                def get(self, key):
                    return cache.get(key, not_in_cache)
                def set(self, key, value):
                    cache[key] = value
                    while len(cache) > size:
                        try:
                            # popitem(False) removes the oldest (FIFO) entry
                            cache.popitem(False)
                        except KeyError:
                            pass
                def clear(self):
                    cache.clear()
                def cache_len(self):
                    return len(cache)
                self.get = types.MethodType(get, self)
                self.set = types.MethodType(set, self)
                self.clear = types.MethodType(clear, self)
                self.__len__ = types.MethodType(cache_len, self)
    else:
        class _FifoCache(object):
            # Fallback when OrderedDict is unavailable: plain dict plus a
            # bounded deque recording insertion order for FIFO eviction.
            def __init__(self, size):
                self.not_in_cache = not_in_cache = object()
                cache = {}
                key_fifo = collections.deque([], size)
                def get(self, key):
                    return cache.get(key, not_in_cache)
                def set(self, key, value):
                    cache[key] = value
                    while len(key_fifo) > size:
                        cache.pop(key_fifo.popleft(), None)
                    key_fifo.append(key)
                def clear(self):
                    cache.clear()
                    key_fifo.clear()
                def cache_len(self):
                    return len(cache)
                self.get = types.MethodType(get, self)
                self.set = types.MethodType(set, self)
                self.clear = types.MethodType(clear, self)
                self.__len__ = types.MethodType(cache_len, self)
# argument cache for optimizing repeated calls when backtracking through recursive expressions
packrat_cache = {} # this is set later by enabledPackrat(); this is here so that resetCache() doesn't fail
packrat_cache_lock = RLock()
packrat_cache_stats = [0, 0]
    # this method gets repeatedly called during backtracking with the same arguments -
    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
    def _parseCache(self, instring, loc, doActions=True, callPreParse=True):
        # Memoizing wrapper around _parseNoCache; installed as _parse by
        # enablePackrat().  Caches both successful results and failures.
        HIT, MISS = 0, 1
        lookup = (self, instring, loc, callPreParse, doActions)
        with ParserElement.packrat_cache_lock:
            cache = ParserElement.packrat_cache
            value = cache.get(lookup)
            if value is cache.not_in_cache:
                ParserElement.packrat_cache_stats[MISS] += 1
                try:
                    value = self._parseNoCache(instring, loc, doActions, callPreParse)
                except ParseBaseException as pe:
                    # cache a copy of the exception, without the traceback
                    cache.set(lookup, pe.__class__(*pe.args))
                    raise
                else:
                    # cache a copy of the results so later hits cannot be mutated by callers
                    cache.set(lookup, (value[0], value[1].copy()))
                    return value
            else:
                ParserElement.packrat_cache_stats[HIT] += 1
                if isinstance(value, Exception):
                    raise value
                return value[0], value[1].copy()
_parse = _parseNoCache
    @staticmethod
    def resetCache():
        # Empty the packrat cache and zero the hit/miss statistics.
        ParserElement.packrat_cache.clear()
        ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
_packratEnabled = False
    @staticmethod
    def enablePackrat(cache_size_limit=128):
        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
        Repeated parse attempts at the same string location (which happens
        often in many complex grammars) can immediately return a cached value,
        instead of re-executing parsing/validating code.  Memoizing is done of
        both valid results and parsing exceptions.
        Parameters:
        - cache_size_limit - (default= ``128``) - if an integer value is provided
          will limit the size of the packrat cache; if None is passed, then
          the cache size will be unbounded; if 0 is passed, the cache will
          be effectively disabled.
        This speedup may break existing programs that use parse actions that
        have side-effects.  For this reason, packrat parsing is disabled when
        you first import pyparsing.  To activate the packrat feature, your
        program must call the class method :class:`ParserElement.enablePackrat`.
        For best results, call ``enablePackrat()`` immediately after
        importing pyparsing.
        Example::
            from pip._vendor import pyparsing
            pyparsing.ParserElement.enablePackrat()
        """
        if not ParserElement._packratEnabled:
            ParserElement._packratEnabled = True
            # choose the cache implementation, then swap in the memoizing parse
            if cache_size_limit is None:
                ParserElement.packrat_cache = ParserElement._UnboundedCache()
            else:
                ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
            ParserElement._parse = ParserElement._parseCache
    def parseString(self, instring, parseAll=False):
        """
        Execute the parse expression with the given string.
        This is the main interface to the client code, once the complete
        expression has been built.
        Returns the parsed data as a :class:`ParseResults` object, which may be
        accessed as a list, or as a dict or object with attributes if the given parser
        includes results names.
        If you want the grammar to require that the entire input string be
        successfully parsed, then set ``parseAll`` to True (equivalent to ending
        the grammar with ``StringEnd()``).
        Note: ``parseString`` implicitly calls ``expandtabs()`` on the input string,
        in order to report proper column numbers in parse actions.
        If the input string contains tabs and
        the grammar uses parse actions that use the ``loc`` argument to index into the
        string being parsed, you can ensure you have a consistent view of the input
        string by:
        - calling ``parseWithTabs`` on your grammar before calling ``parseString``
          (see :class:`parseWithTabs`)
        - define your parse action using the full ``(s, loc, toks)`` signature, and
          reference the input string using the parse action's ``s`` argument
        - explictly expand the tabs in your input string before calling
          ``parseString``
        Example::
            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
        """
        ParserElement.resetCache()
        if not self.streamlined:
            self.streamline()
            # ~ self.saveAsList = True
        for e in self.ignoreExprs:
            e.streamline()
        if not self.keepTabs:
            instring = instring.expandtabs()
        try:
            loc, tokens = self._parse(instring, 0)
            if parseAll:
                # skip trailing whitespace/ignorables, then require end-of-string
                loc = self.preParse(instring, loc)
                se = Empty() + StringEnd()
                se._parse(instring, loc)
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc
        else:
            return tokens
    def scanString(self, instring, maxMatches=_MAX_INT, overlap=False):
        """
        Scan the input string for expression matches.  Each match will return the
        matching tokens, start location, and end location.  May be called with optional
        ``maxMatches`` argument, to clip scanning after 'n' matches are found.  If
        ``overlap`` is specified, then overlapping matches will be reported.
        Note that the start and end locations are reported relative to the string
        being parsed.  See :class:`parseString` for more information on parsing
        strings with embedded tabs.
        Example::
            source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
            print(source)
            for tokens, start, end in Word(alphas).scanString(source):
                print(' '*start + '^'*(end-start))
                print(' '*start + tokens[0])
        prints::
            sldjf123lsdjjkf345sldkjf879lkjsfd987
            ^^^^^
            sldjf
                    ^^^^^^^
                    lsdjjkf
                              ^^^^^^
                              sldkjf
                                       ^^^^^^
                                       lkjsfd
        """
        if not self.streamlined:
            self.streamline()
        for e in self.ignoreExprs:
            e.streamline()
        if not self.keepTabs:
            instring = _ustr(instring).expandtabs()
        instrlen = len(instring)
        loc = 0
        preparseFn = self.preParse
        parseFn = self._parse
        ParserElement.resetCache()
        matches = 0
        try:
            while loc <= instrlen and matches < maxMatches:
                try:
                    preloc = preparseFn(instring, loc)
                    nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)
                except ParseException:
                    # no match here; resume scanning one character further on
                    loc = preloc + 1
                else:
                    if nextLoc > loc:
                        matches += 1
                        yield tokens, preloc, nextLoc
                        if overlap:
                            # NOTE(review): 'nextloc' (preparse result) gates the
                            # test while 'nextLoc' (match end) is assigned; this
                            # mirrors upstream pyparsing - confirm before "fixing"
                            # the similarly-named variables.
                            nextloc = preparseFn(instring, loc)
                            if nextloc > loc:
                                loc = nextLoc
                            else:
                                loc += 1
                        else:
                            loc = nextLoc
                    else:
                        # zero-width match; advance to avoid an infinite loop
                        loc = preloc + 1
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc
    def transformString(self, instring):
        """
        Extension to :class:`scanString`, to modify matching text with modified tokens that may
        be returned from a parse action.  To use ``transformString``, define a grammar and
        attach a parse action to it that modifies the returned token list.
        Invoking ``transformString()`` on a target string will then scan for matches,
        and replace the matched text patterns according to the logic in the parse
        action.  ``transformString()`` returns the resulting transformed string.
        Example::
            wd = Word(alphas)
            wd.setParseAction(lambda toks: toks[0].title())
            print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
        prints::
            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
        """
        out = []
        lastE = 0
        # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
        # keep string locs straight between transformString and scanString
        self.keepTabs = True
        try:
            for t, s, e in self.scanString(instring):
                # copy the unmatched text preceding this match unchanged
                out.append(instring[lastE:s])
                if t:
                    if isinstance(t, ParseResults):
                        out += t.asList()
                    elif isinstance(t, list):
                        out += t
                    else:
                        out.append(t)
                lastE = e
            # trailing unmatched text
            out.append(instring[lastE:])
            out = [o for o in out if o]
            return "".join(map(_ustr, _flatten(out)))
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc
def searchString(self, instring, maxMatches=_MAX_INT):
"""
Another extension to :class:`scanString`, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
``maxMatches`` argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
# the sum() builtin can be used to merge results into a single ParseResults object
print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
prints::
[['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
"""
try:
return ParseResults([t for t, s, e in self.scanString(instring, maxMatches)])
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
"""
Generator method to split a string using the given expression as a separator.
May be called with optional ``maxsplit`` argument, to limit the number of splits;
and the optional ``includeSeparators`` argument (default= ``False``), if the separating
matching text should be included in the split results.
Example::
punc = oneOf(list(".,;:/-!?"))
print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
prints::
['This', ' this', '', ' this sentence', ' is badly punctuated', '']
"""
splits = 0
last = 0
for t, s, e in self.scanString(instring, maxMatches=maxsplit):
yield instring[last:s]
if includeSeparators:
yield t[0]
last = e
yield instring[last:]
def __add__(self, other):
"""
Implementation of + operator - returns :class:`And`. Adding strings to a ParserElement
converts them to :class:`Literal`s by default.
Example::
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
prints::
Hello, World! -> ['Hello', ',', 'World', '!']
``...`` may be used as a parse expression as a short form of :class:`SkipTo`.
Literal('start') + ... + Literal('end')
is equivalent to:
Literal('start') + SkipTo('end')("_skipped*") + Literal('end')
Note that the skipped text is returned with '_skipped' as a results name,
and to support having multiple skips in the same parser, the value returned is
a list of all skipped text.
"""
if other is Ellipsis:
return _PendingSkip(self)
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And([self, other])
def __radd__(self, other):
"""
Implementation of + operator when left operand is not a :class:`ParserElement`
"""
if other is Ellipsis:
return SkipTo(self)("_skipped*") + self
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __sub__(self, other):
"""
Implementation of - operator, returns :class:`And` with error stop
"""
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return self + And._ErrorStop() + other
def __rsub__(self, other):
"""
Implementation of - operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other - self
    def __mul__(self, other):
        """
        Implementation of * operator, allows use of ``expr * 3`` in place of
        ``expr + expr + expr``.  Expressions may also me multiplied by a 2-integer
        tuple, similar to ``{min, max}`` multipliers in regular expressions.  Tuples
        may also include ``None`` as in:
        - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent
             to ``expr*n + ZeroOrMore(expr)``
             (read as "at least n instances of ``expr``")
        - ``expr*(None, n)`` is equivalent to ``expr*(0, n)``
             (read as "0 to n instances of ``expr``")
        - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)``
        - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)``
        Note that ``expr*(None, n)`` does not raise an exception if
        more than n exprs exist in the input stream; that is,
        ``expr*(None, n)`` does not enforce a maximum number of expr
        occurrences.  If this behavior is desired, then write
        ``expr*(None, n) + ~expr``
        """
        # normalize Ellipsis forms into (min, max) tuples
        if other is Ellipsis:
            other = (0, None)
        elif isinstance(other, tuple) and other[:1] == (Ellipsis,):
            other = ((0, ) + other[1:] + (None,))[:2]
        if isinstance(other, int):
            minElements, optElements = other, 0
        elif isinstance(other, tuple):
            other = tuple(o if o is not Ellipsis else None for o in other)
            other = (other + (None, None))[:2]
            if other[0] is None:
                other = (0, other[1])
            if isinstance(other[0], int) and other[1] is None:
                # open-ended upper bound: n or more occurrences
                if other[0] == 0:
                    return ZeroOrMore(self)
                if other[0] == 1:
                    return OneOrMore(self)
                else:
                    return self * other[0] + ZeroOrMore(self)
            elif isinstance(other[0], int) and isinstance(other[1], int):
                minElements, optElements = other
                optElements -= minElements
            else:
                # NOTE(review): the format args are passed to TypeError rather
                # than %-formatted into the message; matches upstream pyparsing.
                raise TypeError("cannot multiply 'ParserElement' and ('%s', '%s') objects", type(other[0]), type(other[1]))
        else:
            raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
        if minElements < 0:
            raise ValueError("cannot multiply ParserElement by negative value")
        if optElements < 0:
            raise ValueError("second tuple value must be greater or equal to first tuple value")
        if minElements == optElements == 0:
            raise ValueError("cannot multiply ParserElement by 0 or (0, 0)")
        if optElements:
            # build a right-nested chain of Optionals for the variable part
            def makeOptionalList(n):
                if n > 1:
                    return Optional(self + makeOptionalList(n - 1))
                else:
                    return Optional(self)
            if minElements:
                if minElements == 1:
                    ret = self + makeOptionalList(optElements)
                else:
                    ret = And([self] * minElements) + makeOptionalList(optElements)
            else:
                ret = makeOptionalList(optElements)
        else:
            if minElements == 1:
                ret = self
            else:
                ret = And([self] * minElements)
        return ret
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other):
"""
Implementation of | operator - returns :class:`MatchFirst`
"""
if other is Ellipsis:
return _PendingSkip(self, must_skip=True)
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst([self, other])
def __ror__(self, other):
"""
Implementation of | operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other):
"""
Implementation of ^ operator - returns :class:`Or`
"""
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or([self, other])
def __rxor__(self, other):
"""
Implementation of ^ operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other):
"""
Implementation of & operator - returns :class:`Each`
"""
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each([self, other])
def __rand__(self, other):
"""
Implementation of & operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
    def __invert__(self):
        """
        Implementation of ~ operator - returns :class:`NotAny`
        """
        # ~expr succeeds only when expr does NOT match at the current location
        return NotAny(self)
    def __iter__(self):
        # must implement __iter__ to override legacy use of sequential access to __getitem__ to
        # iterate over a sequence; otherwise iter() would loop through __getitem__
        raise TypeError('%r object is not iterable' % self.__class__.__name__)
    def __getitem__(self, key):
        """
        use ``[]`` indexing notation as a short form for expression repetition:
        - ``expr[n]`` is equivalent to ``expr*n``
        - ``expr[m, n]`` is equivalent to ``expr*(m, n)``
        - ``expr[n, ...]`` or ``expr[n,]`` is equivalent
             to ``expr*n + ZeroOrMore(expr)``
             (read as "at least n instances of ``expr``")
        - ``expr[..., n]`` is equivalent to ``expr*(0, n)``
             (read as "0 to n instances of ``expr``")
        - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)``
        - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)``
        ``None`` may be used in place of ``...``.
        Note that ``expr[..., n]`` and ``expr[m, n]`` do not raise an exception
        if more than ``n`` ``expr``s exist in the input stream.  If this behavior is
        desired, then write ``expr[..., n] + ~expr``.
        """
        # convert single arg keys to tuples
        try:
            if isinstance(key, str):
                key = (key,)
            iter(key)
        except TypeError:
            # non-iterable key n becomes the exact-repetition tuple (n, n)
            key = (key, key)
        if len(key) > 2:
            warnings.warn("only 1 or 2 index arguments supported ({0}{1})".format(key[:5],
                                                                                '... [{0}]'.format(len(key))
                                                                                if len(key) > 5 else ''))
        # clip to 2 elements if more than two multipliers are given
        ret = self * tuple(key[:2])
        return ret
def __call__(self, name=None):
"""
Shortcut for :class:`setResultsName`, with ``listAllMatches=False``.
If ``name`` is given with a trailing ``'*'`` character, then ``listAllMatches`` will be
passed as ``True``.
If ``name` is omitted, same as calling :class:`copy`.
Example::
# these are equivalent
userdata = Word(alphas).setResultsName("name") + Word(nums + "-").setResultsName("socsecno")
userdata = Word(alphas)("name") + Word(nums + "-")("socsecno")
"""
if name is not None:
return self._setResultsName(name)
else:
return self.copy()
    def suppress(self):
        """
        Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from
        cluttering up returned output.
        """
        # wrap self in a Suppress so its tokens are dropped from results
        return Suppress(self)
def leaveWhitespace(self):
"""
Disables the skipping of whitespace before matching the characters in the
:class:`ParserElement`'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars(self, chars):
"""
Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
def parseWithTabs(self):
"""
Overrides default behavior to expand ``<TAB>``s to spaces before parsing the input string.
Must be called before ``parseString`` when the input grammar contains elements that
match ``<TAB>`` characters.
"""
self.keepTabs = True
return self
    def ignore(self, other):
        """
        Define expression to be ignored (e.g., comments) while doing pattern
        matching; may be called repeatedly, to define multiple comment or other
        ignorable patterns.
        Example::
            patt = OneOrMore(Word(alphas))
            patt.parseString('ablaj /* comment */ lskjd')  # -> ['ablaj']
            patt.ignore(cStyleComment)
            patt.parseString('ablaj /* comment */ lskjd')  # -> ['ablaj', 'lskjd']
        """
        if isinstance(other, basestring):
            other = Suppress(other)
        if isinstance(other, Suppress):
            # avoid double-wrapping and duplicate Suppress entries
            if other not in self.ignoreExprs:
                self.ignoreExprs.append(other)
        else:
            # wrap a copy so the caller's expression is not mutated
            self.ignoreExprs.append(Suppress(other.copy()))
        return self
    def setDebugActions(self, startAction, successAction, exceptionAction):
        """
        Enable display of debugging messages while doing pattern matching.
        Each action may be None to fall back to the module default handler.
        """
        self.debugActions = (startAction or _defaultStartDebugAction,
                             successAction or _defaultSuccessDebugAction,
                             exceptionAction or _defaultExceptionDebugAction)
        self.debug = True
        return self
def setDebug(self, flag=True):
    """
    Enable or disable display of debugging messages while doing pattern matching.

    Set ``flag`` to True to install the module-default debug actions, or
    False to turn debug output off. Returns self.

    Example::

        wd = Word(alphas).setName("alphaword")
        integer = Word(nums).setName("numword")
        term = wd | integer

        # turn on debugging for wd
        wd.setDebug()

        OneOrMore(term).parseString("abc 123 xyz 890")

    For each match attempt a ``"Match <exprname> at loc <n>(<line>,<col>)"``
    line is printed, followed by a ``"Matched"`` or ``"Exception raised"``
    line. Use :class:`setName` to give expressions readable names in this
    output, and :class:`setDebugActions` to supply custom callbacks instead
    of the defaults.
    """
    if not flag:
        self.debug = False
    else:
        self.setDebugActions(_defaultStartDebugAction,
                             _defaultSuccessDebugAction,
                             _defaultExceptionDebugAction)
    return self
def __str__(self):
    # display name, set by subclasses or via setName(); used in error messages
    return self.name

def __repr__(self):
    # repr matches str, routed through _ustr for Py2/Py3 unicode safety
    return _ustr(self)
def streamline(self):
    """Mark this element as streamlined; subclasses extend this to collapse
    internal structure for faster parsing.

    Clears any cached string representation, which may change after
    streamlining. Returns self.
    """
    self.streamlined = True
    self.strRepr = None
    return self

def checkRecursion(self, parseElementList):
    # base implementation: a leaf element has no sub-expressions to recurse into
    pass

def validate(self, validateTrace=None):
    """
    Check defined expressions for valid structure, check for infinite recursive definitions.
    """
    self.checkRecursion([])
def parseFile(self, file_or_filename, parseAll=False):
    """
    Execute the parse expression on the given file or filename.

    ``file_or_filename`` may be a file-like object (anything with a
    ``read()`` method) or a path string; a path is opened, read entirely,
    and closed before parsing begins.
    """
    # EAFP: assume a file-like object first, fall back to opening a path
    try:
        contents = file_or_filename.read()
    except AttributeError:
        with open(file_or_filename, "r") as fh:
            contents = fh.read()

    try:
        return self.parseString(contents, parseAll)
    except ParseBaseException as exc:
        if ParserElement.verbose_stacktrace:
            raise
        # re-raise from here to trim pyparsing's internal stack frames
        raise exc
def __eq__(self, other):
    """
    Equality comparison.

    Two ParserElements are equal when they are the same object (or, on
    Python 2, have equal instance state); a ParserElement also compares
    equal to a string that it successfully matches, so
    ``Literal("x") == "x"`` is True.
    """
    if isinstance(other, ParserElement):
        if PY_3:
            # Bug fix: the original evaluated this expression but never
            # returned it, so Py3 comparisons always fell through to None.
            return self is other or super(ParserElement, self).__eq__(other)
        else:
            return self is other or vars(self) == vars(other)
    elif isinstance(other, basestring):
        return self.matches(other)
    else:
        return super(ParserElement, self) == other
def __ne__(self, other):
    # defined explicitly for Py2 compatibility; always the inverse of __eq__
    return not (self == other)

def __hash__(self):
    # hash by identity: __eq__ can compare equal to matching strings, so
    # state-based hashing would break the hash/eq invariant
    return id(self)

def __req__(self, other):
    # reflected-equality convenience hook
    return self == other

def __rne__(self, other):
    # reflected-inequality convenience hook
    return not (self == other)
def matches(self, testString, parseAll=True):
    """
    Quick boolean test of this parser against a test string; handy for
    simple inline micro-tests while building up a larger grammar.

    :param testString: string to test against this expression for a match
    :param parseAll: (default= ``True``) flag to pass to :class:`parseString`

    Example::

        expr = Word(nums)
        assert expr.matches("100")
    """
    try:
        self.parseString(_ustr(testString), parseAll=parseAll)
    except ParseBaseException:
        return False
    return True
def runTests(self, tests, parseAll=True, comment='#',
             fullDump=True, printResults=True, failureTests=False, postParse=None,
             file=None):
    """
    Execute the parse expression on a series of test strings, showing each
    test, the parsed results or where the parse failed. Quick and easy way to
    run a parse expression against a list of sample strings.

    Parameters:
     - tests - a list of separate test strings, or a multiline string of test strings
     - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests
     - comment - (default= ``'#'``) - expression for indicating embedded comments in the test
          string; pass None to disable comment filtering
     - fullDump - (default= ``True``) - dump results as list followed by results names in nested outline;
          if False, only dump nested list
     - printResults - (default= ``True``) prints test output to stdout
     - failureTests - (default= ``False``) indicates if these tests are expected to fail parsing
     - postParse - (default= ``None``) optional callback for successful parse results; called as
          `fn(test_string, parse_results)` and returns a string to be added to the test output
     - file - (default=``None``) optional file-like object to which test output will be written;
          if None, will default to ``sys.stdout``

    Returns: a (success, results) tuple, where success indicates that all tests succeeded
    (or failed if ``failureTests`` is True), and the results contain a list of lines of each
    test's output

    Example::

        number_expr = pyparsing_common.number.copy()

        result = number_expr.runTests('''
            # unsigned integer
            100
            # negative integer
            -100
            # float with scientific notation
            6.02e23
            # integer with scientific notation
            1e-12
            ''')
        print("Success" if result[0] else "Failed!")

        result = number_expr.runTests('''
            # stray character
            100Z
            # missing leading digit before '.'
            -.100
            # too many '.'
            3.14.159
            ''', failureTests=True)
        print("Success" if result[0] else "Failed!")

    prints::

        # unsigned integer
        100
        [100]

        # negative integer
        -100
        [-100]

        # float with scientific notation
        6.02e23
        [6.02e+23]

        # integer with scientific notation
        1e-12
        [1e-12]

        Success

        # stray character
        100Z
           ^
        FAIL: Expected end of text (at char 3), (line:1, col:4)

        # missing leading digit before '.'
        -.100
        ^
        FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)

        # too many '.'
        3.14.159
            ^
        FAIL: Expected end of text (at char 4), (line:1, col:5)

        Success

    Each test string must be on a single line. If you want to test a string that spans multiple
    lines, create a test like this::

        expr.runTest(r"this is a test\\n of strings that spans \\n 3 lines")

    (Note that this is a raw string literal, you must include the leading 'r'.)
    """
    # accept a single multiline string of tests as well as a list of strings
    if isinstance(tests, basestring):
        tests = list(map(str.strip, tests.rstrip().splitlines()))
    if isinstance(comment, basestring):
        comment = Literal(comment)
    if file is None:
        file = sys.stdout
    print_ = file.write

    allResults = []
    comments = []
    success = True
    # expression to convert literal r'\n' marks in test strings to real newlines
    NL = Literal(r'\n').addParseAction(replaceWith('\n')).ignore(quotedString)
    BOM = u'\ufeff'
    for t in tests:
        # comment lines (and blank lines following comments) are accumulated
        # and attached to the next real test case's output
        if comment is not None and comment.matches(t, False) or comments and not t:
            comments.append(t)
            continue
        if not t:
            continue
        out = ['\n'.join(comments), t]
        comments = []
        try:
            # convert newline marks to actual newlines, and strip leading BOM if present
            t = NL.transformString(t.lstrip(BOM))
            result = self.parseString(t, parseAll=parseAll)
        except ParseBaseException as pe:
            fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
            # point a caret at the failure column in the test line
            if '\n' in t:
                out.append(line(pe.loc, t))
                out.append(' ' * (col(pe.loc, t) - 1) + '^' + fatal)
            else:
                out.append(' ' * pe.loc + '^' + fatal)
            out.append("FAIL: " + str(pe))
            success = success and failureTests
            result = pe
        except Exception as exc:
            out.append("FAIL-EXCEPTION: " + str(exc))
            success = success and failureTests
            result = exc
        else:
            success = success and not failureTests
            if postParse is not None:
                try:
                    pp_value = postParse(t, result)
                    if pp_value is not None:
                        if isinstance(pp_value, ParseResults):
                            out.append(pp_value.dump())
                        else:
                            out.append(str(pp_value))
                    else:
                        out.append(result.dump())
                except Exception as e:
                    # a failing postParse callback is reported but does not
                    # abort the remaining tests
                    out.append(result.dump(full=fullDump))
                    out.append("{0} failed: {1}: {2}".format(postParse.__name__, type(e).__name__, e))
            else:
                out.append(result.dump(full=fullDump))

        if printResults:
            if fullDump:
                out.append('')
            print_('\n'.join(out))

        allResults.append((t, result))

    return success, allResults
class _PendingSkip(ParserElement):
    # internal placeholder class to hold a place where '...' was added to a parser element;
    # once another ParserElement is added, this placeholder will be replaced with a SkipTo
    def __init__(self, expr, must_skip=False):
        super(_PendingSkip, self).__init__()
        # display as "<expr> ..." in diagnostics
        self.strRepr = str(expr + Empty()).replace('Empty', '...')
        self.name = self.strRepr
        self.anchor = expr
        self.must_skip = must_skip

    def __add__(self, other):
        # resolve the pending '...': skip ahead to wherever `other` matches,
        # collecting the skipped text under the '_skipped' results name
        skipper = SkipTo(other).setName("...")("_skipped*")
        if self.must_skip:
            def must_skip(t):
                # drop the placeholder token when nothing was actually skipped
                if not t._skipped or t._skipped.asList() == ['']:
                    del t[0]
                    t.pop("_skipped", None)
            def show_skip(t):
                # report a zero-width skip as a missing anchor in the results
                if t._skipped.asList()[-1:] == ['']:
                    skipped = t.pop('_skipped')
                    t['_skipped'] = 'missing <' + repr(self.anchor) + '>'
            return (self.anchor + skipper().addParseAction(must_skip)
                    | skipper().addParseAction(show_skip)) + other

        return self.anchor + skipper + other

    def __repr__(self):
        return self.strRepr

    def parseImpl(self, *args):
        # a dangling '...' that was never followed by a target expression
        raise Exception("use of `...` expression without following SkipTo target expression")
class Token(ParserElement):
    """Abstract :class:`ParserElement` subclass, for defining atomic
    matching patterns.
    """
    def __init__(self):
        # tokens are leaf matchers; they never save sub-results as a list
        super(Token, self).__init__(savelist=False)
class Empty(Token):
    """An empty token, will always match.
    """
    def __init__(self):
        super(Empty, self).__init__()
        self.name = "Empty"
        # matching consumes no input, so it always succeeds zero-width and
        # can never index past the end of the string
        self.mayReturnEmpty = True
        self.mayIndexError = False
class NoMatch(Token):
    """A token that will never match.
    """
    def __init__(self):
        super(NoMatch, self).__init__()
        self.name = "NoMatch"
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.errmsg = "Unmatchable token"

    def parseImpl(self, instring, loc, doActions=True):
        # unconditional failure, by design
        raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
    """Token to exactly match a specified string.

    Example::

        Literal('blah').parseString('blah')  # -> ['blah']
        Literal('blah').parseString('blahfooblah')  # -> ['blah']
        Literal('blah').parseString('bla')  # -> Exception: Expected "blah"

    For case-insensitive matching, use :class:`CaselessLiteral`.

    For keyword matching (force word break before and after the matched string),
    use :class:`Keyword` or :class:`CaselessKeyword`.
    """
    def __init__(self, matchString):
        super(Literal, self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            warnings.warn("null string passed to Literal; use Empty() instead",
                          SyntaxWarning, stacklevel=2)
            # degrade zero-length literals to an always-matching Empty
            self.__class__ = Empty
        self.name = '"%s"' % _ustr(self.match)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False

        # Performance tuning: modify __class__ to select
        # a parseImpl optimized for single-character check
        # (only for Literal itself, not subclasses such as CaselessLiteral)
        if self.matchLen == 1 and type(self) is Literal:
            self.__class__ = _SingleCharLiteral

    def parseImpl(self, instring, loc, doActions=True):
        # cheap first-character test before the full startswith comparison
        if instring[loc] == self.firstMatchChar and instring.startswith(self.match, loc):
            return loc + self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)
class _SingleCharLiteral(Literal):
    # Specialized Literal for one-character match strings; Literal.__init__
    # swaps qualifying instances over to this class as a speed optimization.
    def parseImpl(self, instring, loc, doActions=True):
        if instring[loc] != self.firstMatchChar:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc + 1, self.match
_L = Literal  # short legacy alias
# class used by ParserElement to auto-convert bare string operands
ParserElement._literalStringClass = Literal
class Keyword(Token):
    """Token to exactly match a specified string as a keyword, that is,
    it must be immediately followed by a non-keyword character. Compare
    with :class:`Literal`:

     - ``Literal("if")`` will match the leading ``'if'`` in
       ``'ifAndOnlyIf'``.
     - ``Keyword("if")`` will not; it will only match the leading
       ``'if'`` in ``'if x=1'``, or ``'if(y==2)'``

    Accepts two optional constructor arguments in addition to the
    keyword string:

     - ``identChars`` is a string of characters that would be valid
       identifier characters, defaulting to all alphanumerics + "_" and
       "$"
     - ``caseless`` allows case-insensitive matching, default is ``False``.

    Example::

        Keyword("start").parseString("start")  # -> ['start']
        Keyword("start").parseString("starting")  # -> Exception

    For case-insensitive matching, use :class:`CaselessKeyword`.
    """
    DEFAULT_KEYWORD_CHARS = alphanums + "_$"

    def __init__(self, matchString, identChars=None, caseless=False):
        super(Keyword, self).__init__()
        if identChars is None:
            identChars = Keyword.DEFAULT_KEYWORD_CHARS
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            # an empty keyword is almost certainly a mistake
            warnings.warn("null string passed to Keyword; use Empty() instead",
                          SyntaxWarning, stacklevel=2)
        self.name = '"%s"' % self.match
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False
        self.caseless = caseless
        if caseless:
            # normalize both the keyword and the identifier charset to upper case
            self.caselessmatch = matchString.upper()
            identChars = identChars.upper()
        self.identChars = set(identChars)

    def parseImpl(self, instring, loc, doActions=True):
        # a keyword matches only when the text matches AND the adjacent
        # characters on both sides (if any) are not identifier characters
        if self.caseless:
            if ((instring[loc:loc + self.matchLen].upper() == self.caselessmatch)
                    and (loc >= len(instring) - self.matchLen
                         or instring[loc + self.matchLen].upper() not in self.identChars)
                    and (loc == 0
                         or instring[loc - 1].upper() not in self.identChars)):
                return loc + self.matchLen, self.match

        else:
            if instring[loc] == self.firstMatchChar:
                if ((self.matchLen == 1 or instring.startswith(self.match, loc))
                        and (loc >= len(instring) - self.matchLen
                             or instring[loc + self.matchLen] not in self.identChars)
                        and (loc == 0 or instring[loc - 1] not in self.identChars)):
                    return loc + self.matchLen, self.match

        raise ParseException(instring, loc, self.errmsg, self)

    def copy(self):
        c = super(Keyword, self).copy()
        # NOTE: copies revert to the default identifier charset -- an explicit
        # identChars setting is not carried over (historical behavior)
        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
        return c

    @staticmethod
    def setDefaultKeywordChars(chars):
        """Overrides the default Keyword chars
        """
        Keyword.DEFAULT_KEYWORD_CHARS = chars
class CaselessLiteral(Literal):
    """Token to match a specified string, ignoring case of letters.
    Note: the matched results will always be in the case of the given
    match string, NOT the case of the input text.

    Example::

        OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']

    (Contrast with example for :class:`CaselessKeyword`.)
    """
    def __init__(self, matchString):
        # compare against the upper-cased literal, but remember the original
        # spelling so results preserve the defining case
        super(CaselessLiteral, self).__init__(matchString.upper())
        self.returnString = matchString
        self.name = "'%s'" % self.returnString
        self.errmsg = "Expected " + self.name

    def parseImpl(self, instring, loc, doActions=True):
        candidate = instring[loc:loc + self.matchLen]
        if candidate.upper() != self.match:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc + self.matchLen, self.returnString
class CaselessKeyword(Keyword):
    """
    Caseless version of :class:`Keyword`.

    Example::

        OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']

    (Contrast with example for :class:`CaselessLiteral`.)
    """
    def __init__(self, matchString, identChars=None):
        # delegate to Keyword with caseless matching forced on
        super(CaselessKeyword, self).__init__(matchString, identChars, caseless=True)
class CloseMatch(Token):
    """A variation on :class:`Literal` which matches "close" matches,
    that is, strings with at most 'n' mismatching characters.
    :class:`CloseMatch` takes parameters:

     - ``match_string`` - string to be matched
     - ``maxMismatches`` - (``default=1``) maximum number of
       mismatches allowed to count as a match

    The results from a successful parse will contain the matched text
    from the input string and the following named results:

     - ``mismatches`` - a list of the positions within the
       match_string where mismatches were found
     - ``original`` - the original match_string used to compare
       against the input string

    If ``mismatches`` is an empty list, then the match was an exact
    match.

    Example::

        patt = CloseMatch("ATCATCGAATGGA")
        patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
        patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)

        # exact match
        patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})

        # close match allowing up to 2 mismatches
        patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
        patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
    """
    def __init__(self, match_string, maxMismatches=1):
        super(CloseMatch, self).__init__()
        self.name = match_string
        self.match_string = match_string
        self.maxMismatches = maxMismatches
        self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
        self.mayIndexError = False
        self.mayReturnEmpty = False

    def parseImpl(self, instring, loc, doActions=True):
        start = loc
        instrlen = len(instring)
        maxloc = start + len(self.match_string)

        if maxloc <= instrlen:
            match_string = self.match_string
            match_stringloc = 0
            mismatches = []
            maxMismatches = self.maxMismatches

            # compare character-by-character, bailing out once the mismatch
            # budget is exceeded; the for/else runs only on a full comparison
            for match_stringloc, s_m in enumerate(zip(instring[loc:maxloc], match_string)):
                src, mat = s_m
                if src != mat:
                    mismatches.append(match_stringloc)
                    if len(mismatches) > maxMismatches:
                        break
            else:
                # Bug fix: the end location must be absolute. The original
                # computed ``match_stringloc + 1`` (an offset within the
                # matched slice), which returned the wrong location and the
                # wrong matched text whenever start > 0. ``loc`` still equals
                # ``start`` here, so this matches upstream's
                # ``match_stringloc + loc + 1``.
                loc = start + match_stringloc + 1
                results = ParseResults([instring[start:loc]])
                results['original'] = match_string
                results['mismatches'] = mismatches
                return loc, results

        raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
    """Token for matching words composed of allowed character sets.
    Defined with string containing all allowed initial characters, an
    optional string containing allowed body characters (if omitted,
    defaults to the initial character set), and an optional minimum,
    maximum, and/or exact length. The default value for ``min`` is
    1 (a minimum value < 1 is not valid); the default values for
    ``max`` and ``exact`` are 0, meaning no maximum or exact
    length restriction. An optional ``excludeChars`` parameter can
    list characters that might be found in the input ``bodyChars``
    string; useful to define a word of all printables except for one or
    two characters, for instance.

    :class:`srange` is useful for defining custom character set strings
    for defining ``Word`` expressions, using range notation from
    regular expression character sets.

    A common mistake is to use :class:`Word` to match a specific literal
    string, as in ``Word("Address")``. Remember that :class:`Word`
    uses the string argument to define *sets* of matchable characters.
    This expression would match "Add", "AAA", "dAred", or any other word
    made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an
    exact literal string, use :class:`Literal` or :class:`Keyword`.

    pyparsing includes helper strings for building Words:

     - :class:`alphas`
     - :class:`nums`
     - :class:`alphanums`
     - :class:`hexnums`
     - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255
       - accented, tilded, umlauted, etc.)
     - :class:`punc8bit` (non-alphabetic characters in ASCII range
       128-255 - currency, symbols, superscripts, diacriticals, etc.)
     - :class:`printables` (any non-whitespace character)

    Example::

        # a word composed of digits
        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))

        # a word with a leading capital, and zero or more lowercase
        capital_word = Word(alphas.upper(), alphas.lower())

        # hostnames are alphanumeric, with leading alpha, and '-'
        hostname = Word(alphas, alphanums + '-')

        # roman numeral (not a strict parser, accepts invalid mix of characters)
        roman = Word("IVXLCDM")

        # any string of non-whitespace characters, except for ','
        csv_value = Word(printables, excludeChars=",")
    """
    def __init__(self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None):
        super(Word, self).__init__()
        # strip any excluded characters out of both character sets up front
        if excludeChars:
            excludeChars = set(excludeChars)
            initChars = ''.join(c for c in initChars if c not in excludeChars)
            if bodyChars:
                bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
        self.initCharsOrig = initChars
        self.initChars = set(initChars)
        if bodyChars:
            self.bodyCharsOrig = bodyChars
            self.bodyChars = set(bodyChars)
        else:
            # body characters default to the same set as the initial characters
            self.bodyCharsOrig = initChars
            self.bodyChars = set(initChars)

        self.maxSpecified = max > 0

        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")

        self.minLen = min

        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT

        if exact > 0:
            # exact overrides both min and max
            self.maxLen = exact
            self.minLen = exact

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.asKeyword = asKeyword

        # Optimization: for the common unconstrained case (min=1, no max/exact),
        # compile the word to a regex and switch this instance to _WordRegex.
        if ' ' not in self.initCharsOrig + self.bodyCharsOrig and (min == 1 and max == 0 and exact == 0):
            if self.bodyCharsOrig == self.initCharsOrig:
                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
            elif len(self.initCharsOrig) == 1:
                self.reString = "%s[%s]*" % (re.escape(self.initCharsOrig),
                                             _escapeRegexRangeChars(self.bodyCharsOrig),)
            else:
                self.reString = "[%s][%s]*" % (_escapeRegexRangeChars(self.initCharsOrig),
                                               _escapeRegexRangeChars(self.bodyCharsOrig),)
            if self.asKeyword:
                self.reString = r"\b" + self.reString + r"\b"

            try:
                self.re = re.compile(self.reString)
            except Exception:
                # fall back to the character-set parseImpl below
                self.re = None
            else:
                self.re_match = self.re.match
                self.__class__ = _WordRegex

    def parseImpl(self, instring, loc, doActions=True):
        if instring[loc] not in self.initChars:
            raise ParseException(instring, loc, self.errmsg, self)

        start = loc
        loc += 1
        instrlen = len(instring)
        bodychars = self.bodyChars
        maxloc = start + self.maxLen
        maxloc = min(maxloc, instrlen)
        while loc < maxloc and instring[loc] in bodychars:
            loc += 1

        throwException = False
        if loc - start < self.minLen:
            throwException = True
        elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
            # more body chars follow a max-limited word: not a clean word break
            throwException = True
        elif self.asKeyword:
            # keyword mode requires non-word characters (or string edges) on both sides
            if (start > 0 and instring[start - 1] in bodychars
                    or loc < instrlen and instring[loc] in bodychars):
                throwException = True

        if throwException:
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, instring[start:loc]

    def __str__(self):
        try:
            return super(Word, self).__str__()
        except Exception:
            pass

        if self.strRepr is None:

            def charsAsStr(s):
                # abbreviate long character sets in the repr
                if len(s) > 4:
                    return s[:4] + "..."
                else:
                    return s

            if self.initCharsOrig != self.bodyCharsOrig:
                self.strRepr = "W:(%s, %s)" % (charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig))
            else:
                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)

        return self.strRepr
class _WordRegex(Word):
    # Word variant whose character-set test was compiled to a regex by
    # Word.__init__; matching is delegated entirely to the re module.
    def parseImpl(self, instring, loc, doActions=True):
        m = self.re_match(instring, loc)
        if m is None:
            raise ParseException(instring, loc, self.errmsg, self)
        return m.end(), m.group()
class Char(_WordRegex):
    """A short-cut class for defining ``Word(characters, exact=1)``,
    when defining a match of any single character in a string of
    characters.
    """
    def __init__(self, charset, asKeyword=False, excludeChars=None):
        super(Char, self).__init__(charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars)
        # build the single-character regex directly (Word.__init__ only
        # auto-compiles a regex for the unconstrained min=1/max=0 case)
        self.reString = "[%s]" % _escapeRegexRangeChars(''.join(self.initChars))
        if asKeyword:
            self.reString = r"\b%s\b" % self.reString
        self.re = re.compile(self.reString)
        self.re_match = self.re.match
class Regex(Token):
    r"""Token for matching strings that match a given regular
    expression. Defined with string specifying the regular expression in
    a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_.
    If the given regex contains named groups (defined using ``(?P<name>...)``),
    these will be preserved as named parse results.

    Example::

        realnum = Regex(r"[+-]?\d+\.\d*")
        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
        # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
        roman = Regex(r"M{0,4}(CM|CD|D?{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
    """
    # snapshot the compiled-pattern type for the isinstance test below
    compiledREtype = type(re.compile("[A-Z]"))

    def __init__(self, pattern, flags=0, asGroupList=False, asMatch=False):
        """The parameters ``pattern`` and ``flags`` are passed
        to the ``re.compile()`` function as-is. See the Python
        `re module <https://docs.python.org/3/library/re.html>`_ module for an
        explanation of the acceptable patterns and flags.
        """
        super(Regex, self).__init__()

        if isinstance(pattern, basestring):
            if not pattern:
                warnings.warn("null string passed to Regex; use Empty() instead",
                              SyntaxWarning, stacklevel=2)

            self.pattern = pattern
            self.flags = flags

            try:
                self.re = re.compile(self.pattern, self.flags)
                self.reString = self.pattern
            except sre_constants.error:
                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
                              SyntaxWarning, stacklevel=2)
                raise

        elif isinstance(pattern, Regex.compiledREtype):
            # accept an already-compiled pattern object as-is
            self.re = pattern
            self.pattern = self.reString = str(pattern)
            self.flags = flags

        else:
            raise ValueError("Regex may only be constructed with a string or a compiled RE object")

        self.re_match = self.re.match

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True
        self.asGroupList = asGroupList
        self.asMatch = asMatch
        # swap in a specialized parseImpl when an alternate result form is
        # requested (asMatch wins if both flags are set)
        if self.asGroupList:
            self.parseImpl = self.parseImplAsGroupList
        if self.asMatch:
            self.parseImpl = self.parseImplAsMatch

    def parseImpl(self, instring, loc, doActions=True):
        result = self.re_match(instring, loc)
        if not result:
            raise ParseException(instring, loc, self.errmsg, self)

        loc = result.end()
        ret = ParseResults(result.group())
        d = result.groupdict()
        if d:
            # expose named regex groups as named parse results
            for k, v in d.items():
                ret[k] = v
        return loc, ret

    def parseImplAsGroupList(self, instring, loc, doActions=True):
        # alternate parseImpl: return the tuple of regex groups
        result = self.re_match(instring, loc)
        if not result:
            raise ParseException(instring, loc, self.errmsg, self)

        loc = result.end()
        ret = result.groups()
        return loc, ret

    def parseImplAsMatch(self, instring, loc, doActions=True):
        # alternate parseImpl: return the raw re.Match object
        result = self.re_match(instring, loc)
        if not result:
            raise ParseException(instring, loc, self.errmsg, self)

        loc = result.end()
        ret = result
        return loc, ret

    def __str__(self):
        try:
            return super(Regex, self).__str__()
        except Exception:
            pass

        if self.strRepr is None:
            self.strRepr = "Re:(%s)" % repr(self.pattern)

        return self.strRepr

    def sub(self, repl):
        r"""
        Return Regex with an attached parse action to transform the parsed
        result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_.

        Example::

            make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>")
            print(make_html.transformString("h1:main title:"))
            # prints "<h1>main title</h1>"
        """
        # group-list results carry no text to substitute into
        if self.asGroupList:
            warnings.warn("cannot use sub() with Regex(asGroupList=True)",
                          SyntaxWarning, stacklevel=2)
            raise SyntaxError()

        # a callable repl cannot be applied via Match.expand below
        if self.asMatch and callable(repl):
            warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)",
                          SyntaxWarning, stacklevel=2)
            raise SyntaxError()

        if self.asMatch:
            def pa(tokens):
                return tokens[0].expand(repl)
        else:
            def pa(tokens):
                return self.re.sub(repl, tokens[0])
        return self.addParseAction(pa)
class QuotedString(Token):
    r"""
    Token for matching strings that are delimited by quoting characters.

    Defined with the following parameters:

        - quoteChar - string of one or more characters defining the
          quote delimiting string
        - escChar - character to escape quotes, typically backslash
          (default= ``None``)
        - escQuote - special quote sequence to escape an embedded quote
          string (such as SQL's ``""`` to escape an embedded ``"``)
          (default= ``None``)
        - multiline - boolean indicating whether quotes can span
          multiple lines (default= ``False``)
        - unquoteResults - boolean indicating whether the matched text
          should be unquoted (default= ``True``)
        - endQuoteChar - string of one or more characters defining the
          end of the quote delimited string (default= ``None`` => same as
          quoteChar)
        - convertWhitespaceEscapes - convert escaped whitespace
          (``'\t'``, ``'\n'``, etc.) to actual whitespace
          (default= ``True``)

    Example::

        qs = QuotedString('"')
        print(qs.searchString('lsjdf "This is the quote" sldjf'))
        complex_qs = QuotedString('{{', endQuoteChar='}}')
        print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
        sql_qs = QuotedString('"', escQuote='""')
        print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))

    prints::

        [['This is the quote']]
        [['This is the "quote"']]
        [['This is the quote with "embedded" quotes']]
    """
    def __init__(self, quoteChar, escChar=None, escQuote=None, multiline=False,
                 unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
        super(QuotedString, self).__init__()

        # remove white space from quote chars - wont work anyway
        quoteChar = quoteChar.strip()
        if not quoteChar:
            warnings.warn("quoteChar cannot be the empty string", SyntaxWarning, stacklevel=2)
            raise SyntaxError()

        if endQuoteChar is None:
            endQuoteChar = quoteChar
        else:
            endQuoteChar = endQuoteChar.strip()
            if not endQuoteChar:
                warnings.warn("endQuoteChar cannot be the empty string", SyntaxWarning, stacklevel=2)
                raise SyntaxError()

        self.quoteChar = quoteChar
        self.quoteCharLen = len(quoteChar)
        self.firstQuoteChar = quoteChar[0]
        self.endQuoteChar = endQuoteChar
        self.endQuoteCharLen = len(endQuoteChar)
        self.escChar = escChar
        self.escQuote = escQuote
        self.unquoteResults = unquoteResults
        self.convertWhitespaceEscapes = convertWhitespaceEscapes

        # Build a regex: open quote, then a run of characters that are not the
        # end quote (nor newlines, unless multiline) or escapes, then close quote.
        if multiline:
            self.flags = re.MULTILINE | re.DOTALL
            self.pattern = r'%s(?:[^%s%s]' % (re.escape(self.quoteChar),
                                              _escapeRegexRangeChars(self.endQuoteChar[0]),
                                              (escChar is not None and _escapeRegexRangeChars(escChar) or ''))
        else:
            self.flags = 0
            self.pattern = r'%s(?:[^%s\n\r%s]' % (re.escape(self.quoteChar),
                                                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                                                  (escChar is not None and _escapeRegexRangeChars(escChar) or ''))
        if len(self.endQuoteChar) > 1:
            # allow partial prefixes of a multi-character end quote inside the body
            self.pattern += (
                '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
                                                   _escapeRegexRangeChars(self.endQuoteChar[i]))
                                      for i in range(len(self.endQuoteChar) - 1, 0, -1)) + ')')

        if escQuote:
            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
        if escChar:
            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
            # pattern used later to strip the escape char from escaped sequences
            self.escCharReplacePattern = re.escape(self.escChar) + "(.)"
        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))

        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
            self.re_match = self.re.match
        except sre_constants.error:
            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
                          SyntaxWarning, stacklevel=2)
            raise

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True

    def parseImpl(self, instring, loc, doActions=True):
        # cheap first-character test before running the full regex
        result = instring[loc] == self.firstQuoteChar and self.re_match(instring, loc) or None
        if not result:
            raise ParseException(instring, loc, self.errmsg, self)

        loc = result.end()
        ret = result.group()

        if self.unquoteResults:

            # strip off quotes
            ret = ret[self.quoteCharLen: -self.endQuoteCharLen]

            if isinstance(ret, basestring):
                # replace escaped whitespace
                if '\\' in ret and self.convertWhitespaceEscapes:
                    ws_map = {
                        r'\t': '\t',
                        r'\n': '\n',
                        r'\f': '\f',
                        r'\r': '\r',
                    }
                    for wslit, wschar in ws_map.items():
                        ret = ret.replace(wslit, wschar)

                # replace escaped characters
                if self.escChar:
                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)

                # replace escaped quotes
                if self.escQuote:
                    ret = ret.replace(self.escQuote, self.endQuoteChar)

        return loc, ret

    def __str__(self):
        try:
            return super(QuotedString, self).__str__()
        except Exception:
            pass

        if self.strRepr is None:
            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)

        return self.strRepr
class CharsNotIn(Token):
    """Token for matching words composed of characters *not* in a given
    set (will include whitespace in matched characters if not listed in
    the provided exclusion set - see example). Defined with string
    containing all disallowed characters, and an optional minimum,
    maximum, and/or exact length. The default value for ``min`` is
    1 (a minimum value < 1 is not valid); the default values for
    ``max`` and ``exact`` are 0, meaning no maximum or exact
    length restriction.

    Example::

        # define a comma-separated-value as anything that is not a ','
        csv_value = CharsNotIn(',')
        print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))

    prints::

        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
    """
    def __init__(self, notChars, min=1, max=0, exact=0):
        super(CharsNotIn, self).__init__()
        # whitespace may be significant here, so do not auto-skip it
        self.skipWhitespace = False
        self.notChars = notChars

        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use "
                             "Optional(CharsNotIn()) if zero-length char group is permitted")

        self.minLen = min

        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT

        if exact > 0:
            # exact overrides both min and max
            self.maxLen = exact
            self.minLen = exact

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = (self.minLen == 0)
        self.mayIndexError = False

    def parseImpl(self, instring, loc, doActions=True):
        if instring[loc] in self.notChars:
            raise ParseException(instring, loc, self.errmsg, self)

        start = loc
        loc += 1
        notchars = self.notChars
        maxlen = min(start + self.maxLen, len(instring))
        while loc < maxlen and instring[loc] not in notchars:
            loc += 1

        if loc - start < self.minLen:
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, instring[start:loc]

    def __str__(self):
        try:
            return super(CharsNotIn, self).__str__()
        except Exception:
            pass

        if self.strRepr is None:
            # abbreviate long exclusion sets in the repr
            if len(self.notChars) > 4:
                self.strRepr = "!W:(%s...)" % self.notChars[:4]
            else:
                self.strRepr = "!W:(%s)" % self.notChars

        return self.strRepr
class White(Token):
    """Special matching class for matching whitespace.  Normally,
    whitespace is ignored by pyparsing grammars.  This class is included
    when some whitespace structures are significant.  Define with
    a string containing the whitespace characters to be matched; default
    is ``" \\t\\r\\n"``.  Also takes optional ``min``,
    ``max``, and ``exact`` arguments, as defined for the
    :class:`Word` class.
    """
    # Map each matchable whitespace character to a printable mnemonic, used
    # to build this expression's display name in __init__.  The keys must be
    # the actual Unicode whitespace characters (previously they were garbled
    # literals such as 'u\00A0' - a 'u' followed by a NUL byte - which made
    # the lookup below fail for any non-ASCII whitespace).
    whiteStrs = {
        ' ' : '<SP>',
        '\t': '<TAB>',
        '\n': '<LF>',
        '\r': '<CR>',
        '\f': '<FF>',
        u'\u00A0': '<NBSP>',
        u'\u1680': '<OGHAM_SPACE_MARK>',
        u'\u180E': '<MONGOLIAN_VOWEL_SEPARATOR>',
        u'\u2000': '<EN_QUAD>',
        u'\u2001': '<EM_QUAD>',
        u'\u2002': '<EN_SPACE>',
        u'\u2003': '<EM_SPACE>',
        u'\u2004': '<THREE-PER-EM_SPACE>',
        u'\u2005': '<FOUR-PER-EM_SPACE>',
        u'\u2006': '<SIX-PER-EM_SPACE>',
        u'\u2007': '<FIGURE_SPACE>',
        u'\u2008': '<PUNCTUATION_SPACE>',
        u'\u2009': '<THIN_SPACE>',
        u'\u200A': '<HAIR_SPACE>',
        u'\u200B': '<ZERO_WIDTH_SPACE>',
        u'\u202F': '<NNBSP>',
        u'\u205F': '<MMSP>',
        u'\u3000': '<IDEOGRAPHIC_SPACE>',
        }

    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
        super(White, self).__init__()
        self.matchWhite = ws
        # Characters being matched must not also be silently skipped.
        self.setWhitespaceChars("".join(c for c in self.whiteChars if c not in self.matchWhite))
        # ~ self.leaveWhitespace()
        self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
        self.mayReturnEmpty = True
        self.errmsg = "Expected " + self.name

        self.minLen = min

        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT

        if exact > 0:
            # An exact length overrides both bounds.
            self.maxLen = exact
            self.minLen = exact

    def parseImpl(self, instring, loc, doActions=True):
        if instring[loc] not in self.matchWhite:
            raise ParseException(instring, loc, self.errmsg, self)
        start = loc
        loc += 1
        # Consume up to maxLen whitespace characters, bounded by end of input.
        maxloc = start + self.maxLen
        maxloc = min(maxloc, len(instring))
        while loc < maxloc and instring[loc] in self.matchWhite:
            loc += 1

        if loc - start < self.minLen:
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, instring[start:loc]
class _PositionToken(Token):
    """Abstract base class for zero-width tokens that assert a *position*
    in the input rather than matching any characters."""
    def __init__(self):
        super(_PositionToken, self).__init__()
        # Position tokens consume no input, so they may match empty and can
        # never index past the end of the string.
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.name = self.__class__.__name__
class GoToColumn(_PositionToken):
    """Token to advance to a specific column of input text; useful for
    tabular report scraping.
    """
    def __init__(self, colno):
        super(GoToColumn, self).__init__()
        self.col = colno

    def preParse(self, instring, loc):
        """Skip whitespace (and any ignorables) up to the target column."""
        if col(loc, instring) == self.col:
            return loc
        endOfInput = len(instring)
        if self.ignoreExprs:
            loc = self._skipIgnorables(instring, loc)
        while (loc < endOfInput
               and instring[loc].isspace()
               and col(loc, instring) != self.col):
            loc += 1
        return loc

    def parseImpl(self, instring, loc, doActions=True):
        currentCol = col(loc, instring)
        if currentCol > self.col:
            raise ParseException(instring, loc, "Text not in expected column", self)
        # Return the text spanning from here to the target column.
        targetLoc = loc + (self.col - currentCol)
        return targetLoc, instring[loc:targetLoc]
class LineStart(_PositionToken):
    r"""Matches if the current position is at the beginning of a line
    within the parse string.

    Example::

        test = '''\
        AAA this line
        AAA and this line
          AAA but not this one
        B AAA and definitely not this one
        '''

        for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
            print(t)

    prints::

        ['AAA', ' this line']
        ['AAA', ' and this line']
    """
    def __init__(self):
        super(LineStart, self).__init__()
        self.errmsg = "Expected start of line"

    def parseImpl(self, instring, loc, doActions=True):
        # col() is 1-based: column 1 means start of a line.
        if col(loc, instring) != 1:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []
class LineEnd(_PositionToken):
    """Matches if the current position is at the end of a line within the
    parse string.
    """
    def __init__(self):
        super(LineEnd, self).__init__()
        # Newline is significant here, so remove it from skippable whitespace.
        self.setWhitespaceChars(ParserElement.DEFAULT_WHITE_CHARS.replace("\n", ""))
        self.errmsg = "Expected end of line"

    def parseImpl(self, instring, loc, doActions=True):
        if loc == len(instring):
            # End of input also counts as end of line (position advances
            # past the end, matching the historical behavior).
            return loc + 1, []
        if loc < len(instring):
            if instring[loc] == "\n":
                return loc + 1, "\n"
            raise ParseException(instring, loc, self.errmsg, self)
        raise ParseException(instring, loc, self.errmsg, self)
class StringStart(_PositionToken):
    """Matches if the current position is at the beginning of the parse
    string.
    """
    def __init__(self):
        super(StringStart, self).__init__()
        self.errmsg = "Expected start of text"

    def parseImpl(self, instring, loc, doActions=True):
        # A non-zero position is acceptable only when everything before it
        # is skippable whitespace/ignorables (i.e. preParse from 0 lands here).
        if loc != 0 and loc != self.preParse(instring, 0):
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []
class StringEnd(_PositionToken):
    """Matches if the current position is at the end of the parse string.
    """
    def __init__(self):
        super(StringEnd, self).__init__()
        self.errmsg = "Expected end of text"

    def parseImpl(self, instring, loc, doActions=True):
        remaining = len(instring)
        if loc < remaining:
            raise ParseException(instring, loc, self.errmsg, self)
        if loc == remaining:
            # Advance one past the end, matching the historical behavior.
            return loc + 1, []
        # Already past the end of the string.
        return loc, []
class WordStart(_PositionToken):
    r"""Matches if the current position is at the beginning of a Word,
    and is not preceded by any character in a given set of
    ``wordChars`` (default= ``printables``). To emulate the
    ``\b`` behavior of regular expressions, use
    ``WordStart(alphanums)``. ``WordStart`` will also match at
    the beginning of the string being parsed, or at the beginning of
    a line.
    """
    def __init__(self, wordChars=printables):
        super(WordStart, self).__init__()
        self.wordChars = set(wordChars)
        self.errmsg = "Not at the start of a word"

    def parseImpl(self, instring, loc, doActions=True):
        # Position 0 is always a word start.
        if loc == 0:
            return loc, []
        # Fail if the previous char is a word char, or the current one is not
        # (short-circuit order preserved: instring[loc] is only examined when
        # the preceding character is not a word character).
        if (instring[loc - 1] in self.wordChars
                or instring[loc] not in self.wordChars):
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []
class WordEnd(_PositionToken):
    r"""Matches if the current position is at the end of a Word, and is
    not followed by any character in a given set of ``wordChars``
    (default= ``printables``). To emulate the ``\b`` behavior of
    regular expressions, use ``WordEnd(alphanums)``. ``WordEnd``
    will also match at the end of the string being parsed, or at the end
    of a line.
    """
    def __init__(self, wordChars=printables):
        super(WordEnd, self).__init__()
        self.wordChars = set(wordChars)
        self.skipWhitespace = False
        self.errmsg = "Not at the end of a word"

    def parseImpl(self, instring, loc, doActions=True):
        totalLen = len(instring)
        if totalLen == 0 or loc >= totalLen:
            # At (or past) the end of the input - always a word end.
            return loc, []
        # Fail if the current char is a word char, or the previous one is not
        # (short-circuit order preserved from the original).
        if (instring[loc] in self.wordChars
                or instring[loc - 1] not in self.wordChars):
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []
class ParseExpression(ParserElement):
    """Abstract subclass of ParserElement, for combining and
    post-processing parsed tokens.

    Holds a list of contained expressions in ``self.exprs``; concrete
    subclasses (And, Or, MatchFirst, Each) define how they combine.
    """
    def __init__(self, exprs, savelist=False):
        super(ParseExpression, self).__init__(savelist)
        # Normalize 'exprs' into a list of ParserElements: materialize
        # generators, promote bare strings to literals, wrap a single
        # ParserElement in a one-element list.
        if isinstance(exprs, _generatorType):
            exprs = list(exprs)

        if isinstance(exprs, basestring):
            self.exprs = [self._literalStringClass(exprs)]
        elif isinstance(exprs, ParserElement):
            self.exprs = [exprs]
        elif isinstance(exprs, Iterable):
            exprs = list(exprs)
            # if sequence of strings provided, wrap with Literal
            if any(isinstance(expr, basestring) for expr in exprs):
                exprs = (self._literalStringClass(e) if isinstance(e, basestring) else e for e in exprs)
            self.exprs = list(exprs)
        else:
            # Last resort: attempt list() conversion, else wrap as-is.
            try:
                self.exprs = list(exprs)
            except TypeError:
                self.exprs = [exprs]
        self.callPreparse = False

    def append(self, other):
        # Add another contained expression, invalidating the cached repr.
        self.exprs.append(other)
        self.strRepr = None
        return self

    def leaveWhitespace(self):
        """Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on
           all contained expressions."""
        self.skipWhitespace = False
        # Copy contained expressions first so the originals are not mutated.
        self.exprs = [e.copy() for e in self.exprs]
        for e in self.exprs:
            e.leaveWhitespace()
        return self

    def ignore(self, other):
        # Propagate the newly-registered ignorable to all contained
        # expressions (Suppress instances are only added once).
        if isinstance(other, Suppress):
            if other not in self.ignoreExprs:
                super(ParseExpression, self).ignore(other)
                for e in self.exprs:
                    e.ignore(self.ignoreExprs[-1])
        else:
            super(ParseExpression, self).ignore(other)
            for e in self.exprs:
                e.ignore(self.ignoreExprs[-1])
        return self

    def __str__(self):
        try:
            return super(ParseExpression, self).__str__()
        except Exception:
            pass

        if self.strRepr is None:
            self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.exprs))
        return self.strRepr

    def streamline(self):
        super(ParseExpression, self).streamline()

        for e in self.exprs:
            e.streamline()

        # collapse nested And's of the form And(And(And(a, b), c), d) to And(a, b, c, d)
        # but only if there are no parse actions or resultsNames on the nested And's
        # (likewise for Or's and MatchFirst's)
        if len(self.exprs) == 2:
            other = self.exprs[0]
            if (isinstance(other, self.__class__)
                    and not other.parseAction
                    and other.resultsName is None
                    and not other.debug):
                # Splice the nested expression's children in at the front.
                self.exprs = other.exprs[:] + [self.exprs[1]]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError |= other.mayIndexError

            other = self.exprs[-1]
            if (isinstance(other, self.__class__)
                    and not other.parseAction
                    and other.resultsName is None
                    and not other.debug):
                # Splice the nested expression's children in at the back.
                self.exprs = self.exprs[:-1] + other.exprs[:]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError |= other.mayIndexError

        self.errmsg = "Expected " + _ustr(self)

        return self

    def validate(self, validateTrace=None):
        tmp = (validateTrace if validateTrace is not None else [])[:] + [self]
        for e in self.exprs:
            e.validate(tmp)
        self.checkRecursion([])

    def copy(self):
        # Deep-ish copy: contained expressions are copied too.
        ret = super(ParseExpression, self).copy()
        ret.exprs = [e.copy() for e in self.exprs]
        return ret

    def _setResultsName(self, name, listAllMatches=False):
        # Optional diagnostic: warn when a contained expression already has
        # a results name that this collection-level name would shadow.
        if __diag__.warn_ungrouped_named_tokens_in_collection:
            for e in self.exprs:
                if isinstance(e, ParserElement) and e.resultsName:
                    warnings.warn("{0}: setting results name {1!r} on {2} expression "
                                  "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection",
                                                                                       name,
                                                                                       type(self).__name__,
                                                                                       e.resultsName),
                                  stacklevel=3)

        return super(ParseExpression, self)._setResultsName(name, listAllMatches)
class And(ParseExpression):
    """
    Requires all given :class:`ParseExpression` s to be found in the given order.
    Expressions may be separated by whitespace.
    May be constructed using the ``'+'`` operator.
    May also be constructed using the ``'-'`` operator, which will
    suppress backtracking.

    Example::

        integer = Word(nums)
        name_expr = OneOrMore(Word(alphas))

        expr = And([integer("id"), name_expr("name"), integer("age")])
        # more easily written as:
        expr = integer("id") + name_expr("name") + integer("age")
    """

    class _ErrorStop(Empty):
        # Marker expression inserted by the '-' operator: once the parse
        # passes this point, failures become non-backtracking
        # ParseSyntaxExceptions (see parseImpl below).
        def __init__(self, *args, **kwargs):
            super(And._ErrorStop, self).__init__(*args, **kwargs)
            self.name = '-'
            self.leaveWhitespace()

    def __init__(self, exprs, savelist=True):
        # Support Ellipsis (...) in the sequence as shorthand for
        # "skip ahead to the next expression" (implemented via SkipTo).
        if exprs and Ellipsis in exprs:
            tmp = []
            for i, expr in enumerate(exprs):
                if expr is Ellipsis:
                    if i < len(exprs) - 1:
                        skipto_arg = (Empty() + exprs[i + 1]).exprs[-1]
                        tmp.append(SkipTo(skipto_arg)("_skipped*"))
                    else:
                        raise Exception("cannot construct And with sequence ending in ...")
                else:
                    tmp.append(expr)
            exprs[:] = tmp
        super(And, self).__init__(exprs, savelist)
        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
        # Adopt the whitespace-handling behavior of the first expression.
        self.setWhitespaceChars(self.exprs[0].whiteChars)
        self.skipWhitespace = self.exprs[0].skipWhitespace
        self.callPreparse = True

    def streamline(self):
        # collapse any _PendingSkip's (created by 'expr + ...') by merging
        # them into the following expression
        if self.exprs:
            if any(isinstance(e, ParseExpression) and e.exprs and isinstance(e.exprs[-1], _PendingSkip)
                   for e in self.exprs[:-1]):
                for i, e in enumerate(self.exprs[:-1]):
                    if e is None:
                        continue
                    if (isinstance(e, ParseExpression)
                            and e.exprs and isinstance(e.exprs[-1], _PendingSkip)):
                        e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1]
                        self.exprs[i + 1] = None

                # drop the slots emptied by the merge above
                self.exprs = [e for e in self.exprs if e is not None]

        super(And, self).streamline()
        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
        return self

    def parseImpl(self, instring, loc, doActions=True):
        # pass False as last arg to _parse for first element, since we already
        # pre-parsed the string as part of our And pre-parsing
        loc, resultlist = self.exprs[0]._parse(instring, loc, doActions, callPreParse=False)
        errorStop = False
        for e in self.exprs[1:]:
            if isinstance(e, And._ErrorStop):
                errorStop = True
                continue
            if errorStop:
                # Past an _ErrorStop: ordinary parse failures are promoted
                # to ParseSyntaxException so no backtracking occurs.
                try:
                    loc, exprtokens = e._parse(instring, loc, doActions)
                except ParseSyntaxException:
                    raise
                except ParseBaseException as pe:
                    pe.__traceback__ = None
                    raise ParseSyntaxException._from_exception(pe)
                except IndexError:
                    raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
            else:
                loc, exprtokens = e._parse(instring, loc, doActions)
            # Only accumulate non-empty results (or results carrying names).
            if exprtokens or exprtokens.haskeys():
                resultlist += exprtokens
        return loc, resultlist

    def __iadd__(self, other):
        if isinstance(other, basestring):
            other = self._literalStringClass(other)
        return self.append(other)  # And([self, other])

    def checkRecursion(self, parseElementList):
        subRecCheckList = parseElementList[:] + [self]
        for e in self.exprs:
            e.checkRecursion(subRecCheckList)
            # Recursion past a non-empty-matching element is safe; stop here.
            if not e.mayReturnEmpty:
                break

    def __str__(self):
        if hasattr(self, "name"):
            return self.name

        if self.strRepr is None:
            self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"

        return self.strRepr
class Or(ParseExpression):
    """Requires that at least one :class:`ParseExpression` is found. If
    two expressions match, the expression that matches the longest
    string will be used. May be constructed using the ``'^'``
    operator.

    Example::

        # construct Or using '^' operator

        number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
        print(number.searchString("123 3.1416 789"))

    prints::

        [['123'], ['3.1416'], ['789']]
    """
    def __init__(self, exprs, savelist=False):
        super(Or, self).__init__(exprs, savelist)
        if self.exprs:
            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
        else:
            self.mayReturnEmpty = True

    def streamline(self):
        super(Or, self).streamline()
        if __compat__.collect_all_And_tokens:
            self.saveAsList = any(e.saveAsList for e in self.exprs)
        return self

    def parseImpl(self, instring, loc, doActions=True):
        maxExcLoc = -1
        maxException = None
        matches = []
        # First pass: try each alternative (without running parse actions),
        # collecting every one that matches plus the farthest failure for
        # error reporting.
        for e in self.exprs:
            try:
                loc2 = e.tryParse(instring, loc)
            except ParseException as err:
                err.__traceback__ = None
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring, len(instring), e.errmsg, self)
                    maxExcLoc = len(instring)
            else:
                # save match among all matches, to retry longest to shortest
                matches.append((loc2, e))

        if matches:
            # re-evaluate all matches in descending order of length of match, in case attached actions
            # might change whether or how much they match of the input.
            matches.sort(key=itemgetter(0), reverse=True)

            if not doActions:
                # no further conditions or parse actions to change the selection of
                # alternative, so the first match will be the best match
                best_expr = matches[0][1]
                return best_expr._parse(instring, loc, doActions)

            # Second pass: re-parse with actions enabled, longest first,
            # keeping the longest result actually produced.
            longest = -1, None
            for loc1, expr1 in matches:
                if loc1 <= longest[0]:
                    # already have a longer match than this one will deliver, we are done
                    return longest

                try:
                    loc2, toks = expr1._parse(instring, loc, doActions)
                except ParseException as err:
                    err.__traceback__ = None
                    if err.loc > maxExcLoc:
                        maxException = err
                        maxExcLoc = err.loc
                else:
                    if loc2 >= loc1:
                        return loc2, toks
                    # didn't match as much as before
                    elif loc2 > longest[0]:
                        longest = loc2, toks

            if longest != (-1, None):
                return longest

        if maxException is not None:
            maxException.msg = self.errmsg
            raise maxException
        else:
            raise ParseException(instring, loc, "no defined alternatives to match", self)

    def __ixor__(self, other):
        if isinstance(other, basestring):
            other = self._literalStringClass(other)
        return self.append(other)  # Or([self, other])

    def __str__(self):
        if hasattr(self, "name"):
            return self.name

        if self.strRepr is None:
            self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"

        return self.strRepr

    def checkRecursion(self, parseElementList):
        subRecCheckList = parseElementList[:] + [self]
        for e in self.exprs:
            e.checkRecursion(subRecCheckList)

    def _setResultsName(self, name, listAllMatches=False):
        # Optional diagnostic: an And alternative inside a named Or may
        # change how many tokens the name captures in future releases.
        if (not __compat__.collect_all_And_tokens
                and __diag__.warn_multiple_tokens_in_named_alternation):
            if any(isinstance(e, And) for e in self.exprs):
                warnings.warn("{0}: setting results name {1!r} on {2} expression "
                              "may only return a single token for an And alternative, "
                              "in future will return the full list of tokens".format(
                                  "warn_multiple_tokens_in_named_alternation", name, type(self).__name__),
                              stacklevel=3)

        return super(Or, self)._setResultsName(name, listAllMatches)
class MatchFirst(ParseExpression):
    """Requires that at least one :class:`ParseExpression` is found. If
    two expressions match, the first one listed is the one that will
    match. May be constructed using the ``'|'`` operator.

    Example::

        # construct MatchFirst using '|' operator

        # watch the order of expressions to match
        number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
        print(number.searchString("123 3.1416 789")) #  Fail! -> [['123'], ['3'], ['1416'], ['789']]

        # put more selective expression first
        number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
        print(number.searchString("123 3.1416 789")) #  Better -> [['123'], ['3.1416'], ['789']]
    """
    def __init__(self, exprs, savelist=False):
        super(MatchFirst, self).__init__(exprs, savelist)
        if self.exprs:
            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
        else:
            self.mayReturnEmpty = True

    def streamline(self):
        super(MatchFirst, self).streamline()
        if __compat__.collect_all_And_tokens:
            self.saveAsList = any(e.saveAsList for e in self.exprs)
        return self

    def parseImpl(self, instring, loc, doActions=True):
        maxExcLoc = -1
        maxException = None
        # Try each alternative in listed order; the first success wins.
        # Track the farthest-reaching failure for error reporting.
        for e in self.exprs:
            try:
                ret = e._parse(instring, loc, doActions)
                return ret
            except ParseException as err:
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring, len(instring), e.errmsg, self)
                    maxExcLoc = len(instring)

        # only got here if no expression matched, raise exception for match that made it the furthest
        else:
            if maxException is not None:
                maxException.msg = self.errmsg
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)

    def __ior__(self, other):
        if isinstance(other, basestring):
            other = self._literalStringClass(other)
        return self.append(other)  # MatchFirst([self, other])

    def __str__(self):
        if hasattr(self, "name"):
            return self.name

        if self.strRepr is None:
            self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"

        return self.strRepr

    def checkRecursion(self, parseElementList):
        subRecCheckList = parseElementList[:] + [self]
        for e in self.exprs:
            e.checkRecursion(subRecCheckList)

    def _setResultsName(self, name, listAllMatches=False):
        # Optional diagnostic: an And alternative inside a named MatchFirst
        # may change how many tokens the name captures in future releases.
        if (not __compat__.collect_all_And_tokens
                and __diag__.warn_multiple_tokens_in_named_alternation):
            if any(isinstance(e, And) for e in self.exprs):
                warnings.warn("{0}: setting results name {1!r} on {2} expression "
                              "may only return a single token for an And alternative, "
                              "in future will return the full list of tokens".format(
                                  "warn_multiple_tokens_in_named_alternation", name, type(self).__name__),
                              stacklevel=3)

        return super(MatchFirst, self)._setResultsName(name, listAllMatches)
class Each(ParseExpression):
    """Requires all given :class:`ParseExpression` s to be found, but in
    any order. Expressions may be separated by whitespace.

    May be constructed using the ``'&'`` operator.

    Example::

        color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
        shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
        integer = Word(nums)
        shape_attr = "shape:" + shape_type("shape")
        posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
        color_attr = "color:" + color("color")
        size_attr = "size:" + integer("size")

        # use Each (using operator '&') to accept attributes in any order
        # (shape and posn are required, color and size are optional)
        shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)

        shape_spec.runTests('''
            shape: SQUARE color: BLACK posn: 100, 120
            shape: CIRCLE size: 50 color: BLUE posn: 50,80
            color:GREEN size:20 shape:TRIANGLE posn:20,40
            '''
            )

    prints::

        shape: SQUARE color: BLACK posn: 100, 120
        ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
        - color: BLACK
        - posn: ['100', ',', '120']
          - x: 100
          - y: 120
        - shape: SQUARE

        shape: CIRCLE size: 50 color: BLUE posn: 50,80
        ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
        - color: BLUE
        - posn: ['50', ',', '80']
          - x: 50
          - y: 80
        - shape: CIRCLE
        - size: 50

        color: GREEN size: 20 shape: TRIANGLE posn: 20,40
        ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
        - color: GREEN
        - posn: ['20', ',', '40']
          - x: 20
          - y: 40
        - shape: TRIANGLE
        - size: 20
    """
    def __init__(self, exprs, savelist=True):
        super(Each, self).__init__(exprs, savelist)
        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
        self.skipWhitespace = True
        # Expression classification (required/optional/repeating) is
        # deferred to the first parseImpl call.
        self.initExprGroups = True
        self.saveAsList = True

    def streamline(self):
        super(Each, self).streamline()
        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
        return self

    def parseImpl(self, instring, loc, doActions=True):
        # Lazily classify the contained expressions (once per instance) into
        # required, optional, and repeatable groups.
        if self.initExprGroups:
            # map from contained-expression id back to its Optional wrapper
            self.opt1map = dict((id(e.expr), e) for e in self.exprs if isinstance(e, Optional))
            opt1 = [e.expr for e in self.exprs if isinstance(e, Optional)]
            opt2 = [e for e in self.exprs if e.mayReturnEmpty and not isinstance(e, Optional)]
            self.optionals = opt1 + opt2
            self.multioptionals = [e.expr for e in self.exprs if isinstance(e, ZeroOrMore)]
            self.multirequired = [e.expr for e in self.exprs if isinstance(e, OneOrMore)]
            self.required = [e for e in self.exprs if not isinstance(e, (Optional, ZeroOrMore, OneOrMore))]
            self.required += self.multirequired
            self.initExprGroups = False
        tmpLoc = loc
        tmpReqd = self.required[:]
        tmpOpt = self.optionals[:]
        matchOrder = []

        # Repeatedly attempt all outstanding expressions until a full pass
        # makes no progress (every remaining expression fails).
        keepMatching = True
        while keepMatching:
            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
            failed = []
            for e in tmpExprs:
                try:
                    tmpLoc = e.tryParse(instring, tmpLoc)
                except ParseException:
                    failed.append(e)
                else:
                    matchOrder.append(self.opt1map.get(id(e), e))
                    if e in tmpReqd:
                        tmpReqd.remove(e)
                    elif e in tmpOpt:
                        tmpOpt.remove(e)
            if len(failed) == len(tmpExprs):
                keepMatching = False

        # Any required expression still unmatched is an error.
        if tmpReqd:
            missing = ", ".join(_ustr(e) for e in tmpReqd)
            raise ParseException(instring, loc, "Missing one or more required elements (%s)" % missing)

        # add any unmatched Optionals, in case they have default values defined
        matchOrder += [e for e in self.exprs if isinstance(e, Optional) and e.expr in tmpOpt]

        # Re-parse for real (with actions) in the order discovered above.
        resultlist = []
        for e in matchOrder:
            loc, results = e._parse(instring, loc, doActions)
            resultlist.append(results)

        finalResults = sum(resultlist, ParseResults([]))
        return loc, finalResults

    def __str__(self):
        if hasattr(self, "name"):
            return self.name

        if self.strRepr is None:
            self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"

        return self.strRepr

    def checkRecursion(self, parseElementList):
        subRecCheckList = parseElementList[:] + [self]
        for e in self.exprs:
            e.checkRecursion(subRecCheckList)
class ParseElementEnhance(ParserElement):
    """Abstract subclass of :class:`ParserElement`, for combining and
    post-processing parsed tokens.

    Wraps a single contained expression in ``self.expr``; subclasses
    (FollowedBy, NotAny, OneOrMore, etc.) modify how it is applied.
    """
    def __init__(self, expr, savelist=False):
        super(ParseElementEnhance, self).__init__(savelist)
        # Promote a bare string to this class's configured literal type.
        if isinstance(expr, basestring):
            if issubclass(self._literalStringClass, Token):
                expr = self._literalStringClass(expr)
            else:
                expr = self._literalStringClass(Literal(expr))
        self.expr = expr
        self.strRepr = None
        if expr is not None:
            # Inherit parsing characteristics from the wrapped expression.
            self.mayIndexError = expr.mayIndexError
            self.mayReturnEmpty = expr.mayReturnEmpty
            self.setWhitespaceChars(expr.whiteChars)
            self.skipWhitespace = expr.skipWhitespace
            self.saveAsList = expr.saveAsList
            self.callPreparse = expr.callPreparse
            self.ignoreExprs.extend(expr.ignoreExprs)

    def parseImpl(self, instring, loc, doActions=True):
        if self.expr is not None:
            return self.expr._parse(instring, loc, doActions, callPreParse=False)
        else:
            raise ParseException("", loc, self.errmsg, self)

    def leaveWhitespace(self):
        self.skipWhitespace = False
        # NOTE(review): .copy() is invoked before the None check below; if
        # self.expr were None this would raise AttributeError - presumably
        # expr is always non-None by the time this is called. TODO confirm.
        self.expr = self.expr.copy()
        if self.expr is not None:
            self.expr.leaveWhitespace()
        return self

    def ignore(self, other):
        # Propagate the newly-registered ignorable to the wrapped
        # expression (Suppress instances are only added once).
        if isinstance(other, Suppress):
            if other not in self.ignoreExprs:
                super(ParseElementEnhance, self).ignore(other)
                if self.expr is not None:
                    self.expr.ignore(self.ignoreExprs[-1])
        else:
            super(ParseElementEnhance, self).ignore(other)
            if self.expr is not None:
                self.expr.ignore(self.ignoreExprs[-1])
        return self

    def streamline(self):
        super(ParseElementEnhance, self).streamline()
        if self.expr is not None:
            self.expr.streamline()
        return self

    def checkRecursion(self, parseElementList):
        # Seeing ourselves again on the traversal path means left recursion.
        if self in parseElementList:
            raise RecursiveGrammarException(parseElementList + [self])
        subRecCheckList = parseElementList[:] + [self]
        if self.expr is not None:
            self.expr.checkRecursion(subRecCheckList)

    def validate(self, validateTrace=None):
        if validateTrace is None:
            validateTrace = []
        tmp = validateTrace[:] + [self]
        if self.expr is not None:
            self.expr.validate(tmp)
        self.checkRecursion([])

    def __str__(self):
        try:
            return super(ParseElementEnhance, self).__str__()
        except Exception:
            pass

        if self.strRepr is None and self.expr is not None:
            self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.expr))
        return self.strRepr
class FollowedBy(ParseElementEnhance):
    """Lookahead matching of the given parse expression.
    ``FollowedBy`` does *not* advance the parsing position within
    the input string, it only verifies that the specified parse
    expression matches at the current position.  ``FollowedBy``
    always returns a null token list.  If any results names are defined
    in the lookahead expression, those *will* be returned for access by
    name.

    Example::

        # use FollowedBy to match a label only if it is followed by a ':'
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))

        OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()

    prints::

        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
    """
    def __init__(self, expr):
        super(FollowedBy, self).__init__(expr)
        self.mayReturnEmpty = True

    def parseImpl(self, instring, loc, doActions=True):
        # Run the lookahead expression, then discard its matched tokens;
        # clearing the list in place preserves any *named* results while
        # leaving the parse position unchanged.
        _, lookahead_tokens = self.expr._parse(instring, loc, doActions=doActions)
        del lookahead_tokens[:]
        return loc, lookahead_tokens
class PrecededBy(ParseElementEnhance):
    """Lookbehind matching of the given parse expression.
    ``PrecededBy`` does not advance the parsing position within the
    input string, it only verifies that the specified parse expression
    matches prior to the current position.  ``PrecededBy`` always
    returns a null token list, but if a results name is defined on the
    given expression, it is returned.

    Parameters:

     - expr - expression that must match prior to the current parse
       location
     - retreat - (default= ``None``) - (int) maximum number of characters
       to lookbehind prior to the current parse location

    If the lookbehind expression is a string, Literal, Keyword, or
    a Word or CharsNotIn with a specified exact or maximum length, then
    the retreat parameter is not required. Otherwise, retreat must be
    specified to give a maximum number of characters to look back from
    the current parse position for a lookbehind match.

    Example::

        # VB-style variable names with type prefixes
        int_var = PrecededBy("#") + pyparsing_common.identifier
        str_var = PrecededBy("$") + pyparsing_common.identifier
    """
    def __init__(self, expr, retreat=None):
        super(PrecededBy, self).__init__(expr)
        # Work on a copy of the expression with whitespace skipping
        # disabled - lookbehind must match exactly up to the current loc.
        self.expr = self.expr().leaveWhitespace()
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.exact = False
        # For fixed-width expressions the retreat distance can be derived
        # automatically, allowing a single-attempt exact match.
        if isinstance(expr, str):
            retreat = len(expr)
            self.exact = True
        elif isinstance(expr, (Literal, Keyword)):
            retreat = expr.matchLen
            self.exact = True
        elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:
            retreat = expr.maxLen
            self.exact = True
        elif isinstance(expr, _PositionToken):
            retreat = 0
            self.exact = True
        self.retreat = retreat
        self.errmsg = "not preceded by " + str(expr)
        self.skipWhitespace = False

    def parseImpl(self, instring, loc=0, doActions=True):
        if self.exact:
            # Fixed-width lookbehind: match exactly 'retreat' chars back.
            if loc < self.retreat:
                raise ParseException(instring, loc, self.errmsg)
            start = loc - self.retreat
            _, ret = self.expr._parse(instring, start)
        else:
            # retreat specified a maximum lookbehind window, iterate
            # StringEnd() anchors the match to end exactly at loc.
            test_expr = self.expr + StringEnd()
            instring_slice = instring[:loc]
            last_expr = ParseException(instring, loc, self.errmsg)
            # NOTE(review): range(1, min(loc, self.retreat + 1)) never tries
            # an offset equal to min(loc, self.retreat) itself - this looks
            # like an off-by-one in the lookbehind window; confirm before
            # relying on maximum-width lookbehind matches.
            for offset in range(1, min(loc, self.retreat + 1)):
                try:
                    _, ret = test_expr._parse(instring_slice, loc - offset)
                except ParseBaseException as pbe:
                    last_expr = pbe
                else:
                    break
            else:
                raise last_expr
        # return empty list of tokens, but preserve any defined results names
        del ret[:]
        return loc, ret
class NotAny(ParseElementEnhance):
    """Negative lookahead: succeeds only if the wrapped expression does
    *not* match at the current position.  ``NotAny`` never advances the
    parse location, never skips leading whitespace, and always returns a
    null token list.  Usually built with the unary ``'~'`` operator.

    Example::

        AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())

        # take care not to mistake keywords for identifiers
        ident = ~(AND | OR | NOT) + Word(alphas)
        boolean_term = Optional(NOT) + ident

        # very crude boolean expression - to support parenthesis groups and
        # operation hierarchy, use infixNotation
        boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term)

        # integers that are followed by "." are actually floats
        integer = Word(nums) + ~Char(".")
    """
    def __init__(self, expr):
        super(NotAny, self).__init__(expr)
        # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
        self.skipWhitespace = False
        self.mayReturnEmpty = True
        self.errmsg = "Found unwanted token, " + _ustr(self.expr)

    def parseImpl(self, instring, loc, doActions=True):
        # Succeed only when the wrapped expression would fail here.
        if self.expr.canParseNext(instring, loc):
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []

    def __str__(self):
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "~{" + _ustr(self.expr) + "}"
        return self.strRepr
class _MultipleMatch(ParseElementEnhance):
    # Shared implementation for OneOrMore/ZeroOrMore: repeatedly match the
    # wrapped expression, optionally stopping when a 'stopOn' sentinel
    # expression would match.
    def __init__(self, expr, stopOn=None):
        super(_MultipleMatch, self).__init__(expr)
        self.saveAsList = True
        ender = stopOn
        if isinstance(ender, basestring):
            ender = self._literalStringClass(ender)
        self.stopOn(ender)

    def stopOn(self, ender):
        # Define (or clear, with None) the terminating sentinel; stored as
        # its negative-lookahead form so matching it stops the repetition.
        if isinstance(ender, basestring):
            ender = self._literalStringClass(ender)
        self.not_ender = ~ender if ender is not None else None
        return self

    def parseImpl(self, instring, loc, doActions=True):
        # Cache bound methods and flags in locals for speed in the loop.
        self_expr_parse = self.expr._parse
        self_skip_ignorables = self._skipIgnorables
        check_ender = self.not_ender is not None
        if check_ender:
            try_not_ender = self.not_ender.tryParse

        # must be at least one (but first see if we are the stopOn sentinel;
        # if so, fail)
        if check_ender:
            try_not_ender(instring, loc)
        loc, tokens = self_expr_parse(instring, loc, doActions, callPreParse=False)
        try:
            hasIgnoreExprs = (not not self.ignoreExprs)
            while 1:
                if check_ender:
                    try_not_ender(instring, loc)
                if hasIgnoreExprs:
                    preloc = self_skip_ignorables(instring, loc)
                else:
                    preloc = loc
                loc, tmptokens = self_expr_parse(instring, preloc, doActions)
                if tmptokens or tmptokens.haskeys():
                    tokens += tmptokens
        except (ParseException, IndexError):
            # The repetition ends at the first failure; everything matched
            # so far is kept.
            pass

        return loc, tokens

    def _setResultsName(self, name, listAllMatches=False):
        # Optional diagnostic: warn when a contained expression already has
        # a results name that this repetition-level name would shadow.
        if __diag__.warn_ungrouped_named_tokens_in_collection:
            for e in [self.expr] + getattr(self.expr, 'exprs', []):
                if isinstance(e, ParserElement) and e.resultsName:
                    warnings.warn("{0}: setting results name {1!r} on {2} expression "
                                  "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection",
                                                                                       name,
                                                                                       type(self).__name__,
                                                                                       e.resultsName),
                                  stacklevel=3)

        return super(_MultipleMatch, self)._setResultsName(name, listAllMatches)
class OneOrMore(_MultipleMatch):
    """Repetition of one or more of the given expression.

    Parameters:
     - expr - expression that must match one or more times
     - stopOn - (default= ``None``) - expression for a terminating sentinel
          (only required if the sentinel would ordinarily match the repetition
          expression)

    Example::

        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))

        text = "shape: SQUARE posn: upper left color: BLACK"
        OneOrMore(attr_expr).parseString(text).pprint()  # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]

        # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]

        # could also be written as
        (attr_expr * (1,)).parseString(text).pprint()
    """

    def __str__(self):
        # honor an explicitly-assigned name; otherwise build and cache a
        # "{expr}..." representation
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{%s}..." % _ustr(self.expr)
        return self.strRepr
class ZeroOrMore(_MultipleMatch):
    """Optional repetition of zero or more of the given expression.

    Parameters:
     - expr - expression that must match zero or more times
     - stopOn - (default= ``None``) - expression for a terminating sentinel
          (only required if the sentinel would ordinarily match the repetition
          expression)

    Example: similar to :class:`OneOrMore`
    """
    def __init__(self, expr, stopOn=None):
        super(ZeroOrMore, self).__init__(expr, stopOn=stopOn)
        # zero repetitions is a legal (empty) match
        self.mayReturnEmpty = True

    def parseImpl(self, instring, loc, doActions=True):
        # delegate to _MultipleMatch; failing to match even once is simply
        # an empty result, not a parse error
        try:
            return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
        except (ParseException, IndexError):
            return loc, []

    def __str__(self):
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[%s]..." % _ustr(self.expr)
        return self.strRepr
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
class Optional(ParseElementEnhance):
    """Optional matching of the given expression.
    Parameters:
     - expr - expression that must match zero or more times
     - default (optional) - value to be returned if the optional expression is not found.
    Example::
        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
        zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
        zip.runTests('''
            # traditional ZIP code
            12345
            # ZIP+4 form
            12101-0001
            # invalid ZIP
            98765-
            ''')
    prints::
        # traditional ZIP code
        12345
        ['12345']
        # ZIP+4 form
        12101-0001
        ['12101-0001']
        # invalid ZIP
        98765-
             ^
        FAIL: Expected end of text (at char 5), (line:1, col:6)
    """
    # sentinel distinguishing "no default supplied" from a default of None
    __optionalNotMatched = _NullToken()
    def __init__(self, expr, default=__optionalNotMatched):
        super(Optional, self).__init__(expr, savelist=False)
        self.saveAsList = self.expr.saveAsList
        self.defaultValue = default
        self.mayReturnEmpty = True
    def parseImpl(self, instring, loc, doActions=True):
        try:
            loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False)
        except (ParseException, IndexError):
            # no match: substitute the default (if one was given), keeping
            # the contained expression's results name on it
            if self.defaultValue is not self.__optionalNotMatched:
                if self.expr.resultsName:
                    tokens = ParseResults([self.defaultValue])
                    tokens[self.expr.resultsName] = self.defaultValue
                else:
                    tokens = [self.defaultValue]
            else:
                tokens = []
        return loc, tokens
    def __str__(self):
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]"
        return self.strRepr
class SkipTo(ParseElementEnhance):
    """Token for skipping over all undefined text until the matched
    expression is found.
    Parameters:
     - expr - target expression marking the end of the data to be skipped
     - include - (default= ``False``) if True, the target expression is also parsed
          (the skipped text and target expression are returned as a 2-element list).
     - ignore - (default= ``None``) used to define grammars (typically quoted strings and
          comments) that might contain false matches to the target expression
     - failOn - (default= ``None``) define expressions that are not allowed to be
          included in the skipped test; if found before the target expression is found,
          the SkipTo is not a match
    Example::
        report = '''
            Outstanding Issues Report - 1 Jan 2000
               # | Severity | Description                               |  Days Open
            -----+----------+-------------------------------------------+-----------
             101 | Critical | Intermittent system crash                 |          6
              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
              79 | Minor    | System slow when running too many reports |         47
            '''
        integer = Word(nums)
        SEP = Suppress('|')
        # use SkipTo to simply match everything up until the next SEP
        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
        # - parse action will call token.strip() for each matched token, i.e., the description body
        string_data = SkipTo(SEP, ignore=quotedString)
        string_data.setParseAction(tokenMap(str.strip))
        ticket_expr = (integer("issue_num") + SEP
                      + string_data("sev") + SEP
                      + string_data("desc") + SEP
                      + integer("days_open"))
        for tkt in ticket_expr.searchString(report):
            print tkt.dump()
    prints::
        ['101', 'Critical', 'Intermittent system crash', '6']
        - days_open: 6
        - desc: Intermittent system crash
        - issue_num: 101
        - sev: Critical
        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
        - days_open: 14
        - desc: Spelling error on Login ('log|n')
        - issue_num: 94
        - sev: Cosmetic
        ['79', 'Minor', 'System slow when running too many reports', '47']
        - days_open: 47
        - desc: System slow when running too many reports
        - issue_num: 79
        - sev: Minor
    """
    def __init__(self, other, include=False, ignore=None, failOn=None):
        super(SkipTo, self).__init__(other)
        self.ignoreExpr = ignore
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.includeMatch = include
        self.saveAsList = False
        # accept a bare string for failOn
        if isinstance(failOn, basestring):
            self.failOn = self._literalStringClass(failOn)
        else:
            self.failOn = failOn
        self.errmsg = "No match found for " + _ustr(self.expr)
    def parseImpl(self, instring, loc, doActions=True):
        startloc = loc
        instrlen = len(instring)
        expr = self.expr
        # hoist bound methods out of the scan loop (hot path)
        expr_parse = self.expr._parse
        self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
        self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
        tmploc = loc
        # scan forward one character at a time until the target matches
        while tmploc <= instrlen:
            if self_failOn_canParseNext is not None:
                # break if failOn expression matches
                if self_failOn_canParseNext(instring, tmploc):
                    break
            if self_ignoreExpr_tryParse is not None:
                # advance past ignore expressions
                while 1:
                    try:
                        tmploc = self_ignoreExpr_tryParse(instring, tmploc)
                    except ParseBaseException:
                        break
            try:
                # probe only (doActions=False); actions run below if includeMatch
                expr_parse(instring, tmploc, doActions=False, callPreParse=False)
            except (ParseException, IndexError):
                # no match, advance loc in string
                tmploc += 1
            else:
                # matched skipto expr, done
                break
        else:
            # ran off the end of the input string without matching skipto expr, fail
            raise ParseException(instring, loc, self.errmsg, self)
        # build up return values
        loc = tmploc
        skiptext = instring[startloc:loc]
        skipresult = ParseResults(skiptext)
        if self.includeMatch:
            # re-parse the target for real and append its tokens
            loc, mat = expr_parse(instring, loc, doActions, callPreParse=False)
            skipresult += mat
        return loc, skipresult
class Forward(ParseElementEnhance):
    """Forward declaration of an expression to be defined later -
    used for recursive grammars, such as algebraic infix notation.
    When the expression is known, it is assigned to the ``Forward``
    variable using the '<<' operator.

    Note: take care when assigning to ``Forward`` not to overlook
    precedence of operators.

    Specifically, '|' has a lower precedence than '<<', so that::

        fwdExpr << a | b | c

    will actually be evaluated as::

        (fwdExpr << a) | b | c

    thereby leaving b and c out as parseable alternatives.  It is recommended that you
    explicitly group the values inserted into the ``Forward``::

        fwdExpr << (a | b | c)

    Converting to use the '<<=' operator instead will avoid this problem.

    See :class:`ParseResults.pprint` for an example of a recursive
    parser created using ``Forward``.
    """
    def __init__(self, other=None):
        super(Forward, self).__init__(other, savelist=False)

    def __lshift__(self, other):
        # attach the real expression and adopt its parsing-relevant attributes
        if isinstance(other, basestring):
            other = self._literalStringClass(other)
        self.expr = other
        self.strRepr = None
        self.mayIndexError = self.expr.mayIndexError
        self.mayReturnEmpty = self.expr.mayReturnEmpty
        self.setWhitespaceChars(self.expr.whiteChars)
        self.skipWhitespace = self.expr.skipWhitespace
        self.saveAsList = self.expr.saveAsList
        self.ignoreExprs.extend(self.expr.ignoreExprs)
        return self

    def __ilshift__(self, other):
        # '<<=' behaves like '<<' but avoids operator-precedence surprises
        return self << other

    def leaveWhitespace(self):
        self.skipWhitespace = False
        return self

    def streamline(self):
        if not self.streamlined:
            self.streamlined = True
            if self.expr is not None:
                self.expr.streamline()
        return self

    def validate(self, validateTrace=None):
        # track visited Forwards to avoid infinite recursion on cyclic grammars
        if validateTrace is None:
            validateTrace = []
        if self not in validateTrace:
            tmp = validateTrace[:] + [self]
            if self.expr is not None:
                self.expr.validate(tmp)
        self.checkRecursion([])

    def __str__(self):
        if hasattr(self, "name"):
            return self.name
        if self.strRepr is not None:
            return self.strRepr
        # Avoid infinite recursion by setting a temporary strRepr
        self.strRepr = ": ..."
        # Use the string representation of main expression.
        retString = '...'
        try:
            if self.expr is not None:
                retString = _ustr(self.expr)[:1000]
            else:
                retString = "None"
        finally:
            self.strRepr = self.__class__.__name__ + ": " + retString
        return self.strRepr

    def copy(self):
        if self.expr is not None:
            return super(Forward, self).copy()
        else:
            # not yet defined - return a fresh Forward that resolves through
            # this one once it is assigned
            ret = Forward()
            ret <<= self
            return ret

    def _setResultsName(self, name, listAllMatches=False):
        if __diag__.warn_name_set_on_empty_Forward:
            if self.expr is None:
                # bugfix: the format string previously repeated {0} and used {1}
                # for the type, so the warning printed the diagnostic flag name
                # in place of the results name and dropped the type argument
                warnings.warn("{0}: setting results name {1!r} on {2} expression "
                              "that has no contained expression".format("warn_name_set_on_empty_Forward",
                                                                        name,
                                                                        type(self).__name__),
                              stacklevel=3)
        return super(Forward, self)._setResultsName(name, listAllMatches)
class TokenConverter(ParseElementEnhance):
    """
    Abstract subclass of :class:`ParseExpression`, for converting parsed results.
    """
    def __init__(self, expr, savelist=False):
        # 'savelist' is retained only for signature compatibility
        super(TokenConverter, self).__init__(expr)
        self.saveAsList = False
class Combine(TokenConverter):
    """Converter to concatenate all matching tokens to a single string.
    By default, the matching patterns must also be contiguous in the
    input string; this can be disabled by specifying
    ``'adjacent=False'`` in the constructor.
    Example::
        real = Word(nums) + '.' + Word(nums)
        print(real.parseString('3.1416')) # -> ['3', '.', '1416']
        # will also erroneously match the following
        print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
        real = Combine(Word(nums) + '.' + Word(nums))
        print(real.parseString('3.1416')) # -> ['3.1416']
        # no match when there are internal spaces
        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
    """
    def __init__(self, expr, joinString="", adjacent=True):
        super(Combine, self).__init__(expr)
        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
        if adjacent:
            self.leaveWhitespace()
        self.adjacent = adjacent
        self.skipWhitespace = True
        # joinString is placed between the matched tokens when concatenating
        self.joinString = joinString
        self.callPreparse = True
    def ignore(self, other):
        # when tokens must be adjacent, register the ignorable on ParserElement
        # directly so it is not propagated into the contained expression
        if self.adjacent:
            ParserElement.ignore(self, other)
        else:
            super(Combine, self).ignore(other)
        return self
    def postParse(self, instring, loc, tokenlist):
        # rebuild the results as one joined string, preserving named results
        retToks = tokenlist.copy()
        del retToks[:]
        retToks += ParseResults(["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults)
        if self.resultsName and retToks.haskeys():
            return [retToks]
        else:
            return retToks
class Group(TokenConverter):
    """Converter to return the matched tokens as a list - useful for
    returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions.

    Example::

        ident = Word(alphas)
        num = Word(nums)
        term = ident | num
        func = ident + Optional(delimitedList(term))
        print(func.parseString("fn a, b, 100"))  # -> ['fn', 'a', 'b', '100']

        func = ident + Group(Optional(delimitedList(term)))
        print(func.parseString("fn a, b, 100"))  # -> ['fn', ['a', 'b', '100']]
    """
    def __init__(self, expr):
        super(Group, self).__init__(expr)
        self.saveAsList = True

    def postParse(self, instring, loc, tokenlist):
        # nest the matched tokens one level deeper
        return [tokenlist]
class Dict(TokenConverter):
    """Converter to return a repetitive expression as a list, but also
    as a dictionary. Each element can also be referenced using the first
    token in the expression as its key. Useful for tabular report
    scraping when the first column can be used as a item key.
    Example::
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        # print attributes as plain groups
        print(OneOrMore(attr_expr).parseString(text).dump())
        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
        print(result.dump())
        # access named fields as dict entries, or output as dict
        print(result['shape'])
        print(result.asDict())
    prints::
        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
        - color: light blue
        - posn: upper left
        - shape: SQUARE
        - texture: burlap
        SQUARE
        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
    See more examples at :class:`ParseResults` of accessing fields by results name.
    """
    def __init__(self, expr):
        super(Dict, self).__init__(expr)
        self.saveAsList = True
    def postParse(self, instring, loc, tokenlist):
        for i, tok in enumerate(tokenlist):
            if len(tok) == 0:
                continue
            # the first element of each group becomes the dict key
            ikey = tok[0]
            if isinstance(ikey, int):
                ikey = _ustr(tok[0]).strip()
            if len(tok) == 1:
                # key with no value -> empty-string value
                tokenlist[ikey] = _ParseResultsWithOffset("", i)
            elif len(tok) == 2 and not isinstance(tok[1], ParseResults):
                # simple key/value pair
                tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)
            else:
                # multi-token value: keep everything after the key
                dictvalue = tok.copy()  # ParseResults(i)
                del dictvalue[0]
                if len(dictvalue) != 1 or (isinstance(dictvalue, ParseResults) and dictvalue.haskeys()):
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)
                else:
                    # single unnamed token: store it unwrapped
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)
        if self.resultsName:
            return [tokenlist]
        else:
            return tokenlist
class Suppress(TokenConverter):
    """Converter for ignoring the results of a parsed expression.

    Example::

        source = "a, b, c,d"
        wd = Word(alphas)
        wd_list1 = wd + ZeroOrMore(',' + wd)
        print(wd_list1.parseString(source))

        # often, delimiters that are useful during parsing are just in the
        # way afterward - use Suppress to keep them out of the parsed output
        wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
        print(wd_list2.parseString(source))

    prints::

        ['a', ',', 'b', ',', 'c', ',', 'd']
        ['a', 'b', 'c', 'd']

    (See also :class:`delimitedList`.)
    """
    def postParse(self, instring, loc, tokenlist):
        # discard whatever was matched
        return []

    def suppress(self):
        # already suppressed - nothing more to do
        return self
class OnlyOnce(object):
    """Wrapper for parse actions, to ensure they are only called once."""
    def __init__(self, methodCall):
        self.callable = _trim_arity(methodCall)
        self.called = False

    def __call__(self, s, l, t):
        # only the first invocation forwards to the wrapped action; any
        # later call fails the parse until reset() is invoked
        if self.called:
            raise ParseException(s, l, "")
        results = self.callable(s, l, t)
        self.called = True
        return results

    def reset(self):
        """Allow the wrapped parse action to fire again."""
        self.called = False
def traceParseAction(f):
    """Decorator for debugging parse actions.
    When the parse action is called, this decorator will print
    ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
    When the parse action completes, the decorator will print
    ``"<<"`` followed by the returned value, or any exception that the parse action raised.
    Example::
        wd = Word(alphas)
        @traceParseAction
        def remove_duplicate_chars(tokens):
            return ''.join(sorted(set(''.join(tokens))))
        wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
        print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
    prints::
        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
        <<leaving remove_duplicate_chars (ret: 'dfjkls')
        ['dfjkls']
    """
    f = _trim_arity(f)
    def z(*paArgs):
        thisFunc = f.__name__
        # parse actions always end with (s, l, t); any leading arg is 'self'
        s, l, t = paArgs[-3:]
        if len(paArgs) > 3:
            # bound method: qualify the name with the owning class
            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
        sys.stderr.write(">>entering %s(line: '%s', %d, %r)\n" % (thisFunc, line(l, s), l, t))
        try:
            ret = f(*paArgs)
        except Exception as exc:
            # report the exception, then let it propagate
            sys.stderr.write("<<leaving %s (exception: %s)\n" % (thisFunc, exc))
            raise
        sys.stderr.write("<<leaving %s (ret: %r)\n" % (thisFunc, ret))
        return ret
    # preserve the wrapped function's name for nicer diagnostics
    try:
        z.__name__ = f.__name__
    except AttributeError:
        pass
    return z
#
# global helpers
#
def delimitedList(expr, delim=",", combine=False):
    """Helper to define a delimited list of expressions - the delimiter
    defaults to ','. By default, the list elements and delimiters can
    have intervening whitespace, and comments, but this can be
    overridden by passing ``combine=True`` in the constructor. If
    ``combine`` is set to ``True``, the matching tokens are
    returned as a single token string, with the delimiters included;
    otherwise, the matching tokens are returned as a list of tokens,
    with the delimiters suppressed.

    Example::

        delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
        delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
    """
    dlName = "%s [%s %s]..." % (_ustr(expr), _ustr(delim), _ustr(expr))
    if combine:
        listExpr = Combine(expr + ZeroOrMore(delim + expr))
    else:
        listExpr = expr + ZeroOrMore(Suppress(delim) + expr)
    return listExpr.setName(dlName)
def countedArray(expr, intExpr=None):
    """Helper to define a counted list of expressions.
    This helper defines a pattern of the form::
        integer expr expr expr...
    where the leading integer tells how many expr expressions follow.
    The matched tokens returns the array of expr tokens as a list - the
    leading count token is suppressed.
    If ``intExpr`` is specified, it should be a pyparsing expression
    that produces an integer value.
    Example::
        countedArray(Word(alphas)).parseString('2 ab cd ef')  # -> ['ab', 'cd']
        # in this parser, the leading integer value is given in binary,
        # '10' indicating that 2 values are in the array
        binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
        countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef')  # -> ['ab', 'cd']
    """
    # placeholder; redefined on the fly once the count has been parsed
    arrayExpr = Forward()
    def countFieldParseAction(s, l, t):
        # define arrayExpr as exactly n copies of expr (or empty for n == 0)
        n = t[0]
        arrayExpr << (n and Group(And([expr] * n)) or Group(empty))
        return []
    if intExpr is None:
        # default count syntax: a decimal integer
        intExpr = Word(nums).setParseAction(lambda t: int(t[0]))
    else:
        # copy so the caller's expression is not mutated below
        intExpr = intExpr.copy()
    intExpr.setName("arrayLen")
    # callDuringTry so the count is bound even during lookahead/try parsing
    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
    return (intExpr + arrayExpr).setName('(len) ' + _ustr(expr) + '...')
def _flatten(L):
ret = []
for i in L:
if isinstance(i, list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
def matchPreviousLiteral(expr):
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks for
    a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousLiteral(first)
        matchExpr = first + ":" + second
    will match ``"1:1"``, but not ``"1:2"``.  Because this
    matches a previous literal, will also match the leading
    ``"1:1"`` in ``"1:10"``. If this is not desired, use
    :class:`matchPreviousExpr`. Do *not* use with packrat parsing
    enabled.
    """
    # placeholder; redefined each time expr matches, via the parse action below
    rep = Forward()
    def copyTokenToRepeater(s, l, t):
        if t:
            if len(t) == 1:
                rep << t[0]
            else:
                # flatten t tokens
                tflat = _flatten(t.asList())
                rep << And(Literal(tt) for tt in tflat)
        else:
            # previous expression matched no tokens -> repeat matches empty
            rep << Empty()
    # callDuringTry so the repeater is updated even during lookahead parsing
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    rep.setName('(prev) ' + _ustr(expr))
    return rep
def matchPreviousExpr(expr):
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks for
    a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousExpr(first)
        matchExpr = first + ":" + second
    will match ``"1:1"``, but not ``"1:2"``.  Because this
    matches by expressions, will *not* match the leading ``"1:1"``
    in ``"1:10"``; the expressions are evaluated first, and then
    compared, so ``"1"`` is compared with ``"10"``. Do *not* use
    with packrat parsing enabled.
    """
    rep = Forward()
    # the repeater re-parses with a copy of expr, then compares token values
    e2 = expr.copy()
    rep <<= e2
    def copyTokenToRepeater(s, l, t):
        # capture the tokens from the first match...
        matchTokens = _flatten(t.asList())
        def mustMatchTheseTokens(s, l, t):
            # ...and require the repeat to produce exactly the same tokens
            theseTokens = _flatten(t.asList())
            if theseTokens != matchTokens:
                raise ParseException('', 0, '')
        rep.setParseAction(mustMatchTheseTokens, callDuringTry=True)
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    rep.setName('(prev) ' + _ustr(expr))
    return rep
def _escapeRegexRangeChars(s):
    r"""Backslash-escape characters that are special inside a regex
    character class (``\^-]``), and encode literal newline/tab."""
    for special in r"\^-]":
        s = s.replace(special, _bslash + special)
    s = s.replace("\n", r"\n").replace("\t", r"\t")
    return _ustr(s)
def oneOf(strs, caseless=False, useRegex=True, asKeyword=False):
    """Helper to quickly define a set of alternative Literals, and makes
    sure to do longest-first testing when there is a conflict,
    regardless of the input order, but returns
    a :class:`MatchFirst` for best performance.
    Parameters:
     - strs - a string of space-delimited literals, or a collection of
       string literals
     - caseless - (default= ``False``) - treat all literals as
       caseless
     - useRegex - (default= ``True``) - as an optimization, will
       generate a Regex object; otherwise, will generate
       a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if
       creating a :class:`Regex` raises an exception)
     - asKeyword - (default=``False``) - enforce Keyword-style matching on the
       generated expressions
    Example::
        comp_oper = oneOf("< = > <= >= !=")
        var = Word(alphas)
        number = Word(nums)
        term = var | number
        comparison_expr = term + comp_oper + term
        print(comparison_expr.searchString("B = 12  AA=23 B<=AA AA>12"))
    prints::
        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
    """
    # common misuse: oneOf("a", "b") instead of oneOf("a b") or oneOf(["a", "b"])
    if isinstance(caseless, basestring):
        warnings.warn("More than one string argument passed to oneOf, pass "
                      "choices as a list or space-delimited string", stacklevel=2)
    # select comparison/prefix-test functions and element class per options
    if caseless:
        isequal = (lambda a, b: a.upper() == b.upper())
        masks = (lambda a, b: b.upper().startswith(a.upper()))
        parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral
    else:
        isequal = (lambda a, b: a == b)
        masks = (lambda a, b: b.startswith(a))
        parseElementClass = Keyword if asKeyword else Literal
    symbols = []
    if isinstance(strs, basestring):
        symbols = strs.split()
    elif isinstance(strs, Iterable):
        symbols = list(strs)
    else:
        warnings.warn("Invalid argument to oneOf, expected string or iterable",
                      SyntaxWarning, stacklevel=2)
    if not symbols:
        return NoMatch()
    if not asKeyword:
        # if not producing keywords, need to reorder to take care to avoid masking
        # longer choices with shorter ones
        i = 0
        while i < len(symbols) - 1:
            cur = symbols[i]
            for j, other in enumerate(symbols[i + 1:]):
                if isequal(other, cur):
                    # drop exact duplicates
                    del symbols[i + j + 1]
                    break
                elif masks(cur, other):
                    # a longer symbol starts with cur: move it ahead of cur
                    del symbols[i + j + 1]
                    symbols.insert(i, other)
                    break
            else:
                i += 1
    if not (caseless or asKeyword) and useRegex:
        # ~ print (strs, "->", "|".join([_escapeRegexChars(sym) for sym in symbols]))
        try:
            # all single-character symbols collapse into one character class
            if len(symbols) == len("".join(symbols)):
                return Regex("[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols)).setName(' | '.join(symbols))
            else:
                return Regex("|".join(re.escape(sym) for sym in symbols)).setName(' | '.join(symbols))
        except Exception:
            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
                          SyntaxWarning, stacklevel=2)
    # last resort, just use MatchFirst
    return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
def dictOf(key, value):
    """Helper to easily and clearly define a dictionary by specifying
    the respective patterns for the key and value.  Takes care of
    defining the :class:`Dict`, :class:`ZeroOrMore`, and
    :class:`Group` tokens in the proper order.  The key pattern
    can include delimiting markers or punctuation, as long as they are
    suppressed, thereby leaving the significant key text.  The value
    pattern can include named results, so that the :class:`Dict` results
    can include named token fields.

    Example::

        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
        print(OneOrMore(attr_expr).parseString(text).dump())

        attr_label = label
        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)

        # similar to Dict, but simpler call format
        result = dictOf(attr_label, attr_value).parseString(text)
        print(result.dump())
        print(result['shape'])
        print(result.shape)  # object attribute access works too
        print(result.asDict())

    prints::

        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
        - color: light blue
        - posn: upper left
        - shape: SQUARE
        - texture: burlap
        SQUARE
        SQUARE
        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
    """
    # each key/value match is grouped, repeated, and auto-named by Dict
    entry = Group(key + value)
    return Dict(OneOrMore(entry))
def originalTextFor(expr, asString=True):
    """Helper to return the original, untokenized text for a given
    expression.  Useful to restore the parsed fields of an HTML start
    tag into the raw tag text itself, or to revert separate tokens with
    intervening whitespace back to the original matching input text. By
    default, returns astring containing the original parsed text.
    If the optional ``asString`` argument is passed as
    ``False``, then the return value is
    a :class:`ParseResults` containing any results names that
    were originally matched, and a single token containing the original
    matched text from the input string.  So if the expression passed to
    :class:`originalTextFor` contains expressions with defined
    results names, you must set ``asString`` to ``False`` if you
    want to preserve those results name values.
    Example::
        src = "this is test <b> bold <i>text</i> </b> normal text "
        for tag in ("b", "i"):
            opener, closer = makeHTMLTags(tag)
            patt = originalTextFor(opener + SkipTo(closer) + closer)
            print(patt.searchString(src)[0])
    prints::
        ['<b> bold <i>text</i> </b>']
        ['<i>text</i>']
    """
    # zero-width markers that record the start/end character positions
    locMarker = Empty().setParseAction(lambda s, loc, t: loc)
    endlocMarker = locMarker.copy()
    endlocMarker.callPreparse = False
    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
    if asString:
        # replace all tokens with the raw slice of the input string
        extractText = lambda s, l, t: s[t._original_start: t._original_end]
    else:
        def extractText(s, l, t):
            # keep the results names, but substitute the raw text as the
            # single token; pop the bookkeeping markers
            t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
    matchExpr.setParseAction(extractText)
    # share the ignore expressions so skipped regions match the original expr
    matchExpr.ignoreExprs = expr.ignoreExprs
    return matchExpr
def ungroup(expr):
    """Helper to undo pyparsing's default grouping of And expressions,
    even if all but one are non-empty.
    """
    extract_first = lambda t: t[0]
    return TokenConverter(expr).addParseAction(extract_first)
def locatedExpr(expr):
    """Helper to decorate a returned token with its starting and ending
    locations in the input string.
    This helper adds the following results names:
     - locn_start = location where matched expression begins
     - locn_end = location where matched expression ends
     - value = the actual parsed results
    Be careful if the input text contains ``<TAB>`` characters, you
    may want to call :class:`ParserElement.parseWithTabs`
    Example::
        wd = Word(alphas)
        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
            print(match)
    prints::
        [[0, 'ljsdf', 5]]
        [[8, 'lksdjjf', 15]]
        [[18, 'lkkjj', 23]]
    """
    # zero-width marker that records the current parse location
    locator = Empty().setParseAction(lambda s, l, t: l)
    # the trailing locator must not skip whitespace, so locn_end is the
    # position immediately after the matched text
    return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
# internal expressions used by srange() to parse regex-style []-ranges:
# a backslash-escaped punctuation character (yields the char after the backslash)
_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).setParseAction(lambda s, l, t: t[0][1])
# an escaped hex character, e.g. \x41 (legacy \0x41 also accepted)
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s, l, t: unichr(int(t[0].lstrip(r'\0x'), 16)))
# an escaped octal character, e.g. \041
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s, l, t: unichr(int(t[0][1:], 8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
# full bracket expression: optional leading '^' negation, then chars/ranges
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group(OneOrMore(_charRange | _singleChar)).setResultsName("body") + "]"
def srange(s):
    r"""Helper to easily define string ranges for use in Word
    construction. Borrows syntax from regexp '[]' string range
    definitions::
        srange("[0-9]")   -> "0123456789"
        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
    The input string must be enclosed in []'s, and the returned string
    is the expanded character set joined into a single string. The
    values enclosed in the []'s may be:
     - a single character
     - an escaped character with a leading backslash (such as ``\-``
       or ``\]``)
     - an escaped hex character with a leading ``'\x'``
       (``\x21``, which is a ``'!'`` character) (``\0x##``
       is also supported for backwards compatibility)
     - an escaped octal character with a leading ``'\0'``
       (``\041``, which is a ``'!'`` character)
     - a range of any of the above, separated by a dash (``'a-z'``,
       etc.)
     - any combination of the above (``'aeiouy'``,
       ``'a-zA-Z0-9_$'``, etc.)
    """
    # expand a parsed range pair into the full run of characters; single
    # characters pass through unchanged
    _expanded = lambda p: p if not isinstance(p, ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]), ord(p[1]) + 1))
    try:
        return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
    except Exception:
        # best-effort helper: malformed input yields an empty string
        return ""
def matchOnlyAtCol(n):
    """Helper method for defining parse actions that require matching at
    a specific column in the input text.
    """
    def verifyCol(strg, locn, toks):
        # col() is 1-based; reject the match unless it begins at column n
        if col(locn, strg) != n:
            raise ParseException(strg, locn, "matched token not at column %d" % n)
    return verifyCol
def replaceWith(replStr):
    """Helper method for common parse actions that simply return
    a literal value. Especially useful when used with
    :class:`transformString<ParserElement.transformString>` ().

    Example::

        num = Word(nums).setParseAction(lambda toks: int(toks[0]))
        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
        term = na | num
        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
    """
    def _replacer(s, l, t):
        # ignore the matched tokens entirely; always yield the fixed value
        return [replStr]
    return _replacer
def removeQuotes(s, l, t):
    """Helper parse action for removing quotation marks from parsed
    quoted strings.

    Example::

        # by default, quotation marks are included in parsed results
        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]

        # use removeQuotes to strip quotation marks from parsed results
        quotedString.setParseAction(removeQuotes)
        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
    """
    # drop the first and last characters (the surrounding quote marks)
    quoted = t[0]
    return quoted[1:-1]
def tokenMap(func, *args):
    """Helper to define a parse action by mapping a function to all
    elements of a ParseResults list.  Any additional ``args`` are
    forwarded to ``func`` after the token itself, as in
    ``hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))``,
    which converts the parsed data to an integer using base 16.

    Example (compare the last to example in :class:`ParserElement.transformString`::

        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
        hex_ints.runTests('''
            00 11 22 aa FF 0a 0d 1a
            ''')

        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
        OneOrMore(upperword).runTests('''
            my kingdom for a horse
            ''')

        wd = Word(alphas).setParseAction(tokenMap(str.title))
        OneOrMore(wd).setParseAction(' '.join).runTests('''
            now is the winter of our discontent made glorious summer by this sun of york
            ''')
    """
    def mapped_action(s, l, t):
        return [func(item, *args) for item in t]

    # adopt the mapped function's name so parse-action diagnostics are
    # informative; fall back to str() for exotic callables
    try:
        name = getattr(func, '__name__',
                       getattr(func, '__class__').__name__)
    except Exception:
        name = str(func)
    mapped_action.__name__ = name

    return mapped_action
# module-level convenience parse actions; retained for backward compatibility
upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
"""(Deprecated) Helper parse action to convert tokens to upper case.
Deprecated in favor of :class:`pyparsing_common.upcaseTokens`"""
downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
"""(Deprecated) Helper parse action to convert tokens to lower case.
Deprecated in favor of :class:`pyparsing_common.downcaseTokens`"""
def _makeTags(tagStr, xml,
              suppress_LT=Suppress("<"),
              suppress_GT=Suppress(">")):
    """Internal helper to construct opening and closing tag expressions, given a tag name.

    ``tagStr`` may be a plain tag-name string or a pre-built expression with a
    ``name`` attribute; ``xml`` selects strict XML matching (case-sensitive,
    double-quoted attribute values) vs. lenient HTML matching (caseless tag,
    quoted or bare attribute values, value optional).
    Returns an ``(openTag, closeTag)`` pair of expressions.
    """
    # accept either a tag-name string or an already-built expression
    if isinstance(tagStr, basestring):
        resname = tagStr
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        resname = tagStr.name
    tagAttrName = Word(alphas, alphanums + "_-:")
    if xml:
        # XML: attribute values must be double-quoted; quotes are stripped
        tagAttrValue = dblQuotedString.copy().setParseAction(removeQuotes)
        openTag = (suppress_LT
                   + tagStr("tag")
                   + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
                   + Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == '/')
                   + suppress_GT)
    else:
        # HTML: attribute names are lower-cased, values may be quoted or bare,
        # and the "=value" part may be omitted entirely
        tagAttrValue = quotedString.copy().setParseAction(removeQuotes) | Word(printables, excludeChars=">")
        openTag = (suppress_LT
                   + tagStr("tag")
                   + Dict(ZeroOrMore(Group(tagAttrName.setParseAction(downcaseTokens)
                                           + Optional(Suppress("=") + tagAttrValue))))
                   + Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == '/')
                   + suppress_GT)
    closeTag = Combine(_L("</") + tagStr + ">", adjacent=False)
    openTag.setName("<%s>" % resname)
    # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
    openTag.addParseAction(lambda t: t.__setitem__("start" + "".join(resname.replace(":", " ").title().split()), t.copy()))
    closeTag = closeTag("end" + "".join(resname.replace(":", " ").title().split())).setName("</%s>" % resname)
    # expose the bare tag name and a ready-made body-skipping expression
    openTag.tag = resname
    closeTag.tag = resname
    openTag.tag_body = SkipTo(closeTag())
    return openTag, closeTag
def makeHTMLTags(tagStr):
    """Helper to construct opening and closing tag expressions for HTML,
    given a tag name.  Tag names match in either upper or lower case;
    attributes may be namespaced and have quoted or unquoted values.

    Returns a 2-tuple of ``(openTag, closeTag)`` pyparsing expressions.

    Example::

        text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
        a, a_end = makeHTMLTags("A")
        link_expr = a + SkipTo(a_end)("link_text") + a_end

        for link in link_expr.searchString(text):
            # attributes in the <A> tag (like "href") are also
            # accessible as named results
            print(link.link_text, '->', link.href)
    """
    # xml=False selects the lenient, caseless HTML matching rules
    return _makeTags(tagStr, False)
def makeXMLTags(tagStr):
    """Helper to construct opening and closing tag expressions for XML,
    given a tag name.  Unlike :class:`makeHTMLTags`, tags match only in
    the given upper/lower case and attribute values must be quoted.

    Returns a 2-tuple of ``(openTag, closeTag)`` pyparsing expressions.
    """
    # xml=True selects strict, case-sensitive XML matching rules
    return _makeTags(tagStr, True)
def withAttribute(*args, **attrDict):
    """Helper to create a validating parse action to be used with start
    tags created with :class:`makeXMLTags` or :class:`makeHTMLTags`.
    Use ``withAttribute`` to qualify a starting tag with a required
    attribute value, to avoid false matches on common tags such as
    ``<TD>`` or ``<DIV>``.

    The required attributes may be given as:

     - keyword arguments, as in ``(align="right")``, or
     - an explicit dict with ``**`` operator, when an attribute name is
       also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
     - a list of name-value tuples, as in
       ``(("ns1:class", "Customer"), ("ns2:align", "right"))``

    Attribute names with a namespace prefix require the tuple form.
    Attribute names are matched insensitive to upper/lower case.

    To verify that an attribute exists without constraining its value,
    pass ``withAttribute.ANY_VALUE`` as the value.

    If just testing for ``class`` (with or without a namespace), use
    :class:`withClass`.
    """
    # normalize both calling conventions into a list of (name, value) pairs
    if args:
        required = args[:]
    else:
        required = attrDict.items()
    required = [(name, value) for name, value in required]

    def attr_check(s, l, tokens):
        for name, value in required:
            if name not in tokens:
                raise ParseException(s, l, "no matching attribute " + name)
            # ANY_VALUE is an identity sentinel: presence alone satisfies it
            if value != withAttribute.ANY_VALUE and tokens[name] != value:
                raise ParseException(s, l, "attribute '%s' has value '%s', must be '%s'" %
                                     (name, tokens[name], value))
    return attr_check
withAttribute.ANY_VALUE = object()
def withClass(classname, namespace=''):
    """Simplified version of :class:`withAttribute` for matching on a
    tag's ``class`` attribute - provided separately because ``class`` is
    a reserved word in Python and cannot be used as a keyword argument.

    ``namespace``, if given, is prepended to form a ``"ns:class"``
    attribute name.  Pass ``withAttribute.ANY_VALUE`` as ``classname``
    to match any class value.
    """
    # build the (possibly namespaced) attribute name, then delegate
    attr_name = "%s:class" % namespace if namespace else "class"
    return withAttribute(**{attr_name: classname})
# associativity constants for infixNotation; unique sentinel objects,
# compared by identity
opAssoc = SimpleNamespace()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def infixNotation(baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')')):
    """Helper method for constructing grammars of expressions made up of
    operators working in a precedence hierarchy.  Operators may be unary
    or binary, left- or right-associative.  Parse actions can also be
    attached to operator expressions. The generated parser will also
    recognize the use of parentheses to override operator precedences
    (see example below).

    Note: if you define a deep operator list, you may see performance
    issues when using infixNotation. See
    :class:`ParserElement.enablePackrat` for a mechanism to potentially
    improve your parser performance.

    Parameters:
     - baseExpr - expression representing the most basic element for the
       nested
     - opList - list of tuples, one for each operator precedence level
       in the expression grammar; each tuple is of the form ``(opExpr,
       numTerms, rightLeftAssoc, parseAction)``, where:

       - opExpr is the pyparsing expression for the operator; may also
         be a string, which will be converted to a Literal; if numTerms
         is 3, opExpr is a tuple of two expressions, for the two
         operators separating the 3 terms
       - numTerms is the number of terms for this operator (must be 1,
         2, or 3)
       - rightLeftAssoc is the indicator whether the operator is right
         or left associative, using the pyparsing-defined constants
         ``opAssoc.RIGHT`` and ``opAssoc.LEFT``.
       - parseAction is the parse action to be associated with
         expressions matching this operator expression (the parse action
         tuple member may be omitted); if the parse action is passed
         a tuple or list of functions, this is equivalent to calling
         ``setParseAction(*fn)`` (:class:`ParserElement.setParseAction`)
     - lpar - expression for matching left-parentheses
       (default= ``Suppress('(')``)
     - rpar - expression for matching right-parentheses
       (default= ``Suppress(')')``)

    Example::

        # simple example of four-function arithmetic with ints and
        # variable names
        integer = pyparsing_common.signed_integer
        varname = pyparsing_common.identifier

        arith_expr = infixNotation(integer | varname,
            [
            ('-', 1, opAssoc.RIGHT),
            (oneOf('* /'), 2, opAssoc.LEFT),
            (oneOf('+ -'), 2, opAssoc.LEFT),
            ])

        arith_expr.runTests('''
            5+3*6
            (5+3)*6
            -2--11
            ''', fullDump=False)

    prints::

        5+3*6
        [[5, '+', [3, '*', 6]]]
        (5+3)*6
        [[[5, '+', 3], '*', 6]]
        -2--11
        [[['-', 2], '-', ['-', 11]]]
    """
    # captive version of FollowedBy that does not do parse actions or capture results names
    class _FB(FollowedBy):
        def parseImpl(self, instring, loc, doActions=True):
            self.expr.tryParse(instring, loc)
            return loc, []
    # each precedence level wraps the one below it; parentheses restart
    # the hierarchy at the top (ret)
    ret = Forward()
    lastExpr = baseExpr | (lpar + ret + rpar)
    for i, operDef in enumerate(opList):
        # pad 3-tuples to 4 so the optional parse action unpacks as None
        opExpr, arity, rightLeftAssoc, pa = (operDef + (None, ))[:4]
        termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
        if arity == 3:
            if opExpr is None or len(opExpr) != 2:
                raise ValueError(
                    "if numterms=3, opExpr must be a tuple or list of two expressions")
            opExpr1, opExpr2 = opExpr
        thisExpr = Forward().setName(termName)
        if rightLeftAssoc == opAssoc.LEFT:
            if arity == 1:
                matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + OneOrMore(opExpr))
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(lastExpr + OneOrMore(opExpr + lastExpr))
                else:
                    matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr + OneOrMore(lastExpr))
            elif arity == 3:
                matchExpr = (_FB(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr)
                             + Group(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr))
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        elif rightLeftAssoc == opAssoc.RIGHT:
            if arity == 1:
                # try to avoid LR with this extra test
                if not isinstance(opExpr, Optional):
                    opExpr = Optional(opExpr)
                matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr)
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(lastExpr + OneOrMore(opExpr + thisExpr))
                else:
                    matchExpr = _FB(lastExpr + thisExpr) + Group(lastExpr + OneOrMore(thisExpr))
            elif arity == 3:
                matchExpr = (_FB(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)
                             + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr))
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        else:
            raise ValueError("operator must indicate right or left associativity")
        if pa:
            if isinstance(pa, (tuple, list)):
                matchExpr.setParseAction(*pa)
            else:
                matchExpr.setParseAction(pa)
        thisExpr <<= (matchExpr.setName(termName) | lastExpr)
        lastExpr = thisExpr
    ret <<= lastExpr
    return ret
operatorPrecedence = infixNotation
"""(Deprecated) Former name of :class:`infixNotation`, will be
dropped in a future release."""
# regex-based quoted-string expressions; the patterns accept doubled-quote
# escapes ("" / '') and backslash escapes, including \x hex escape sequences
dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"').setName("string enclosed in double quotes")
sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").setName("string enclosed in single quotes")
quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
                       | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
    """Helper method for defining nested lists enclosed in opening and
    closing delimiters ("(" and ")" are the default).

    Parameters:
     - opener - opening character for a nested list
       (default= ``"("``); can also be a pyparsing expression
     - closer - closing character for a nested list
       (default= ``")"``); can also be a pyparsing expression
     - content - expression for items within the nested lists
       (default= ``None``)
     - ignoreExpr - expression for ignoring opening and closing
       delimiters (default= :class:`quotedString`)

    If an expression is not provided for the content argument, the
    nested expression will capture all whitespace-delimited content
    between delimiters as a list of separate values.

    Use the ``ignoreExpr`` argument to define expressions that may
    contain opening or closing characters that should not be treated as
    opening or closing characters for nesting, such as quotedString or
    a comment expression.  Specify multiple expressions using an
    :class:`Or` or :class:`MatchFirst`. The default is
    :class:`quotedString`, but if no expressions are to be ignored, then
    pass ``None`` for this argument.

    Example::

        data_type = oneOf("void int short long char float double")
        decl_data_type = Combine(data_type + Optional(Word('*')))
        ident = Word(alphas+'_', alphanums+'_')
        number = pyparsing_common.number
        arg = Group(decl_data_type + ident)
        LPAR, RPAR = map(Suppress, "()")

        code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))

        c_function = (decl_data_type("type")
                      + ident("name")
                      + LPAR + Optional(delimitedList(arg), [])("args") + RPAR
                      + code_body("body"))
        c_function.ignore(cStyleComment)

        source_code = '''
            int is_odd(int x) {
                return (x%2);
            }

            int dec_to_hex(char hchar) {
                if (hchar >= '0' && hchar <= '9') {
                    return (ord(hchar)-ord('0'));
                } else {
                    return (10+ord(hchar)-ord('A'));
                }
            }
        '''
        for func in c_function.searchString(source_code):
            print("%(name)s (%(type)s) args: %(args)s" % func)

    prints::

        is_odd (int) args: [['int', 'x']]
        dec_to_hex (int) args: [['char', 'hchar']]
    """
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        # build a default content expression: any run of characters that is
        # not whitespace, not a delimiter, and not the start of ignoreExpr
        if isinstance(opener, basestring) and isinstance(closer, basestring):
            if len(opener) == 1 and len(closer) == 1:
                # single-character delimiters can be excluded via CharsNotIn
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr
                                                 + CharsNotIn(opener
                                                              + closer
                                                              + ParserElement.DEFAULT_WHITE_CHARS, exact=1)
                                                 )
                                       ).setParseAction(lambda t: t[0].strip()))
                else:
                    content = (empty.copy() + CharsNotIn(opener
                                                         + closer
                                                         + ParserElement.DEFAULT_WHITE_CHARS
                                                         ).setParseAction(lambda t: t[0].strip()))
            else:
                # multi-character delimiters need negative lookahead instead
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr
                                                 + ~Literal(opener)
                                                 + ~Literal(closer)
                                                 + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1))
                                       ).setParseAction(lambda t: t[0].strip()))
                else:
                    content = (Combine(OneOrMore(~Literal(opener)
                                                 + ~Literal(closer)
                                                 + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1))
                                       ).setParseAction(lambda t: t[0].strip()))
        else:
            raise ValueError("opening and closing arguments must be strings if no content expression is given")
    # the expression is recursive: a nested list may contain further nested lists
    ret = Forward()
    if ignoreExpr is not None:
        ret <<= Group(Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer))
    else:
        ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
    ret.setName('nested %s%s expression' % (opener, closer))
    return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
    """Helper method for defining space-delimited indentation blocks,
    such as those used to define block statements in Python source code.

    Parameters:
     - blockStatementExpr - expression defining syntax of statement that
       is repeated within the indented block
     - indentStack - list created by caller to manage indentation stack
       (multiple statementWithIndentedBlock expressions within a single
       grammar should share a common indentStack)
     - indent - boolean indicating whether block must be indented beyond
       the current level; set to False for block of left-most
       statements (default= ``True``)

    A valid block must contain at least one ``blockStatement``.

    Example::

        data = '''
        def A(z):
          A1
          B = 100
          G = A2
          A2
          A3
        B
        def BB(a,b,c):
          BB1
          def BBA():
            bba1
            bba2
            bba3
        C
        D
        def spam(x,y):
             def eggs(z):
                 pass
        '''


        indentStack = [1]
        stmt = Forward()

        identifier = Word(alphas, alphanums)
        funcDecl = ("def" + identifier + Group("(" + Optional(delimitedList(identifier)) + ")") + ":")
        func_body = indentedBlock(stmt, indentStack)
        funcDef = Group(funcDecl + func_body)

        rvalue = Forward()
        funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
        rvalue << (funcCall | identifier | Word(nums))
        assignment = Group(identifier + "=" + rvalue)
        stmt << (funcDef | assignment | identifier)

        module_body = OneOrMore(stmt)

        parseTree = module_body.parseString(data)
        parseTree.pprint()

    prints::

        [['def',
          'A',
          ['(', 'z', ')'],
          ':',
          [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
         'B',
         ['def',
          'BB',
          ['(', 'a', 'b', 'c', ')'],
          ':',
          [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
         'C',
         'D',
         ['def',
          'spam',
          ['(', 'x', 'y', ')'],
          ':',
          [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
    """
    # snapshot the caller's stack so a failed match can restore it
    backup_stack = indentStack[:]

    def reset_stack():
        indentStack[:] = backup_stack

    def checkPeerIndent(s, l, t):
        # at end-of-string there is nothing left to indent-check
        if l >= len(s): return
        curCol = col(l, s)
        if curCol != indentStack[-1]:
            if curCol > indentStack[-1]:
                raise ParseException(s, l, "illegal nesting")
            raise ParseException(s, l, "not a peer entry")

    def checkSubIndent(s, l, t):
        curCol = col(l, s)
        if curCol > indentStack[-1]:
            # deeper indentation starts a new nesting level
            indentStack.append(curCol)
        else:
            raise ParseException(s, l, "not a subentry")

    def checkUnindent(s, l, t):
        if l >= len(s): return
        curCol = col(l, s)
        # a valid unindent must return to some enclosing indentation level
        if not(indentStack and curCol in indentStack):
            raise ParseException(s, l, "not an unindent")
        if curCol < indentStack[-1]:
            indentStack.pop()

    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
    INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
    PEER = Empty().setParseAction(checkPeerIndent).setName('')
    UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
    if indent:
        smExpr = Group(Optional(NL)
                       + INDENT
                       + OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL))
                       + UNDENT)
    else:
        smExpr = Group(Optional(NL)
                       + OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL))
                       + UNDENT)
    # on failure, undo any indentation levels pushed during the attempt
    smExpr.setFailAction(lambda a, b, c, d: reset_stack())
    blockStatementExpr.ignore(_bslash + LineEnd())
    return smExpr.setName('indented block')
# Latin-1 "extended" letter and punctuation character sets (these use
# srange's backwards-compatible \0x## hex escape form)
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
# generic open/close tag matchers that accept any HTML tag name
anyOpenTag, anyCloseTag = makeHTMLTags(Word(alphas, alphanums + "_:").setName('any tag'))
# maps entity name (without '&'/';') to its replacement character
_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(), '><& "\''))
commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
def replaceHTMLEntity(t):
    """Helper parser action to replace common HTML entities with their special characters.

    Uses the ``entity`` named result captured by :class:`commonHTMLEntity`;
    unknown entities yield ``None``.
    """
    entity_name = t.entity
    return _htmlEntityMap.get(entity_name)
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
"Comment of the form ``/* ... */``"
htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
"Comment of the form ``<!-- ... -->``"
# leaveWhitespace() so leading spaces on the line are captured too
restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
"Comment of the form ``// ... (to end of line)``"
cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/' | dblSlashComment).setName("C++ style comment")
"Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`"
javaStyleComment = cppStyleComment
"Same as :class:`cppStyleComment`"
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
"Comment of the form ``# ... (to end of line)``"
# a comma-list item: printable words, possibly with embedded single spaces/tabs
_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',')
                                  + Optional(Word(" \t")
                                             + ~Literal(",") + ~LineEnd()))).streamline().setName("commaItem")
commaSeparatedList = delimitedList(Optional(quotedString.copy() | _commasepitem, default="")).setName("commaSeparatedList")
"""(Deprecated) Predefined expression of 1 or more printable words or
quoted strings, separated by commas.
This expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`.
"""
# some other useful expressions - using lower-case class name since we are really using this as a namespace
# some other useful expressions - using lower-case class name since we are really using this as a namespace
class pyparsing_common:
    """Here are some common low-level expressions that may be useful in
    jump-starting parser development:

     - numeric forms (:class:`integers<integer>`, :class:`reals<real>`,
       :class:`scientific notation<sci_real>`)
     - common :class:`programming identifiers<identifier>`
     - network addresses (:class:`MAC<mac_address>`,
       :class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`)
     - ISO8601 :class:`dates<iso8601_date>` and
       :class:`datetime<iso8601_datetime>`
     - :class:`UUID<uuid>`
     - :class:`comma-separated list<comma_separated_list>`

    Parse actions:

     - :class:`convertToInteger`
     - :class:`convertToFloat`
     - :class:`convertToDate`
     - :class:`convertToDatetime`
     - :class:`stripHTMLTags`
     - :class:`upcaseTokens`
     - :class:`downcaseTokens`

    Example::

        pyparsing_common.number.runTests('''
            # any int or real number, returned as the appropriate type
            100
            -100
            +100
            3.14159
            6.02e23
            1e-12
            ''')

        pyparsing_common.fnumber.runTests('''
            # any int or real number, returned as float
            100
            -100
            +100
            3.14159
            6.02e23
            1e-12
            ''')

        pyparsing_common.hex_integer.runTests('''
            # hex numbers
            100
            FF
            ''')

        pyparsing_common.fraction.runTests('''
            # fractions
            1/2
            -3/4
            ''')

        pyparsing_common.mixed_integer.runTests('''
            # mixed fractions
            1
            1/2
            -3/4
            1-3/4
            ''')

        import uuid
        pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
        pyparsing_common.uuid.runTests('''
            # uuid
            12345678-1234-5678-1234-567812345678
            ''')

    prints::

        # any int or real number, returned as the appropriate type
        100
        [100]
        -100
        [-100]
        +100
        [100]
        3.14159
        [3.14159]
        6.02e23
        [6.02e+23]
        1e-12
        [1e-12]

        # any int or real number, returned as float
        100
        [100.0]
        -100
        [-100.0]
        +100
        [100.0]
        3.14159
        [3.14159]
        6.02e23
        [6.02e+23]
        1e-12
        [1e-12]

        # hex numbers
        100
        [256]
        FF
        [255]

        # fractions
        1/2
        [0.5]
        -3/4
        [-0.75]

        # mixed fractions
        1
        [1]
        1/2
        [0.5]
        -3/4
        [-0.75]
        1-3/4
        [1.75]

        # uuid
        12345678-1234-5678-1234-567812345678
        [UUID('12345678-1234-5678-1234-567812345678')]
    """
    convertToInteger = tokenMap(int)
    """
    Parse action for converting parsed integers to Python int
    """
    convertToFloat = tokenMap(float)
    """
    Parse action for converting parsed numbers to Python float
    """
    integer = Word(nums).setName("integer").setParseAction(convertToInteger)
    """expression that parses an unsigned integer, returns an int"""
    hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int, 16))
    """expression that parses a hexadecimal integer, returns an int"""
    signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
    """expression that parses an integer with optional leading sign, returns an int"""
    fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
    """fractional expression of an integer divided by an integer, returns a float"""
    fraction.addParseAction(lambda t: t[0]/t[-1])
    mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
    """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
    mixed_integer.addParseAction(sum)
    # NOTE(review): fixed the '(:?' typo (optional-colon group) to the intended
    # non-capturing group '(?:' - the typo allowed a stray leading ':' to match
    # and then fail in float() conversion
    real = Regex(r'[+-]?(?:\d+\.\d*|\.\d+)').setName("real number").setParseAction(convertToFloat)
    """expression that parses a floating point number and returns a float"""
    # same '(:?' -> '(?:' fix applied in all four groups here
    sci_real = Regex(r'[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
    """expression that parses a floating point number with optional
    scientific notation and returns a float"""
    # streamlining this expression makes the docs nicer-looking
    number = (sci_real | real | signed_integer).streamline()
    """any numeric expression, returns the corresponding Python type"""
    fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
    """any int or real number, returned as float"""
    identifier = Word(alphas + '_', alphanums + '_').setName("identifier")
    """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
    ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
    "IPv4 address (``0.0.0.0 - 255.255.255.255``)"
    _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
    _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part) * 7).setName("full IPv6 address")
    _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6))
                           + "::"
                           + Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6))
                           ).setName("short IPv6 address")
    # a '::'-compressed address must stand for at least one zero group
    _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
    _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
    ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
    "IPv6 address (long, short, or mixed form)"
    mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
    "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
    @staticmethod
    def convertToDate(fmt="%Y-%m-%d"):
        """
        Helper to create a parse action for converting parsed date string to Python datetime.date

        Params -
         - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)

        Example::

            date_expr = pyparsing_common.iso8601_date.copy()
            date_expr.setParseAction(pyparsing_common.convertToDate())
            print(date_expr.parseString("1999-12-31"))

        prints::

            [datetime.date(1999, 12, 31)]
        """
        def cvt_fn(s, l, t):
            try:
                return datetime.strptime(t[0], fmt).date()
            except ValueError as ve:
                # surface strptime's complaint as a parse error at this location
                raise ParseException(s, l, str(ve))
        return cvt_fn
    @staticmethod
    def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
        """Helper to create a parse action for converting parsed
        datetime string to Python datetime.datetime

        Params -
         - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)

        Example::

            dt_expr = pyparsing_common.iso8601_datetime.copy()
            dt_expr.setParseAction(pyparsing_common.convertToDatetime())
            print(dt_expr.parseString("1999-12-31T23:59:59.999"))

        prints::

            [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
        """
        def cvt_fn(s, l, t):
            try:
                return datetime.strptime(t[0], fmt)
            except ValueError as ve:
                raise ParseException(s, l, str(ve))
        return cvt_fn
    iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
    "ISO8601 date (``yyyy-mm-dd``)"
    iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
    "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``"
    uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
    "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"
    _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
    @staticmethod
    def stripHTMLTags(s, l, tokens):
        """Parse action to remove HTML tags from web page HTML source

        Example::

            # strip HTML links from normal text
            text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
            td, td_end = makeHTMLTags("TD")
            table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
            print(table_text.parseString(text).body)

        Prints::

            More info at the pyparsing wiki page
        """
        return pyparsing_common._html_stripper.transformString(tokens[0])
    _commasepitem = Combine(OneOrMore(~Literal(",")
                                      + ~LineEnd()
                                      + Word(printables, excludeChars=',')
                                      + Optional(White(" \t")))).streamline().setName("commaItem")
    comma_separated_list = delimitedList(Optional(quotedString.copy()
                                                  | _commasepitem, default='')
                                         ).setName("comma separated list")
    """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
    upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
    """Parse action to convert tokens to upper case."""
    downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
    """Parse action to convert tokens to lower case."""
class _lazyclassproperty(object):
def __init__(self, fn):
self.fn = fn
self.__doc__ = fn.__doc__
self.__name__ = fn.__name__
def __get__(self, obj, cls):
if cls is None:
cls = type(obj)
if not hasattr(cls, '_intern') or any(cls._intern is getattr(superclass, '_intern', [])
for superclass in cls.__mro__[1:]):
cls._intern = {}
attrname = self.fn.__name__
if attrname not in cls._intern:
cls._intern[attrname] = self.fn(cls)
return cls._intern[attrname]
class unicode_set(object):
    """
    A set of Unicode characters, for language-specific strings for
    ``alphas``, ``nums``, ``alphanums``, and ``printables``.
    A unicode_set is defined by a list of ranges in the Unicode character
    set, in a class attribute ``_ranges``, such as::
        _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),]
    A unicode set can also be defined using multiple inheritance of other unicode sets::
        class CJK(Chinese, Japanese, Korean):
            pass
    """
    # list of (first, last) inclusive codepoint ranges; 1-tuples denote a
    # single codepoint (rr[-1] == rr[0] below handles both forms)
    _ranges = []
    @classmethod
    def _get_chars_for_ranges(cls):
        """Expand the ``_ranges`` of this class and all unicode_set bases
        (walking the MRO, stopping at unicode_set itself) into a sorted,
        de-duplicated list of characters."""
        ret = []
        for cc in cls.__mro__:
            if cc is unicode_set:
                break
            for rr in cc._ranges:
                ret.extend(range(rr[0], rr[-1] + 1))
        # unichr is the module's Py2/Py3 compatibility alias for chr
        return [unichr(c) for c in sorted(set(ret))]
    @_lazyclassproperty
    def printables(cls):
        "all non-whitespace characters in this range"
        return u''.join(filterfalse(unicode.isspace, cls._get_chars_for_ranges()))
    @_lazyclassproperty
    def alphas(cls):
        "all alphabetic characters in this range"
        return u''.join(filter(unicode.isalpha, cls._get_chars_for_ranges()))
    @_lazyclassproperty
    def nums(cls):
        "all numeric digit characters in this range"
        return u''.join(filter(unicode.isdigit, cls._get_chars_for_ranges()))
    @_lazyclassproperty
    def alphanums(cls):
        "all alphanumeric characters in this range"
        return cls.alphas + cls.nums
class pyparsing_unicode(unicode_set):
    """
    A namespace class for defining common language unicode_sets.

    Each nested class restricts ``alphas``/``nums``/``alphanums``/
    ``printables`` to the codepoint ranges of one writing system.
    """
    # the whole printable Unicode range (everything from SPACE upward)
    _ranges = [(32, sys.maxunicode)]

    class Latin1(unicode_set):
        "Unicode set for Latin-1 Unicode Character Range"
        _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),]

    class LatinA(unicode_set):
        "Unicode set for Latin-A Unicode Character Range"
        _ranges = [(0x0100, 0x017f),]

    class LatinB(unicode_set):
        "Unicode set for Latin-B Unicode Character Range"
        _ranges = [(0x0180, 0x024f),]

    class Greek(unicode_set):
        "Unicode set for Greek Unicode Character Ranges"
        # basic Greek block plus the Greek Extended (polytonic) sub-ranges
        _ranges = [
            (0x0370, 0x03ff), (0x1f00, 0x1f15), (0x1f18, 0x1f1d), (0x1f20, 0x1f45), (0x1f48, 0x1f4d),
            (0x1f50, 0x1f57), (0x1f59,), (0x1f5b,), (0x1f5d,), (0x1f5f, 0x1f7d), (0x1f80, 0x1fb4), (0x1fb6, 0x1fc4),
            (0x1fc6, 0x1fd3), (0x1fd6, 0x1fdb), (0x1fdd, 0x1fef), (0x1ff2, 0x1ff4), (0x1ff6, 0x1ffe),
        ]

    class Cyrillic(unicode_set):
        "Unicode set for Cyrillic Unicode Character Range"
        _ranges = [(0x0400, 0x04ff)]

    class Chinese(unicode_set):
        "Unicode set for Chinese Unicode Character Range"
        _ranges = [(0x4e00, 0x9fff), (0x3000, 0x303f),]

    class Japanese(unicode_set):
        "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"
        # filled in after the class body as the union of the three sub-ranges
        _ranges = []

        class Kanji(unicode_set):
            "Unicode set for Kanji Unicode Character Range"
            _ranges = [(0x4E00, 0x9Fbf), (0x3000, 0x303f),]

        class Hiragana(unicode_set):
            "Unicode set for Hiragana Unicode Character Range"
            _ranges = [(0x3040, 0x309f),]

        class Katakana(unicode_set):
            "Unicode set for Katakana Unicode Character Range"
            _ranges = [(0x30a0, 0x30ff),]

    class Korean(unicode_set):
        "Unicode set for Korean Unicode Character Range"
        _ranges = [(0xac00, 0xd7af), (0x1100, 0x11ff), (0x3130, 0x318f), (0xa960, 0xa97f), (0xd7b0, 0xd7ff), (0x3000, 0x303f),]

    class CJK(Chinese, Japanese, Korean):
        "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"
        pass

    class Thai(unicode_set):
        "Unicode set for Thai Unicode Character Range"
        _ranges = [(0x0e01, 0x0e3a), (0x0e3f, 0x0e5b),]

    class Arabic(unicode_set):
        "Unicode set for Arabic Unicode Character Range"
        _ranges = [(0x0600, 0x061b), (0x061e, 0x06ff), (0x0700, 0x077f),]

    class Hebrew(unicode_set):
        "Unicode set for Hebrew Unicode Character Range"
        _ranges = [(0x0590, 0x05ff),]

    class Devanagari(unicode_set):
        "Unicode set for Devanagari Unicode Character Range"
        _ranges = [(0x0900, 0x097f), (0xa8e0, 0xa8ff)]
# Japanese is the union of its Kanji, Hiragana, and Katakana sub-ranges;
# this must be assigned after the class body so the nested classes exist.
pyparsing_unicode.Japanese._ranges = (pyparsing_unicode.Japanese.Kanji._ranges
                                      + pyparsing_unicode.Japanese.Hiragana._ranges
                                      + pyparsing_unicode.Japanese.Katakana._ranges)

# define ranges in language character sets
if PY_3:
    # Native-language aliases for each set; non-ASCII attribute names are
    # only valid identifiers on Python 3, hence the guard.
    setattr(pyparsing_unicode, u"العربية", pyparsing_unicode.Arabic)
    setattr(pyparsing_unicode, u"中文", pyparsing_unicode.Chinese)
    setattr(pyparsing_unicode, u"кириллица", pyparsing_unicode.Cyrillic)
    setattr(pyparsing_unicode, u"Ελληνικά", pyparsing_unicode.Greek)
    setattr(pyparsing_unicode, u"עִברִית", pyparsing_unicode.Hebrew)
    setattr(pyparsing_unicode, u"日本語", pyparsing_unicode.Japanese)
    setattr(pyparsing_unicode.Japanese, u"漢字", pyparsing_unicode.Japanese.Kanji)
    setattr(pyparsing_unicode.Japanese, u"カタカナ", pyparsing_unicode.Japanese.Katakana)
    setattr(pyparsing_unicode.Japanese, u"ひらがな", pyparsing_unicode.Japanese.Hiragana)
    setattr(pyparsing_unicode, u"한국어", pyparsing_unicode.Korean)
    setattr(pyparsing_unicode, u"ไทย", pyparsing_unicode.Thai)
    setattr(pyparsing_unicode, u"देवनागरी", pyparsing_unicode.Devanagari)
if __name__ == "__main__":
selectToken = CaselessLiteral("select")
fromToken = CaselessLiteral("from")
ident = Word(alphas, alphanums + "_$")
columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
columnNameList = Group(delimitedList(columnName)).setName("columns")
columnSpec = ('*' | columnNameList)
tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
tableNameList = Group(delimitedList(tableName)).setName("tables")
simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
# demo runTests method, including embedded comments in test string
simpleSQL.runTests("""
# '*' as column list and dotted table name
select * from SYS.XYZZY
# caseless match on "SELECT", and casts back to "select"
SELECT * from XYZZY, ABC
# list of column names, and mixed case SELECT keyword
Select AA,BB,CC from Sys.dual
# multiple tables
Select A, B, C from Sys.dual, Table2
# invalid SELECT keyword - should fail
Xelect A, B, C from Sys.dual
# incomplete command - should fail
Select
# invalid column name - should fail
Select ^^^ frox Sys.dual
""")
pyparsing_common.number.runTests("""
100
-100
+100
3.14159
6.02e23
1e-12
""")
# any int or real number, returned as float
pyparsing_common.fnumber.runTests("""
100
-100
+100
3.14159
6.02e23
1e-12
""")
pyparsing_common.hex_integer.runTests("""
100
FF
""")
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests("""
12345678-1234-5678-1234-567812345678
""")
| rouge8/pip | src/pip/_vendor/pyparsing.py | Python | mit | 263,468 | [
"VisIt"
] | b89b85bff5282e628e1e5094f8484888a8d8a984b63828fe1a6afeb11902ce65 |
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
from StringIO import StringIO
import cPickle
from collections import defaultdict
import logging
import os
import time
log = logging.getLogger("main")
from ete2.tools.phylobuild_lib.master_task import CogSelectorTask
from ete2.tools.phylobuild_lib.errors import DataError
from ete2.tools.phylobuild_lib.utils import (GLOBALS, print_as_table, generate_node_ids,
encode_seqname, md5, pjoin, _mean, _median, _max, _min, _std)
from ete2.tools.phylobuild_lib import db
__all__ = ["BrhCogCreator"]
quote = lambda _x: '"%s"' %_x
class BrhCogCreator(CogSelectorTask):
    """Task that selects Clusters of Orthologous Groups (COGs) for a set of
    target and outgroup species, using precomputed best-reciprocal-hit (BRH)
    relationships stored in the project database."""

    def __init__(self, target_sp, out_sp, seqtype, conf, confname):
        # seed species and allowed fraction of missing species per COG,
        # both taken from the task configuration block
        self.seed = conf[confname]["_seed"]
        self.missing_factor = float(conf[confname]["_species_missing_factor"])
        node_id, clade_id = generate_node_ids(target_sp, out_sp)
        # Initialize task
        CogSelectorTask.__init__(self, node_id, "cog_selector",
                                 "Cog-Selector", None, conf[confname])

        # taskid does not depend on jobs, so I set it manually
        self.cladeid = clade_id
        self.seqtype = seqtype
        self.targets = target_sp
        self.outgroups = out_sp
        self.init()
        self.size = len(target_sp | out_sp)
        # filled in by finish()
        self.cog_analysis = None
        self.cogs = None

    def finish(self):
        """Compute the COGs, translate raw sequence names to internal ids,
        deterministically sort them, and store the result."""
        tm_start = time.ctime()
        all_species = self.targets | self.outgroups

        cogs, cog_analysis = brh_cogs2(db, all_species,
                                       missing_factor=self.missing_factor,
                                       seed_sp=self.seed)
        self.raw_cogs = cogs
        self.cog_analysis = cog_analysis
        self.cogs = []
        for co in cogs:
            # self.cogs.append(map(encode_seqname, co))
            encoded_names = db.translate_names(co)
            if len(encoded_names) != len(co):
                print set(co) - set(encoded_names.keys())
                raise DataError("Some sequence ids could not be translated")
            self.cogs.append(encoded_names.values())

        # Sort Cogs according to the md5 hash of its content. Random
        # sorting but kept among runs
        map(lambda x: x.sort(), self.cogs)
        self.cogs.sort(lambda x,y: cmp(md5(','.join(x)), md5(','.join(y))))
        log.log(28, "%s COGs detected" %len(self.cogs))
        tm_end = time.ctime()
        #open(pjoin(self.taskdir, "__time__"), "w").write(
        #    '\n'.join([tm_start, tm_end]))
        CogSelectorTask.store_data(self, self.cogs, self.cog_analysis)
def brh_cogs(DB, species, missing_factor=0.0, seed_sp=None, min_score=0):
    """It scans all precalculate BRH relationships among the species
    passed as an argument, and detects Clusters of Orthologs
    according to several criteria:

    min_score: the min coverage/overalp value required for a
    blast to be a reliable hit.

    missing_factor: the min percentage of species in which a
    given seq must have orthologs.

    Returns (recoded_cogs, analysis_text) where each COG is a list of
    "taxid<delimiter>seqid" strings.
    """
    log.log(26, "Searching BRH orthologs")
    species = set(map(str, species))

    # each COG must cover at least this many species
    min_species = len(species) - round(missing_factor * len(species))

    if seed_sp == "auto":
        # try every species as a candidate seed
        sp_to_test = list(species)
    elif seed_sp == "largest":
        # use the species with the largest proteome as the single seed
        cmd = """SELECT taxid, size FROM species"""
        db.seqcursor.execute(cmd)
        sp2size = {}
        for tax, counter in db.seqcursor.fetchall():
            if tax in species:
                sp2size[tax] = counter

        sorted_sp = sorted(sp2size.items(), lambda x,y: cmp(x[1],y[1]))
        log.log(24, sorted_sp[:6])
        largest_sp = sorted_sp[-1][0]
        sp_to_test = [largest_sp]
        log.log(28, "Using %s as search seed. Proteome size=%s genes" %\
                (largest_sp, sp2size[largest_sp]))
    else:
        sp_to_test = [str(seed_sp)]

    # The following loop tests each possible seed if none is
    # specified.
    log.log(28, "Detecting Clusters of Orthologs groups (COGs)")
    log.log(28, "Min number of species per COG: %d" %min_species)
    cogs_selection = []

    for j, seed in enumerate(sp_to_test):
        log.log(26,"Testing new seed species:%s (%d/%d)", seed, j+1, len(sp_to_test))
        # ortho_pair stores each pair once, ordered by taxid, so the seed may
        # appear on either side of the relation; query both directions
        species_side1 = ','.join(map(quote, [s for s in species if str(s)>str(seed)]))
        species_side2 = ','.join(map(quote, [s for s in species if str(s)<str(seed)]))
        pairs1 = []
        pairs2 = []
        # Select all ids with matches in the target species, and
        # return the total number of species covered by each of
        # such ids.
        if species_side1 != "":
            cmd = """SELECT seqid1, taxid1, seqid2, taxid2 from ortho_pair WHERE
            taxid1="%s" AND taxid2 IN (%s) """ %\
            (seed, species_side1)
            DB.orthocursor.execute(cmd)
            pairs1 = DB.orthocursor.fetchall()

        if species_side2 != "":
            cmd = """SELECT seqid2, taxid2, seqid1, taxid1 from ortho_pair WHERE
            taxid1 IN (%s) AND taxid2 = "%s" """ %\
            (species_side2, seed)
            DB.orthocursor.execute(cmd)
            pairs2 = DB.orthocursor.fetchall()

        # group every seed sequence with all of its orthologs: each group is
        # a COG candidate keyed by the seed-side (taxid, seqid)
        cog_candidates = defaultdict(set)
        for seq1, sp1, seq2, sp2 in pairs1 + pairs2:
            s1 = (sp1, seq1)
            s2 = (sp2, seq2)
            cog_candidates[(sp1, seq1)].update([s1, s2])

        all_cogs = [cand for cand in cog_candidates.values() if
                    len(cand) >= min_species]

        # consistency check: each COG must have exactly one sequence per species
        cog_sizes = [len(cog) for cog in all_cogs]
        cog_spsizes = [len(set([e[0] for e in cog])) for cog in all_cogs]

        if [1 for i in xrange(len(cog_sizes)) if cog_sizes[i] != cog_spsizes[i]]:
            raise ValueError("Inconsistent COG found")

        if cog_sizes:
            cogs_selection.append([seed, all_cogs])
        log.log(26, "Found %d COGs" % len(all_cogs))

    def _sort_cogs(cogs1, cogs2):
        # cmp-style comparator maximizing, in priority order: largest COG
        # size, mean COG size, then total number of COGs
        cogs1 = cogs1[1] # discard seed info
        cogs2 = cogs2[1] # discard seed info
        cog_sizes1 = [len(cog) for cog in cogs1]
        cog_sizes2 = [len(cog) for cog in cogs2]
        mx1, mn1, avg1 = _max(cog_sizes1), _min(cog_sizes1), round(_mean(cog_sizes1))
        mx2, mn2, avg2 = _max(cog_sizes2), _min(cog_sizes2), round(_mean(cog_sizes2))

        # we want to maximize all these values in the following order:
        for i, j in ((mx1, mx2), (avg1, avg2), (len(cogs1), len(cogs2))):
            v = -1 * cmp(i, j)
            if v != 0:
                break
        return v

    log.log(26, "Finding best COG selection...")
    cogs_selection.sort(_sort_cogs)
    lines = []
    for seed, all_cogs in cogs_selection:
        cog_sizes = [len(cog) for cog in all_cogs]
        mx, mn, avg = max(cog_sizes), min(cog_sizes), round(_mean(cog_sizes))
        lines.append([seed, mx, mn, avg, len(all_cogs)])
    analysis_txt = StringIO()
    print_as_table(lines[:25], stdout=analysis_txt,
                   header=["Seed","largest COG", "smallest COGs", "avg COG size", "total COGs"])
    log.log(28, "Analysis details:\n"+analysis_txt.getvalue())
    best_seed, best_cogs = cogs_selection[0]
    cog_sizes = [len(cog) for cog in best_cogs]

    # Not necessary since they will be sorted differently later on
    #best_cogs.sort(lambda x,y: cmp(len(x), len(y)), reverse=True)

    if max(cog_sizes) < len(species):
        raise ValueError("Current COG selection parameters do not permit to cover all species")

    # recode each (taxid, seqid) tuple into a "taxid<delimiter>seqid" string
    recoded_cogs = []
    for cog in best_cogs:
        named_cog = map(lambda x: "%s%s%s" %(x[0], GLOBALS["spname_delimiter"],x[1]), cog)
        recoded_cogs.append(named_cog)

    return recoded_cogs, analysis_txt.getvalue()
def brh_cogs2(DB, species, missing_factor=0.0, seed_sp=None, min_score=0):
    """It scans all precalculate BRH relationships among the species
    passed as an argument, and detects Clusters of Orthologs
    according to several criteria:

    min_score: the min coverage/overalp value required for a
    blast to be a reliable hit.

    missing_factor: the min percentage of species in which a
    given seq must have orthologs.

    Faster variant of brh_cogs(): candidate seeds are first ranked by the
    cheap per-seed COG counts from get_sorted_seeds(), and the full pairwise
    query is run only for the winning seed.
    """
    def _sort_cogs(cogs1, cogs2):
        # cmp-style comparator maximizing, in priority order: largest COG
        # size, mean COG size, then total number of COGs
        seed1, mx1, avg1, ncogs1 = cogs1
        seed2, mx2, avg2, ncogs2 = cogs2
        for i, j in ((mx1, mx2), (avg1, avg2), (ncogs1, ncogs2)):
            v = -1 * cmp(i, j)
            if v != 0:
                break
        return v

    log.log(26, "Searching BRH orthologs")
    species = set(map(str, species))

    # each COG must cover at least this many species
    min_species = len(species) - round(missing_factor * len(species))

    if seed_sp == "auto":
        sp_to_test = list(species)
    elif seed_sp == "largest":
        # use the species with the largest proteome as the single seed
        cmd = """SELECT taxid, size FROM species"""
        db.seqcursor.execute(cmd)
        sp2size = {}
        for tax, counter in db.seqcursor.fetchall():
            if tax in species:
                sp2size[tax] = counter

        sorted_sp = sorted(sp2size.items(), lambda x,y: cmp(x[1],y[1]))
        log.log(24, sorted_sp[:6])
        largest_sp = sorted_sp[-1][0]
        sp_to_test = [largest_sp]
        log.log(28, "Using %s as search seed. Proteome size=%s genes" %\
                (largest_sp, sp2size[largest_sp]))
    else:
        sp_to_test = [str(seed_sp)]

    analysis_txt = StringIO()
    # NOTE(review): if sp_to_test were ever empty, `seed` below would be
    # unbound; in practice the branches above always yield at least one seed.
    if sp_to_test:
        log.log(26, "Finding best COG selection...")
        seed2size = get_sorted_seeds(seed_sp, species, sp_to_test, min_species, DB)
        size_analysis = []
        for seedname, content in seed2size.iteritems():
            cog_sizes = [size for seq, size in content]
            mx, avg = _max(cog_sizes), round(_mean(cog_sizes))
            size_analysis.append([seedname, mx, avg, len(content)])
        size_analysis.sort(_sort_cogs)
        seed = size_analysis[0][0]
        print_as_table(size_analysis[:25], stdout=analysis_txt,
                       header=["Seed","largest COG", "avg COG size", "total COGs"])
        if size_analysis[0][1] < len(species)-1:
            print size_analysis[0][1]
            raise ValueError("Current COG selection parameters do not permit to cover all species")
        log.log(28, analysis_txt.getvalue())

    # The following loop tests each possible seed if none is
    # specified.
    log.log(28, "Computing Clusters of Orthologs groups (COGs)")
    log.log(28, "Min number of species per COG: %d" %min_species)
    cogs_selection = []
    log.log(26,"Using seed species:%s", seed)
    # ortho_pair stores each pair once, ordered by taxid, so the seed may
    # appear on either side of the relation; query both directions
    species_side1 = ','.join(map(quote, [s for s in species if str(s)>str(seed)]))
    species_side2 = ','.join(map(quote, [s for s in species if str(s)<str(seed)]))
    pairs1 = []
    pairs2 = []
    # Select all ids with matches in the target species, and
    # return the total number of species covered by each of
    # such ids.
    if species_side1 != "":
        cmd = """SELECT seqid1, taxid1, seqid2, taxid2 from ortho_pair WHERE
        taxid1="%s" AND taxid2 IN (%s) """ % (seed, species_side1)
        DB.orthocursor.execute(cmd)
        pairs1 = DB.orthocursor.fetchall()

    if species_side2 != "":
        cmd = """SELECT seqid2, taxid2, seqid1, taxid1 from ortho_pair WHERE
        taxid1 IN (%s) AND taxid2 = "%s" """ % (species_side2, seed)
        DB.orthocursor.execute(cmd)
        pairs2 = DB.orthocursor.fetchall()

    # group every seed sequence with all of its orthologs: each group is a
    # COG candidate keyed by the seed-side (taxid, seqid)
    cog_candidates = defaultdict(set)
    for seq1, sp1, seq2, sp2 in pairs1 + pairs2:
        s1 = (sp1, seq1)
        s2 = (sp2, seq2)
        cog_candidates[(sp1, seq1)].update([s1, s2])

    all_cogs = [cand for cand in cog_candidates.values() if
                len(cand) >= min_species]

    # CHECK CONSISTENCY: the full query must select exactly the seed
    # sequences pre-selected by get_sorted_seeds()
    seqs = set()
    for cand in all_cogs:
        seqs.update([b for a,b in cand if a == seed])
    pre_selected_seqs = set([v[0] for v in seed2size[seed]])
    if len(seqs & pre_selected_seqs) != len(set(seed2size[seed])) or\
       len(seqs & pre_selected_seqs) != len(seqs):
        print "old method seqs", len(seqs), "new seqs", len(set(seed2size[seed])), "Common", len(seqs & pre_selected_seqs)
        raise ValueError("ooops")

    # consistency check: each COG must have exactly one sequence per species
    cog_sizes = [len(cog) for cog in all_cogs]
    cog_spsizes = [len(set([e[0] for e in cog])) for cog in all_cogs]

    if [1 for i in xrange(len(cog_sizes)) if cog_sizes[i] != cog_spsizes[i]]:
        raise ValueError("Inconsistent COG found")

    if cog_sizes:
        cogs_selection.append([seed, all_cogs])
    log.log(26, "Found %d COGs" % len(all_cogs))

    # recode each (taxid, seqid) tuple into a "taxid<delimiter>seqid" string
    recoded_cogs = []
    for cog in all_cogs:
        named_cog = map(lambda x: "%s%s%s" %(x[0], GLOBALS["spname_delimiter"],x[1]), cog)
        recoded_cogs.append(named_cog)

    return recoded_cogs, analysis_txt.getvalue()
def get_sorted_seeds(seed, species, sp_to_test, min_species, DB):
    """For every candidate seed species, cheaply estimate its COGs.

    Returns a dict mapping each seed taxid to a list of (seqid, n_species)
    pairs, one per sequence whose BRH orthologs cover enough species.
    """
    seed2count = {}
    species = set(species)
    for j, seed in enumerate(sp_to_test):
        log.log(26,"Testing SIZE of new seed species:%s (%d/%d)", seed, j+1, len(sp_to_test))
        pairs1 = []
        pairs2 = []
        # aggregate once per seed sequence, collecting the partner taxids on
        # both sides of the symmetric ortho_pair relation
        cmd = """SELECT seqid1, GROUP_CONCAT(taxid2) FROM ortho_pair WHERE
        taxid1="%s" GROUP BY (seqid1)""" % (seed)

        DB.orthocursor.execute(cmd)
        pairs1 = DB.orthocursor.fetchall()

        cmd = """SELECT seqid2, GROUP_CONCAT(taxid1) FROM ortho_pair WHERE
        taxid2 = "%s" GROUP BY seqid2""" % (seed)

        DB.orthocursor.execute(cmd)
        pairs2 = DB.orthocursor.fetchall()

        # Compute number of species for each seqid representing a cog
        counter = defaultdict(set)
        all_pairs = pairs1 + pairs2
        for seqid, targets in all_pairs:
            counter[seqid].update(set(targets.split(",")) & species)

        # Filter out too small COGs (min_species-1 because the seed species
        # itself is not among the targets)
        valid_seqs = [(k, len(v)) for k, v in counter.iteritems() if
                      len(v)>= min_species-1]

        seed2count[seed] = valid_seqs
        log.log(28, "Seed species:%s COGs:%s" %(seed, len(seed2count[seed])))
    return seed2count
def get_best_selection(cogs_selections, species):
ALL_SPECIES = set(species)
def _compare_cog_selection(cs1, cs2):
seed_1, missing_sp_allowed_1, candidates_1, sp2hits_1 = cs1
seed_2, missing_sp_allowed_2, candidates_2, sp2hits_2 = cs2
score_1, min_cov_1, max_cov_1, median_cov_1, cov_std_1, cog_cov_1 = get_cog_score(candidates_1, sp2hits_1, median_cogs, ALL_SPECIES-set([seed_1]))
score_2, min_cov_2, max_cov_2, median_cov_2, cov_std_2, cog_cov_2 = get_cog_score(candidates_2, sp2hits_2, median_cogs, ALL_SPECIES-set([seed_2]))
sp_represented_1 = len(sp2hits_1)
sp_represented_2 = len(sp2hits_1)
cmp_rpr = cmp(sp_represented_1, sp_represented_2)
if cmp_rpr == 1:
return 1
elif cmp_rpr == -1:
return -1
else:
cmp_score = cmp(score_1, score_2)
if cmp_score == 1:
return 1
elif cmp_score == -1:
return -1
else:
cmp_mincov = cmp(min_cov_1, min_cov_2)
if cmp_mincov == 1:
return 1
elif cmp_mincov == -1:
return -1
else:
cmp_maxcov = cmp(max_cov_1, max_cov_2)
if cmp_maxcov == 1:
return 1
elif cmp_maxcov == -1:
return -1
else:
cmp_cand = cmp(len(candidates_1), len(candidates_2))
if cmp_cand == 1:
return 1
elif cmp_cand == -1:
return -1
else:
return 0
min_score = 0.5
max_cogs = _max([len(data[2]) for data in cogs_selections])
median_cogs = _median([len(data[2]) for data in cogs_selections])
cogs_selections.sort(_compare_cog_selection)
cogs_selections.reverse()
header = ['seed',
'missing sp allowed',
'spcs covered',
'#COGs',
'mean sp coverage)',
'#COGs for worst sp.',
'#COGs for best sp.',
'sp. in COGS(avg)',
'SCORE' ]
print_header = True
best_cog_selection = None
cog_analysis = StringIO()
for i, cogs in enumerate(cogs_selections):
seed, missing_sp_allowed, candidates, sp2hits = cogs
sp_percent_coverages = [(100*sp2hits.get(sp,0))/float(len(candidates)) for sp in species]
sp_coverages = [sp2hits.get(sp, 0) for sp in species]
score, min_cov, max_cov, median_cov, cov_std, cog_cov = get_cog_score(candidates, sp2hits, median_cogs, ALL_SPECIES-set([seed]))
if best_cog_selection is None:
best_cog_selection = i
flag = "*"
else:
flag = " "
data = (candidates,
flag+"%10s" %seed, \
missing_sp_allowed, \
"%d (%0.1f%%)" %(len(set(sp2hits.keys()))+1, 100*float(len(ALL_SPECIES))/(len(sp2hits)+1)) , \
len(candidates), \
"%0.1f%% +- %0.1f" %(_mean(sp_percent_coverages), _std(sp_percent_coverages)), \
"% 3d (%0.1f%%)" %(min(sp_coverages),100*min(sp_coverages)/float(len(candidates))), \
"% 3d (%0.1f%%)" %(max(sp_coverages),100*max(sp_coverages)/float(len(candidates))), \
cog_cov,
score
)
if print_header:
print_as_table([data[1:]], header=header, print_header=True, stdout=cog_analysis)
print_header = False
else:
print_as_table([data[1:]], header=header, print_header=False, stdout=cog_analysis)
#raw_input("Press")
print cog_analysis.getvalue()
#best_cog_selection = int(raw_input("choose:"))
return cogs_selections[best_cog_selection], cog_analysis
def _analyze_cog_selection(all_cogs):
    """Debug helper: plot per-species COG counts and the COG-size histogram.

    NOTE(review): this relies on a global ``pylab`` that is never imported in
    this module, so calling it raises NameError unless matplotlib's pylab is
    injected externally — confirm before use.
    """
    print "total cogs:", len(all_cogs)
    sp2cogcount = {}
    size2cogs = {}
    for cog in all_cogs:
        for seq in cog:
            # sequence names are "taxid<delimiter>seqid"
            sp = seq.split(GLOBALS["spname_delimiter"])[0]
            sp2cogcount[sp] = sp2cogcount.setdefault(sp, 0)+1
            size2cogs.setdefault(len(cog), []).append(cog)

    sorted_spcs = sorted(sp2cogcount.items(), lambda x,y: cmp(x[1], y[1]))
    # Take only first 20 species
    coverages = [s[1] for s in sorted_spcs][:20]
    spnames = [str(s[0])+ s[0] for s in sorted_spcs][:20]
    pylab.subplot(1,2,1)
    pylab.bar(range(len(coverages)), coverages)
    labels = pylab.xticks(pylab.arange(len(spnames)), spnames)
    pylab.subplots_adjust(bottom=0.35)
    pylab.title(str(len(all_cogs))+" COGs")
    pylab.setp(labels[1], 'rotation', 90,fontsize=10, horizontalalignment = 'center')
    pylab.subplot(1,2,2)
    pylab.title("Best COG contains "+str(max(size2cogs.values()))+" species" )
    pylab.bar(range(1,216), [len(size2cogs.get(s, [])) for s in range(1,216)])
    pylab.show()
def cog_info(candidates, sp2hits):
    """Summarize per-species coverage of a COG selection.

    *sp2hits* maps each species to the number of candidate COGs it appears
    in. Returns (min_cov, max_cov, median_cov) as fractions of the total
    number of candidates.
    """
    sp_coverages = [hits/float(len(candidates)) for hits in sp2hits.values()]
    min_cov = _min(sp_coverages)
    # BUG FIX: max_cov was previously computed with _min (copy-paste error)
    max_cov = _max(sp_coverages)
    median_cov = _median(sp_coverages)
    return min_cov, max_cov, median_cov
def get_cog_score(candidates, sp2hits, max_cogs, all_species):
    """Score a COG selection.

    The score is the minimum of three normalized factors: the COG count
    relative to *max_cogs*, the mean fraction of species per COG, and the
    coverage of the worst-covered species.

    Returns (score, min_cov, max_cov, median_cov, cov_std, cog_cov).
    """
    # mean number of species per COG, normalized by the species count
    cog_cov = _mean([len(cogs) for cogs in candidates])/float(len(sp2hits)+1)
    # mean fraction of species represented in each COG
    cog_mean_cov = _mean([len(cogs)/float(len(sp2hits)) for cogs in candidates])
    cog_min_sp = _min([len(cogs) for cogs in candidates])

    # per-species coverage: fraction of candidate COGs each species occurs in
    sp_coverages = [sp2hits.get(sp, 0)/float(len(candidates)) for sp in all_species]
    nfactor = len(candidates)/float(max_cogs)  # normalized number of COGs
    min_cov = _min(sp_coverages)  # coverage of the worst-covered species
    # BUG FIX: max_cov was previously computed with _min (copy-paste error)
    max_cov = _max(sp_coverages)
    median_cov = _median(sp_coverages)
    cov_std = _std(sp_coverages)

    score = _min([nfactor, cog_mean_cov, min_cov])
    return score, min_cov, max_cov, median_cov, cov_std, cog_cov
| sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/ete2/tools/phylobuild_lib/task/cog_creator.py | Python | mit | 22,294 | [
"BLAST"
] | 31545cc3bd20244841b34955b9ad47e4a117056965fb06038a68be7910e9b81e |
#! /usr/bin/env python
import setuptools # necessary for install_requires
from setuptools import find_packages
from distutils.core import Command
from numpy.distutils.core import Extension
from numpy.distutils.command.build_clib import build_clib
from numpy.distutils.command.build_ext import build_ext
from numpy.distutils.command.sdist import sdist
from numpy.distutils.command.build import build
from distutils.command.clean import clean
import subprocess
from glob import glob
import os
import numpy
import shutil
descr = """pyRSD
pyRSD is a Python package for computing the theoretical predictions of the
redshift-space power spectrum of galaxies. The package also includes
functionality for fitting data measurements and finding the optimal model
parameters, using both MCMC and nonlinear optimization techniques.
"""
DISTNAME = 'pyRSD'
DESCRIPTION = 'Accurate predictions for the clustering of galaxies in redshift-space in Python'
LONG_DESCRIPTION = descr
MAINTAINER = 'Nick Hand'
MAINTAINER_EMAIL = 'nicholas.adam.hand@gmail.com'
# base directory of package
package_basedir = os.path.abspath(os.path.dirname(__file__))
# determine if swig will need to be called on GCL extension
swig_needed = not all(os.path.isfile(f) for f in ['pyRSD/gcl.py', 'pyRSD/gcl_wrap.cpp'])
# the CLASS version to install
CLASS_VERSION = "2.6.1"
def check_swig_version():
    """
    Check the version of swig, >= 3.0 is required

    Notes
    -----
    *   swig is only needed for developers installing from the source directory,
        with ``python setup.py install``
    *   the swig-generated files are included by default in the pypi distribution,
        so the swig dependency is not needed
    """
    import subprocess, re
    try:
        output = subprocess.check_output(["swig", "-version"])
    except OSError:
        raise ValueError(("`swig` not found on PATH -- either install `swig` or use "
                          "``conda install -c nickhan pyrsd`` (recommended)"))

    # BUG FIX: check_output returns bytes on Python 3; the regex search on
    # bytes raised TypeError, which the old bare ``except`` silently swallowed,
    # skipping the version check entirely
    if isinstance(output, bytes):
        output = output.decode('utf-8', 'replace')

    try:
        # allow multi-digit version components (e.g. "4.1.1")
        version = re.findall(r"SWIG Version [0-9]+\.[0-9]+\.[0-9]+", output)[0].split()[-1]
    except IndexError:
        # unexpected output format; give swig the benefit of the doubt
        return

    # need >= 3.0; compare numerically, since as strings "10.0" < "3.0"
    if tuple(int(p) for p in version.split('.')) < (3, 0):
        raise ValueError(("the version of `swig` on PATH must greater or equal to 3.0; "
                          "recommended installation without swig is ``conda install -c nickhan pyrsd``"))
def build_CLASS(prefix):
    """
    Function to download CLASS from github and build the library into *prefix*.

    Delegates to ``depends/install_class.sh``; raises ValueError if the shell
    script exits with a nonzero status.
    """
    # latest class version and download link
    # NOTE(review): the fourth argument looks like a deliberately-invalid
    # fallback path passed to the install script -- confirm its meaning there
    args = (package_basedir, CLASS_VERSION, prefix, "/opt/class/willfail")
    command = 'sh %s/depends/install_class.sh %s %s %s' %args
    ret = os.system(command)
    if ret != 0:
        raise ValueError("could not build CLASS v%s" %CLASS_VERSION)
def find_version(path):
    """Return the package version string parsed from the file at *path*.

    The file must contain a line of the form ``__version__ = "X.Y.Z"``
    (single or double quotes). Raises RuntimeError if no such line exists.
    """
    import re
    # path shall be a plain ascii text file; close the handle deterministically
    # (the old code leaked the file object)
    with open(path, 'rt') as fh:
        s = fh.read()
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              s, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Version not found")
class build_external_clib(build_clib):
    """
    Custom command to build CLASS first, and then GCL library
    """
    def finalize_options(self):
        build_clib.finalize_options(self)

        # create the CLASS build directory and save the include path
        self.class_build_dir = self.build_temp
        self.include_dirs.insert(0, os.path.join(self.class_build_dir, 'include'))

    def build_libraries(self, libraries):
        # build CLASS first, since GCL links against libclass.a
        build_CLASS(self.class_build_dir)

        # update the link objects with CLASS library
        link_objects = ['libclass.a']
        # locate the freshly-built static library inside the build tree
        link_objects = list(glob(os.path.join(self.class_build_dir, '*', 'libclass.a')))

        self.compiler.set_link_objects(link_objects)
        self.compiler.library_dirs.insert(0, os.path.join(self.class_build_dir, 'lib'))

        for (library, build_info) in libraries:
            # check swig version before attempting to regenerate the wrapper
            if library == "gcl" and swig_needed:
                check_swig_version()

            # update include dirs
            self.include_dirs += build_info.get('include_dirs', [])

        build_clib.build_libraries(self, libraries)
class custom_build_ext(build_ext):
    """
    Custom extension building to grab include directories
    from the ``build_clib`` command
    """
    def finalize_options(self):
        build_ext.finalize_options(self)
        self.include_dirs.append(numpy.get_include())

    def run(self):
        if self.distribution.has_c_libraries():
            # make sure the static libraries (CLASS, GCL, ...) exist first,
            # then inherit their include/library search paths
            self.run_command('build_clib')
            build_clib = self.get_finalized_command('build_clib')
            self.include_dirs += build_clib.include_dirs
            self.library_dirs += build_clib.compiler.library_dirs

        # copy data files from temp to pyRSD package directory
        shutil.rmtree(os.path.join(self.build_lib, 'pyRSD', 'data', 'class'), ignore_errors=True)
        shutil.copytree(os.path.join(self.build_temp, 'data'), os.path.join(self.build_lib, 'pyRSD', 'data', 'class'))

        build_ext.run(self)
class custom_sdist(sdist):
    """Source-distribution command that bundles the CLASS tarball, so end
    users can build without network access."""

    def run(self):
        from six.moves.urllib import request

        # download CLASS into depends/ so it ships inside the sdist
        tarball_link = "https://github.com/lesgourg/class_public/archive/v%s.tar.gz" %CLASS_VERSION
        tarball_local = os.path.join('depends', 'class-v%s.tar.gz' %CLASS_VERSION)
        request.urlretrieve(tarball_link, tarball_local)

        # run the default
        sdist.run(self)
class custom_clean(clean):
    """Clean command that also removes CLASS temp dirs, built shared objects,
    and the top-level build directory."""

    def run(self):
        # run the built-in clean
        clean.run(self)

        # remove the CLASS tmp directories and compiled extension modules
        os.system("rm -rf depends/tmp*")
        os.system("rm -f pyRSD/*.so*")

        # remove build directory
        if os.path.exists('build'):
            shutil.rmtree('build')
def libgcl_config():
    """Return the ``(name, build_info)`` pair describing the GCL C++ static
    library for numpy.distutils' ``libraries`` argument.

    Sources are all C++ files under ``pyRSD/_gcl/cpp`` (plus the Fortran
    FFTLog sources handled by a separate library config).
    """
    build_info = {
        'sources': list(glob("pyRSD/_gcl/cpp/*cpp")),
        'include_dirs': ['pyRSD/_gcl/include', '/usr/local/include'],
        'language': 'c++',
        'extra_compiler_args': ["-O2", '-std=c++11'],
    }
    return ('gcl', build_info)
def libfftlog_config():
    """Return the ``(name, build_info)`` pair for the FFTLog Fortran library,
    built from the bundled sources under ``pyRSD/_gcl/extern/fftlog``."""
    return ('fftlog', {'sources': list(glob("pyRSD/_gcl/extern/fftlog/*f"))})
def libemu_config():
    """Return the ``(name, build_info)`` pair for the bundled FrankenEmu
    emulator library.

    Locates GSL via ``gsl-config --prefix``; raises ValueError if GSL is
    not installed.
    """
    info = {}

    # determine gsl path
    try:
        gsl_prefix = subprocess.check_output('gsl-config --prefix', shell=True).decode('utf-8').strip()
    except:
        raise ValueError("GSL is not installed!")

    info['sources'] = list(glob("pyRSD/_gcl/extern/FrankenEmu/src/*.c"))
    info['include_dirs'] = ['pyRSD/_gcl/extern/FrankenEmu/include', os.path.join(gsl_prefix, 'include')]
    info['library_dirs'] = [os.path.join(gsl_prefix, 'lib')]
    # -Wno-missing-braces silences warnings from the vendored emulator code
    info['extra_compiler_args'] = ["-O2", "-Wno-missing-braces"]

    return ('emu', info)
def gcl_extension_config():
    """Build the keyword configuration dict for the ``pyRSD._gcl`` Python
    extension module, choosing between the pre-generated swig wrapper and
    regenerating it from the interface file."""
    config = {
        'name': 'pyRSD._gcl',
        'extra_link_args': ['-g', '-fPIC'],
        'extra_compile_args': [],
        'libraries': ['gcl', 'fftlog', 'emu', 'class', 'gsl', 'gslcblas', 'gfortran'],
    }

    # determine if swig needs to be called
    if swig_needed:
        # regenerate the C++ wrapper from the swig interface file
        config['sources'] = ['pyRSD/gcl.i']
        config['depends'] = ['pyRSD/_gcl/python/*.i']
        config['swig_opts'] = ['-c++']
    else:
        # use the pre-generated wrapper shipped with the source distribution
        config['sources'] = ['pyRSD/gcl_wrap.cpp']
    return config
# the dependencies, one requirement per line
with open('requirements.txt', 'r') as fh:
    dependencies = [l.strip() for l in fh]

# extra dependencies; [1:] drops the first line of each extras file
# (presumably a "-r requirements.txt" include line -- confirm)
extras = {}
with open('requirements-extras.txt', 'r') as fh:
    extras['extras'] = [l.strip() for l in fh][1:]
extras['full'] = extras['extras']  # alias: pip install pyRSD[full]

with open('requirements-tests.txt', 'r') as fh:
    extras['tests'] = [l.strip() for l in fh][1:]
extras['test'] = extras['tests']  # alias: pip install pyRSD[test]

# non-Python files shipped inside the pyRSD package
pkg_data = ['data/dark_matter/pkmu_P*',
            'data/dark_matter/hzpt*',
            'data/galaxy/full/*',
            'data/galaxy/2-halo/*',
            'data/params/*',
            'data/simulation_fits/*',
            'data/examples/*',
            'tests/baseline/*png']
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(name=DISTNAME,
          version=find_version("pyRSD/version.py"),
          description=DESCRIPTION,
          long_description=LONG_DESCRIPTION,
          maintainer=MAINTAINER,
          maintainer_email=MAINTAINER_EMAIL,
          license='GPL3',
          zip_safe=False,
          # the swig-wrapped GCL extension, linked against the static libs below
          ext_modules = [Extension(**gcl_extension_config())],
          # build order matters: fftlog and emu before gcl, which links them
          libraries=[libfftlog_config(), libemu_config(), libgcl_config()],
          # hook in the custom commands defined above
          cmdclass = {
              'sdist': custom_sdist,
              'build_clib': build_external_clib,
              'build_ext': custom_build_ext,
              'clean': custom_clean
          },
          packages=find_packages(),
          install_requires=dependencies,
          extras_require=extras,
          package_data={'pyRSD': pkg_data},
          entry_points={'console_scripts' :
                        ['rsdfit = pyRSD.rsdfit.rsdfit:main',
                         'pyrsd-quickstart = pyRSD.quickstart.core:main']}
          )
| nickhand/pyRSD | setup.py | Python | gpl-3.0 | 9,322 | [
"Galaxy"
] | 71d7b73445cecee934caf2a7343f68f27bf407ee4277ceb9c96e6583b9ed6e35 |
#!/usr/bin/env pvbatch
# ParaView batch script: slice the dataset at y=0 and save one colored
# image per node-centered variable.
from paraview.simple import *
import pviz
import sys

oviz = pviz.viz(sys.argv)       # instantiate viz object (and load data)

part = pviz.makeSlice( y = 0. ) # make slice through the y=0 plane

# look along -y with z pointing up
oviz.view.CameraViewUp = [0,0,1]
oviz.view.CameraPosition = [0,1,0]
ResetCamera()                   # auto-adapt camera to part extent

for var in part.PointData:      # loop over node-centered data arrays
    varName = var.GetName()     # get variable name
    oviz.colorPartByVarName(part,varName,barPosition = 'right')
    oviz.writeImage(varName)    # save one image named after the variable
| ruizanthony/pviz | examples/simple/cut_y0.py | Python | lgpl-3.0 | 573 | [
"ParaView"
] | f4a02944720e331eb0c7e65fe0255018f191de8455ae4d330b8e473b57f1071b |
#!/usr/bin/env python
"""
Unwrap fasta file so that each sequence takes up only one line.
Usage:
%program input_file output_file
"""
import sys
try:
from Bio import SeqIO
except:
print "This program requires the Biopython library"
sys.exit(0)
try:
in_file = open(sys.argv[1], "rU")
out_file = open(sys.argv[2], "w")
except:
print __doc__
sys.exit(0)
sequences = ([seq.id, seq.seq.tostring()] for seq in SeqIO.parse(in_file, 'fasta'))
with open(sys.argv[2], "w") as out_file:
for seq in sequences:
out_file.write(">" + seq[0] + "\n" + seq[1] + "\n")
| wkh124/wkh124 | fasta_unwrap.py | Python | gpl-3.0 | 600 | [
"Biopython"
] | e7dd70ecedebdd0bd2782229142fc9d3a6a8587533f0b2e2cfdbfba20f67cf91 |
"""compctl
Usage:
compctl halt
compctl usage
compctl schedule (lunch | league | knockout | open | briefing | photo | prizes | tinker) <time>
compctl unschedule <id>
compctl show-schedule <from> <to>
compctl add-team <tla> <name> <college>
compctl del-team <tla>
compctl set-team-name <tla> <name>
compctl set-team-college <tla> <college>
compctl set-team-notes <tla> <notes>
compctl append-note <tla> <note>
compctl set-team-present <tla>
compctl set-team-absent <tla>
compctl list-teams
compctl team <tla>
compctl screen-list
compctl screen-set-mode <id> <mode>
compctl screen-override <id> <message>
compctl screen-clear-override <id>
compctl set-score <match-id> <tla> <score>
compctl get-score [--yaml] <match-id> <tla>
compctl get-scores [--yaml] <match-id>
compctl calc-league-points [--yaml] <match-id>
compctl get-league-points [--yaml] <tla>
compctl get-dsqs [--yaml] <match-id>
compctl disqualify <match-id> <tla>
compctl re-qualify <match-id> <tla>
compctl add-match [--knockout] <name> <time>
compctl del-match <name>
compctl set-match-teams <name> <team>...
compctl clear-match-teams <name>
compctl list-matches <from> <to>
"""
import shlex
import re
from docopt import docopt, DocoptExit
class CommandError(Exception):
    """Raised when a command line does not match the usage grammar above."""
    pass
def parse(cmd):
    """Tokenise *cmd* shell-style and match it against the module docstring.

    Returns the docopt options mapping; raises CommandError when the
    command does not match the usage grammar.
    """
    tokens = shlex.split(cmd.strip())
    try:
        return docopt(__doc__, argv=tokens, help=False, version=None)
    except DocoptExit:
        raise CommandError()
# Registry mapping subcommand name -> callable(responder, options).
HANDLERS = {}
def handler(subcommand):
    """Decorator: register the wrapped function as the handler for *subcommand*."""
    def wrapper(fn):
        HANDLERS[subcommand] = fn
        print "Installed command: {0}".format(subcommand)
        return fn
    return wrapper
def dispatch(options, responder):
    """Invoke the first registered handler whose flag is set in *options*.

    Raises CommandError when no registered subcommand matched the parse.
    """
    for name, callback in HANDLERS.iteritems():
        if options.get(name):
            callback(responder, options)
            return
    raise CommandError()
@handler('usage')
def handle_usage(responder, opts):
    """Send every usage line from the module docstring to *responder*."""
    # Skip the docstring title lines ([3:]) and echo only the lines that
    # look like a "compctl <subcommand ...>" usage pattern.
    regex = re.compile(' compctl (.*)')
    for line in __doc__.split('\n')[3:]:
        match = regex.match(line)
        if match:
            responder('Usage: {0}'.format(match.group(1)))
@handler('halt')
def halt_system(responder, options):
    """Announce shutdown and stop the Twisted reactor (terminates compd)."""
    from twisted.internet import reactor
    responder("System going down for halt")
    reactor.stop()
def default_responder(output):
    """Fallback responder: print handler output to stdout."""
    print output
def handle(cmd, responder = default_responder, no_auto_fail = False,
           short_fail = False):
    """Parse and dispatch *cmd*, degrading gracefully on a syntax error.

    On CommandError: re-raise when *no_auto_fail* is set, answer a terse
    'Syntax error.' when *short_fail* is set, and otherwise reply with the
    full usage listing.
    """
    try:
        dispatch(parse(cmd), responder)
    except CommandError:
        if no_auto_fail:
            raise
        if short_fail:
            responder('Syntax error.')
        else:
            handle('usage', responder, no_auto_fail=True)
# Subscribed broadcast receivers, keyed by an opaque subscription token.
RECEIVERS = {}
def broadcast(message):
    """Deliver *message* to every subscribed receiver."""
    for receiver in RECEIVERS.itervalues():
        receiver(message)
def subscribe(handler):
    """Register *handler* for broadcasts; returns an unsubscribe callable."""
    # A fresh object() serves as a unique dictionary key for this subscription.
    key = object()
    RECEIVERS[key] = handler
    def unsubscribe():
        del RECEIVERS[key]
    return unsubscribe
| prophile/compd | src/control.py | Python | mit | 3,010 | [
"TINKER"
] | b6931f65462edf76a41aa64ea6c237be7da0fb34bd8b44a50f786b4c71249344 |
# -*- coding: utf-8 -*-
# +---------------------------------------------------------------------------+
# | pylstar : Implementation of the LSTAR Grammatical Inference Algorithm |
# +---------------------------------------------------------------------------+
# | Copyright (C) 2015 Georges Bossert |
# | This program is free software: you can redistribute it and/or modify |
# | it under the terms of the GNU General Public License as published by |
# | the Free Software Foundation, either version 3 of the License, or |
# | (at your option) any later version. |
# | |
# | This program is distributed in the hope that it will be useful, |
# | but WITHOUT ANY WARRANTY; without even the implied warranty of |
# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
# | GNU General Public License for more details. |
# | |
# | You should have received a copy of the GNU General Public License |
# | along with this program. If not, see <http://www.gnu.org/licenses/>. |
# +---------------------------------------------------------------------------+
# | @url : https://github.com/gbossert/pylstar |
# | @contact : gbossert@miskin.fr |
# +---------------------------------------------------------------------------+
# +----------------------------------------------------------------------------
# | Global Imports
# +----------------------------------------------------------------------------
# +----------------------------------------------------------------------------
# | Pylstar Imports
# +----------------------------------------------------------------------------
from pylstar.tools.Decorators import PylstarLogger
from pylstar.Word import Word
from pylstar.automata.State import State
@PylstarLogger
class Automata(object):
"""Definition of an automata
"""
    def __init__(self, initial_state, name = "Automata"):
        """Build an automaton rooted at *initial_state* (a State instance)."""
        self.initial_state = initial_state
        self.name = name
    def play_query(self, query):
        """Execute the query's input word from the initial state.

        Returns the (output_word, visited_states) tuple of play_word().
        """
        return self.play_word(query.input_word, self.initial_state)
    def play_word(self, input_word, starting_state=None):
        """This method can be used to play the specified word
        across the current automata.
        It returns a tuple made of the output_word and the visited states
        captured while visiting the automata

        >>> from pylstar.Letter import Letter, EmptyLetter
        >>> from pylstar.Word import Word
        >>> from pylstar.automata.State import State
        >>> from pylstar.automata.Transition import Transition
        >>> from pylstar.automata.Automata import Automata
        >>> l_lambda = EmptyLetter()
        >>> l_a = Letter('a')
        >>> l_b = Letter('b')
        >>> l_0 = Letter(0)
        >>> l_1 = Letter(1)
        >>> s0 = State("S0")
        >>> s1 = State("S1")
        >>> s2 = State("S2")
        >>> s3 = State("S3")
        >>> t1 = Transition("T1", s3, l_a, l_0)
        >>> t2 = Transition("T2", s1, l_b, l_0)
        >>> s0.transitions = [t1, t2]
        >>> t3 = Transition("T3", s0, l_a, l_1)
        >>> t4 = Transition("T4", s2, l_b, l_1)
        >>> s1.transitions = [t3, t4]
        >>> t5 = Transition("T5", s3, l_a, l_0)
        >>> t6 = Transition("T6", s0, l_b, l_0)
        >>> s2.transitions = [t5, t6]
        >>> t7 = Transition("T7", s3, l_a, l_1)
        >>> t8 = Transition("T8", s3, l_b, l_1)
        >>> s3.transitions = [t7, t8]
        >>> automata = Automata(s0)
        >>> print(automata.play_word(Word([l_a, l_a, l_a]))[0])
        [Letter(0), Letter(1), Letter(1)]
        >>> print(automata.play_word(Word([l_b, l_b, l_b]))[0])
        [Letter(0), Letter(1), Letter(0)]
        >>> print(automata.play_word(Word([l_b, l_a, l_b, l_a, l_b]))[0])
        [Letter(0), Letter(1), Letter(0), Letter(1), Letter(0)]
        """
        if input_word is None or len(input_word) == 0:
            raise Exception("Input word cannot be None or empty")
        if starting_state is None:
            current_state = self.initial_state
        else:
            current_state = starting_state
        self._logger.debug("Playing word '{}'".format(input_word))
        output_letters = []
        visited_states = []
        # Feed each input letter to the current state; State.visit() yields
        # the emitted output letter together with the successor state.
        for letter in input_word.letters:
            (output_letter, output_state) = current_state.visit(letter)
            output_letters.append(output_letter)
            visited_states.append(output_state)
            current_state = output_state
        output_word = Word(letters=output_letters)
        return (output_word, visited_states)
def get_states(self):
"""Visits the automata to discover all the available states.
:return: a list containing all the discovered states.
:rtype: a :class:`list`
"""
states = []
toAnalyze = []
toAnalyze.append(self.initial_state)
while (len(toAnalyze) > 0):
currentState = toAnalyze.pop()
if currentState is not None:
found = False
for tmpState in states:
if tmpState.name == currentState.name:
found = True
if not found:
for transition in currentState.transitions:
outputState = transition.output_state
found = False
for tmpState in states:
if tmpState.name == outputState.name:
found = True
for tmpState in toAnalyze:
if tmpState.name == outputState.name:
found = True
if not found:
toAnalyze.append(outputState)
states.append(currentState)
return states
    @staticmethod
    def create_from_dot_code(dot_code):
        """This static method returns the Automata object that represents the provided DOT code

        :param dot_code: DOT definition of the Automata to parse
        :type dot_code: str
        :rtype: pylstar.automata.Automata.Automata
        """
        if dot_code is None:
            raise Exception("Dot code cannot be None")
        if not isinstance(dot_code, str):
            raise Exception("Dot code must be a String")
        # Imported here (not at module level) to avoid a circular import
        # between Automata and DOTParser.
        from pylstar.automata.DOTParser import DOTParser
        return DOTParser.parse(dot_code)
def build_dot_code(self):
"""Returns the dot code representing the automata.
:rtype: str
>>> from pylstar.automata.State import State
>>> from pylstar.Letter import Letter
>>> from pylstar.automata.Transition import Transition
>>> la = Letter('A')
>>> lb = Letter('B')
>>> l0 = Letter(0)
>>> l1 = Letter(1)
>>> q0 = State("Q0")
>>> q1 = State("Q1")
>>> t0 = Transition("t0", q0, la, l0)
>>> q0.transitions.append(t0)
>>> t1 = Transition("t1", q1, lb, l1)
>>> q0.transitions.append(t1)
>>> t2 = Transition("t2", q1, la, l0)
>>> q1.transitions.append(t2)
>>> t3 = Transition("t3", q0, lb, l1)
>>> q1.transitions.append(t3)
>>> automata = Automata(initial_state = q0)
>>> print(automata.build_dot_code())
digraph "Automata" {
"Q0" [shape=doubleoctagon, style=filled, fillcolor=white, URL="Q0"];
"Q1" [shape=ellipse, style=filled, fillcolor=white, URL="Q1"];
"Q0" -> "Q0" [fontsize=5, label="A / 0", URL="t0"];
"Q0" -> "Q1" [fontsize=5, label="B / 1", URL="t1"];
"Q1" -> "Q1" [fontsize=5, label="A / 0", URL="t2"];
"Q1" -> "Q0" [fontsize=5, label="B / 1", URL="t3"];
}
"""
from pylstar.automata.DOTParser import DOTParser
return DOTParser.build_dot_code(self)
    @property
    def initial_state(self):
        """The initial state of the Automata"""
        return self.__initial_state
    @initial_state.setter
    def initial_state(self, state):
        # Reject anything that is not a State instance: every traversal
        # (play_word, get_states) starts from this attribute.
        if state is None:
            raise Exception("Initial state cannot be None")
        if not isinstance(state, State):
            raise Exception("Initial state must be a state")
        self.__initial_state = state
    @property
    def name(self):
        """The name of the state machine"""
        return self.__name
    @name.setter
    def name(self, name):
        # The name is used as the digraph label in the DOT export.
        if name is None:
            raise Exception("Name of the automata cannot be None")
        if not isinstance(name, str):
            raise Exception("Name of the automata must be a String")
        self.__name = name
| gbossert/pylstar | src/pylstar/automata/Automata.py | Python | gpl-3.0 | 9,152 | [
"VisIt"
] | 59b6c63ac48cd16a3083d990a3c4ddfd51f9a2dd654421f48bf8ba44d6a37dba |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
The widget is called from web2py.
"""
import datetime
import sys
import cStringIO
import time
import thread
import threading
import os
import socket
import signal
import math
import logging
import newcron
import getpass
import gluon.main as main
from gluon.fileutils import read_file, write_file, create_welcome_w2p
from gluon.settings import global_settings
from gluon.shell import run, test
from gluon.utils import is_valid_ip_address, is_loopback_ip_address, getipaddrinfo
# Identity/banner constants shown by the GUI, the console and --version.
ProgramName = 'web2py Web Framework'
ProgramAuthor = 'Created by Massimo Di Pierro, Copyright 2007-' + str(
    datetime.datetime.now().year)
ProgramVersion = read_file('VERSION').strip()
ProgramInfo = '''%s
%s
%s''' % (ProgramName, ProgramAuthor, ProgramVersion)
# web2py only supports CPython 2.5-2.7; warn (but continue) otherwise.
if not sys.version[:3] in ['2.5', '2.6', '2.7']:
    msg = 'Warning: web2py requires Python 2.5, 2.6 or 2.7 but you are running:\n%s'
    msg = msg % sys.version
    sys.stderr.write(msg)
logger = logging.getLogger("web2py")
def run_system_tests(options):
    """
    Runs unittests for gluon.tests

    Exits the interpreter with status 1 on failure/unsupported Python,
    0 on success.
    """
    import subprocess
    major_version = sys.version_info[0]
    minor_version = sys.version_info[1]
    if major_version == 2:
        if minor_version in (5, 6):
            sys.stderr.write("Python 2.5 or 2.6\n")
            # 'unit2' is the unittest2 backport runner for old interpreters.
            ret = subprocess.call(['unit2', '-v', 'gluon.tests'])
        elif minor_version in (7,):
            call_args = [sys.executable, '-m', 'unittest', '-v', 'gluon.tests']
            if options.with_coverage:
                try:
                    import coverage
                    # Honour an externally supplied coverage config, falling
                    # back to the one bundled with the test suite.
                    coverage_config = os.environ.get(
                        "COVERAGE_PROCESS_START",
                        os.path.join('gluon', 'tests', 'coverage.ini'))
                    call_args = ['coverage', 'run', '--rcfile=%s' %
                                 coverage_config,
                                 '-m', 'unittest', '-v', 'gluon.tests']
                except:
                    sys.stderr.write('Coverage was not installed, skipping\n')
            sys.stderr.write("Python 2.7\n")
            ret = subprocess.call(call_args)
        else:
            sys.stderr.write("unknown python 2.x version\n")
            ret = 256
    else:
        sys.stderr.write("Only Python 2.x supported.\n")
        ret = 256
    # Collapse any non-zero subprocess status to exit code 1.
    sys.exit(ret and 1)
class IO(object):
    """Tee for stdout: every write also goes to a callback or a buffer."""
    def __init__(self):
        """Create the in-memory capture buffer."""
        self.buffer = cStringIO.StringIO()
    def write(self, data):
        """Echo *data* to the real stdout, then to self.callback if one has
        been attached, otherwise accumulate it in self.buffer."""
        sys.__stdout__.write(data)
        if hasattr(self, 'callback'):
            self.callback(data)
        else:
            self.buffer.write(data)
def get_url(host, path='/', proto='http', port=80):
    """Compose a browsable URL from its parts.

    IPv6 hosts are wrapped in brackets, the wildcard address 0.0.0.0 is
    rewritten to 127.0.0.1, a single trailing ':' on *proto* and a single
    leading '/' on *path* are dropped, and the port is omitted when it is
    80 or falsy.
    """
    host = '[%s]' % host if ':' in host else host.replace('0.0.0.0', '127.0.0.1')
    if path[:1] == '/':
        path = path[1:]
    if proto[-1:] == ':':
        proto = proto[:-1]
    port = '' if not port or port == 80 else ':%s' % port
    return '%s://%s%s/%s' % (proto, host, port, path)
def start_browser(url, startup=False):
    """Open *url* in the default web browser, best effort (never raises).

    When *startup* is set the URL is also printed first, so the user can
    visit it manually if no browser can be launched.
    """
    if startup:
        print 'please visit:'
        print '\t', url
    print 'starting browser...'
    try:
        import webbrowser
        webbrowser.open(url)
    except:
        print 'warning: unable to detect your browser'
class web2pyDialog(object):
""" Main window dialog """
def __init__(self, root, options):
""" web2pyDialog constructor """
import Tkinter
import tkMessageBox
bg_color = 'white'
root.withdraw()
self.root = Tkinter.Toplevel(root, bg=bg_color)
self.root.resizable(0,0)
self.root.title(ProgramName)
self.options = options
self.scheduler_processes = {}
self.menu = Tkinter.Menu(self.root)
servermenu = Tkinter.Menu(self.menu, tearoff=0)
httplog = os.path.join(self.options.folder, 'httpserver.log')
iconphoto = os.path.join('extras','icons','web2py.gif')
if os.path.exists(iconphoto):
img = Tkinter.PhotoImage(file=iconphoto)
self.root.tk.call('wm', 'iconphoto', self.root._w, img)
# Building the Menu
item = lambda: start_browser(httplog)
servermenu.add_command(label='View httpserver.log',
command=item)
servermenu.add_command(label='Quit (pid:%i)' % os.getpid(),
command=self.quit)
self.menu.add_cascade(label='Server', menu=servermenu)
self.pagesmenu = Tkinter.Menu(self.menu, tearoff=0)
self.menu.add_cascade(label='Pages', menu=self.pagesmenu)
#scheduler menu
self.schedmenu = Tkinter.Menu(self.menu, tearoff=0)
self.menu.add_cascade(label='Scheduler', menu=self.schedmenu)
#start and register schedulers from options
self.update_schedulers(start=True)
helpmenu = Tkinter.Menu(self.menu, tearoff=0)
# Home Page
item = lambda: start_browser('http://www.web2py.com/')
helpmenu.add_command(label='Home Page',
command=item)
# About
item = lambda: tkMessageBox.showinfo('About web2py', ProgramInfo)
helpmenu.add_command(label='About',
command=item)
self.menu.add_cascade(label='Info', menu=helpmenu)
self.root.config(menu=self.menu)
if options.taskbar:
self.root.protocol('WM_DELETE_WINDOW',
lambda: self.quit(True))
else:
self.root.protocol('WM_DELETE_WINDOW', self.quit)
sticky = Tkinter.NW
# Prepare the logo area
self.logoarea = Tkinter.Canvas(self.root,
background=bg_color,
width=300,
height=300)
self.logoarea.grid(row=0, column=0, columnspan=4, sticky=sticky)
self.logoarea.after(1000, self.update_canvas)
logo = os.path.join('extras','icons','splashlogo.gif')
if os.path.exists(logo):
img = Tkinter.PhotoImage(file=logo)
pnl = Tkinter.Label(self.logoarea, image=img, background=bg_color, bd=0)
pnl.pack(side='top', fill='both', expand='yes')
# Prevent garbage collection of img
pnl.image = img
# Prepare the banner area
self.bannerarea = Tkinter.Canvas(self.root,
bg=bg_color,
width=300,
height=300)
self.bannerarea.grid(row=1, column=1, columnspan=2, sticky=sticky)
Tkinter.Label(self.bannerarea, anchor=Tkinter.N,
text=str(ProgramVersion + "\n" + ProgramAuthor),
font=('Helvetica', 11), justify=Tkinter.CENTER,
foreground='#195866', background=bg_color,
height=3).pack( side='top',
fill='both',
expand='yes')
self.bannerarea.after(1000, self.update_canvas)
# IP
Tkinter.Label(self.root,
text='Server IP:', bg=bg_color,
justify=Tkinter.RIGHT).grid(row=4,
column=1,
sticky=sticky)
self.ips = {}
self.selected_ip = Tkinter.StringVar()
row = 4
ips = [('127.0.0.1', 'Local (IPv4)')] + \
([('::1', 'Local (IPv6)')] if socket.has_ipv6 else []) + \
[(ip, 'Public') for ip in options.ips] + \
[('0.0.0.0', 'Public')]
for ip, legend in ips:
self.ips[ip] = Tkinter.Radiobutton(
self.root, bg=bg_color, highlightthickness=0,
selectcolor='light grey', width=30,
anchor=Tkinter.W, text='%s (%s)' % (legend, ip),
justify=Tkinter.LEFT,
variable=self.selected_ip, value=ip)
self.ips[ip].grid(row=row, column=2, sticky=sticky)
if row == 4:
self.ips[ip].select()
row += 1
shift = row
# Port
Tkinter.Label(self.root,
text='Server Port:', bg=bg_color,
justify=Tkinter.RIGHT).grid(row=shift,
column=1, pady=10,
sticky=sticky)
self.port_number = Tkinter.Entry(self.root)
self.port_number.insert(Tkinter.END, self.options.port)
self.port_number.grid(row=shift, column=2, sticky=sticky, pady=10)
# Password
Tkinter.Label(self.root,
text='Choose Password:', bg=bg_color,
justify=Tkinter.RIGHT).grid(row=shift + 1,
column=1,
sticky=sticky)
self.password = Tkinter.Entry(self.root, show='*')
self.password.bind('<Return>', lambda e: self.start())
self.password.focus_force()
self.password.grid(row=shift + 1, column=2, sticky=sticky)
# Prepare the canvas
self.canvas = Tkinter.Canvas(self.root,
width=400,
height=100,
bg='black')
self.canvas.grid(row=shift + 2, column=1, columnspan=2, pady=5,
sticky=sticky)
self.canvas.after(1000, self.update_canvas)
# Prepare the frame
frame = Tkinter.Frame(self.root)
frame.grid(row=shift + 3, column=1, columnspan=2, pady=5,
sticky=sticky)
# Start button
self.button_start = Tkinter.Button(frame,
text='start server',
command=self.start)
self.button_start.grid(row=0, column=0, sticky=sticky)
# Stop button
self.button_stop = Tkinter.Button(frame,
text='stop server',
command=self.stop)
self.button_stop.grid(row=0, column=1, sticky=sticky)
self.button_stop.configure(state='disabled')
if options.taskbar:
import gluon.contrib.taskbar_widget
self.tb = gluon.contrib.taskbar_widget.TaskBarIcon()
self.checkTaskBar()
if options.password != '<ask>':
self.password.insert(0, options.password)
self.start()
self.root.withdraw()
else:
self.tb = None
def update_schedulers(self, start=False):
apps = []
available_apps = [arq for arq in os.listdir('applications/')]
available_apps = [arq for arq in available_apps
if os.path.exists(
'applications/%s/models/scheduler.py' % arq)]
if start:
#the widget takes care of starting the scheduler
if self.options.scheduler and self.options.with_scheduler:
apps = [app.strip() for app
in self.options.scheduler.split(',')
if app in available_apps]
for app in apps:
self.try_start_scheduler(app)
#reset the menu
self.schedmenu.delete(0, len(available_apps))
for arq in available_apps:
if arq not in self.scheduler_processes:
item = lambda u = arq: self.try_start_scheduler(u)
self.schedmenu.add_command(label="start %s" % arq,
command=item)
if arq in self.scheduler_processes:
item = lambda u = arq: self.try_stop_scheduler(u)
self.schedmenu.add_command(label="stop %s" % arq,
command=item)
def start_schedulers(self, app):
try:
from multiprocessing import Process
except:
sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n')
return
code = "from gluon import current;current._scheduler.loop()"
print 'starting scheduler from widget for "%s"...' % app
args = (app, True, True, None, False, code)
logging.getLogger().setLevel(self.options.debuglevel)
p = Process(target=run, args=args)
self.scheduler_processes[app] = p
self.update_schedulers()
print "Currently running %s scheduler processes" % (
len(self.scheduler_processes))
p.start()
print "Processes started"
    def try_stop_scheduler(self, app):
        """Terminate and reap the scheduler subprocess for *app* (no-op when
        it is not running), then rebuild the Scheduler menu."""
        if app in self.scheduler_processes:
            p = self.scheduler_processes[app]
            # Remove the entry before terminating so the menu rebuild sees
            # the scheduler as stopped.
            del self.scheduler_processes[app]
            p.terminate()
            p.join()
            self.update_schedulers()
    def try_start_scheduler(self, app):
        """Spawn the scheduler for *app* on a worker thread (no-op if one is
        already tracked), keeping the Tk event loop responsive."""
        if app not in self.scheduler_processes:
            t = threading.Thread(target=self.start_schedulers, args=(app,))
            t.start()
def checkTaskBar(self):
""" Check taskbar status """
if self.tb.status:
if self.tb.status[0] == self.tb.EnumStatus.QUIT:
self.quit()
elif self.tb.status[0] == self.tb.EnumStatus.TOGGLE:
if self.root.state() == 'withdrawn':
self.root.deiconify()
else:
self.root.withdraw()
elif self.tb.status[0] == self.tb.EnumStatus.STOP:
self.stop()
elif self.tb.status[0] == self.tb.EnumStatus.START:
self.start()
elif self.tb.status[0] == self.tb.EnumStatus.RESTART:
self.stop()
self.start()
del self.tb.status[0]
self.root.after(1000, self.checkTaskBar)
    def update(self, text):
        """ Update app text """
        try:
            # The text widget is kept read-only; unlock it only long enough
            # to append the new output.
            self.text.configure(state='normal')
            self.text.insert('end', text)
            self.text.configure(state='disabled')
        except:
            pass # ## this should only happen in case app is destroyed
def connect_pages(self):
""" Connect pages """
#reset the menu
available_apps = [arq for arq in os.listdir('applications/')
if os.path.exists(
'applications/%s/__init__.py' % arq)]
self.pagesmenu.delete(0, len(available_apps))
for arq in available_apps:
url = self.url + arq
self.pagesmenu.add_command(
label=url, command=lambda u=url: start_browser(u))
def quit(self, justHide=False):
""" Finish the program execution """
if justHide:
self.root.withdraw()
else:
try:
scheds = self.scheduler_processes.keys()
for t in scheds:
self.try_stop_scheduler(t)
except:
pass
try:
newcron.stopcron()
except:
pass
try:
self.server.stop()
except:
pass
try:
self.tb.Destroy()
except:
pass
self.root.destroy()
sys.exit(0)
    def error(self, message):
        """Pop up a modal Tk error dialog showing *message*."""
        import tkMessageBox
        tkMessageBox.showerror('web2py start server', message)
def start(self):
""" Start web2py server """
password = self.password.get()
if not password:
self.error('no password, no web admin interface')
ip = self.selected_ip.get()
if not is_valid_ip_address(ip):
return self.error('invalid host ip address')
try:
port = int(self.port_number.get())
except:
return self.error('invalid port number')
# Check for non default value for ssl inputs
if (len(self.options.ssl_certificate) > 0 or
len(self.options.ssl_private_key) > 0):
proto = 'https'
else:
proto = 'http'
self.url = get_url(ip, proto=proto, port=port)
self.connect_pages()
self.button_start.configure(state='disabled')
try:
options = self.options
req_queue_size = options.request_queue_size
self.server = main.HttpServer(
ip,
port,
password,
pid_filename=options.pid_filename,
log_filename=options.log_filename,
profiler_dir=options.profiler_dir,
ssl_certificate=options.ssl_certificate,
ssl_private_key=options.ssl_private_key,
ssl_ca_certificate=options.ssl_ca_certificate,
min_threads=options.minthreads,
max_threads=options.maxthreads,
server_name=options.server_name,
request_queue_size=req_queue_size,
timeout=options.timeout,
shutdown_timeout=options.shutdown_timeout,
path=options.folder,
interfaces=options.interfaces)
thread.start_new_thread(self.server.start, ())
except Exception, e:
self.button_start.configure(state='normal')
return self.error(str(e))
if not self.server_ready():
self.button_start.configure(state='normal')
return
self.button_stop.configure(state='normal')
if not options.taskbar:
thread.start_new_thread(
start_browser, (get_url(ip, proto=proto, port=port), True))
self.password.configure(state='readonly')
[ip.configure(state='disabled') for ip in self.ips.values()]
self.port_number.configure(state='readonly')
if self.tb:
self.tb.SetServerRunning()
def server_ready(self):
for listener in self.server.server.listeners:
if listener.ready:
return True
return False
    def stop(self):
        """ Stop web2py server """
        # Re-enable the configuration widgets so another server run can be
        # started, then shut the HTTP server down and update the tray icon.
        self.button_start.configure(state='normal')
        self.button_stop.configure(state='disabled')
        self.password.configure(state='normal')
        [ip.configure(state='normal') for ip in self.ips.values()]
        self.port_number.configure(state='normal')
        self.server.stop()
        if self.tb:
            self.tb.SetServerStopped()
def update_canvas(self):
""" Update canvas """
try:
t1 = os.path.getsize('httpserver.log')
except:
self.canvas.after(1000, self.update_canvas)
return
try:
fp = open('httpserver.log', 'r')
fp.seek(self.t0)
data = fp.read(t1 - self.t0)
fp.close()
value = self.p0[1:] + [10 + 90.0 / math.sqrt(1 + data.count('\n'))]
self.p0 = value
for i in xrange(len(self.p0) - 1):
c = self.canvas.coords(self.q0[i])
self.canvas.coords(self.q0[i],
(c[0],
self.p0[i],
c[2],
self.p0[i + 1]))
self.t0 = t1
except BaseException:
self.t0 = time.time()
self.t0 = t1
self.p0 = [100] * 400
self.q0 = [self.canvas.create_line(i, 100, i + 1, 100,
fill='green') for i in xrange(len(self.p0) - 1)]
self.canvas.after(1000, self.update_canvas)
def console():
""" Defines the behavior of the console web2py execution """
import optparse
import textwrap
usage = "python web2py.py"
description = """\
web2py Web Framework startup script.
ATTENTION: unless a password is specified (-a 'passwd') web2py will
attempt to run a GUI. In this case command line options are ignored."""
description = textwrap.dedent(description)
parser = optparse.OptionParser(
usage, None, optparse.Option, ProgramVersion)
parser.description = description
msg = ('IP address of the server (e.g., 127.0.0.1 or ::1); '
'Note: This value is ignored when using the \'interfaces\' option.')
parser.add_option('-i',
'--ip',
default='127.0.0.1',
dest='ip',
help=msg)
parser.add_option('-p',
'--port',
default='8000',
dest='port',
type='int',
help='port of server (8000)')
msg = ('password to be used for administration '
'(use -a "<recycle>" to reuse the last password))')
parser.add_option('-a',
'--password',
default='<ask>',
dest='password',
help=msg)
parser.add_option('-c',
'--ssl_certificate',
default='',
dest='ssl_certificate',
help='file that contains ssl certificate')
parser.add_option('-k',
'--ssl_private_key',
default='',
dest='ssl_private_key',
help='file that contains ssl private key')
msg = ('Use this file containing the CA certificate to validate X509 '
'certificates from clients')
parser.add_option('--ca-cert',
action='store',
dest='ssl_ca_certificate',
default=None,
help=msg)
parser.add_option('-d',
'--pid_filename',
default='httpserver.pid',
dest='pid_filename',
help='file to store the pid of the server')
parser.add_option('-l',
'--log_filename',
default='httpserver.log',
dest='log_filename',
help='file to log connections')
parser.add_option('-n',
'--numthreads',
default=None,
type='int',
dest='numthreads',
help='number of threads (deprecated)')
parser.add_option('--minthreads',
default=None,
type='int',
dest='minthreads',
help='minimum number of server threads')
parser.add_option('--maxthreads',
default=None,
type='int',
dest='maxthreads',
help='maximum number of server threads')
parser.add_option('-s',
'--server_name',
default=socket.gethostname(),
dest='server_name',
help='server name for the web server')
msg = 'max number of queued requests when server unavailable'
parser.add_option('-q',
'--request_queue_size',
default='5',
type='int',
dest='request_queue_size',
help=msg)
parser.add_option('-o',
'--timeout',
default='10',
type='int',
dest='timeout',
help='timeout for individual request (10 seconds)')
parser.add_option('-z',
'--shutdown_timeout',
default='5',
type='int',
dest='shutdown_timeout',
help='timeout on shutdown of server (5 seconds)')
parser.add_option('--socket-timeout',
default=5,
type='int',
dest='socket_timeout',
help='timeout for socket (5 second)')
parser.add_option('-f',
'--folder',
default=os.getcwd(),
dest='folder',
help='folder from which to run web2py')
parser.add_option('-v',
'--verbose',
action='store_true',
dest='verbose',
default=False,
help='increase --test verbosity')
parser.add_option('-Q',
'--quiet',
action='store_true',
dest='quiet',
default=False,
help='disable all output')
msg = ('set debug output level (0-100, 0 means all, 100 means none; '
'default is 30)')
parser.add_option('-D',
'--debug',
dest='debuglevel',
default=30,
type='int',
help=msg)
msg = ('run web2py in interactive shell or IPython (if installed) with '
'specified appname (if app does not exist it will be created). '
'APPNAME like a/c/f (c,f optional)')
parser.add_option('-S',
'--shell',
dest='shell',
metavar='APPNAME',
help=msg)
msg = ('run web2py in interactive shell or bpython (if installed) with '
'specified appname (if app does not exist it will be created).\n'
'Use combined with --shell')
parser.add_option('-B',
'--bpython',
action='store_true',
default=False,
dest='bpython',
help=msg)
msg = 'only use plain python shell; should be used with --shell option'
parser.add_option('-P',
'--plain',
action='store_true',
default=False,
dest='plain',
help=msg)
msg = ('auto import model files; default is False; should be used '
'with --shell option')
parser.add_option('-M',
'--import_models',
action='store_true',
default=False,
dest='import_models',
help=msg)
msg = ('run PYTHON_FILE in web2py environment; '
'should be used with --shell option')
parser.add_option('-R',
'--run',
dest='run',
metavar='PYTHON_FILE',
default='',
help=msg)
msg = ('run scheduled tasks for the specified apps: expects a list of '
'app names as -K app1,app2,app3 '
'or a list of app:groups as -K app1:group1:group2,app2:group1 '
'to override specific group_names. (only strings, no spaces '
'allowed. Requires a scheduler defined in the models')
parser.add_option('-K',
'--scheduler',
dest='scheduler',
default=None,
help=msg)
msg = 'run schedulers alongside webserver, needs -K app1 and -a too'
parser.add_option('-X',
'--with-scheduler',
action='store_true',
default=False,
dest='with_scheduler',
help=msg)
msg = ('run doctests in web2py environment; '
'TEST_PATH like a/c/f (c,f optional)')
parser.add_option('-T',
'--test',
dest='test',
metavar='TEST_PATH',
default=None,
help=msg)
msg = 'trigger a cron run manually; usually invoked from a system crontab'
parser.add_option('-C',
'--cron',
action='store_true',
dest='extcron',
default=False,
help=msg)
msg = 'triggers the use of softcron'
parser.add_option('--softcron',
action='store_true',
dest='softcron',
default=False,
help=msg)
parser.add_option('-Y',
'--run-cron',
action='store_true',
dest='runcron',
default=False,
help='start the background cron process')
parser.add_option('-J',
'--cronjob',
action='store_true',
dest='cronjob',
default=False,
help='identify cron-initiated command')
parser.add_option('-L',
'--config',
dest='config',
default='',
help='config file')
parser.add_option('-F',
'--profiler',
dest='profiler_dir',
default=None,
help='profiler dir')
parser.add_option('-t',
'--taskbar',
action='store_true',
dest='taskbar',
default=False,
help='use web2py gui and run in taskbar (system tray)')
parser.add_option('',
'--nogui',
action='store_true',
default=False,
dest='nogui',
help='text-only, no GUI')
msg = ('should be followed by a list of arguments to be passed to script, '
'to be used with -S, -A must be the last option')
parser.add_option('-A',
'--args',
action='store',
dest='args',
default=None,
help=msg)
parser.add_option('--no-banner',
action='store_true',
default=False,
dest='nobanner',
help='Do not print header banner')
msg = ('listen on multiple addresses: '
'"ip1:port1:key1:cert1:ca_cert1;ip2:port2:key2:cert2:ca_cert2;..." '
'(:key:cert:ca_cert optional; no spaces; IPv6 addresses must be in '
'square [] brackets)')
parser.add_option('--interfaces',
action='store',
dest='interfaces',
default=None,
help=msg)
msg = 'runs web2py tests'
parser.add_option('--run_system_tests',
action='store_true',
dest='run_system_tests',
default=False,
help=msg)
msg = ('adds coverage reporting (needs --run_system_tests), '
'python 2.7 and the coverage module installed. '
'You can alter the default path setting the environmental '
'var "COVERAGE_PROCESS_START". '
'By default it takes gluon/tests/coverage.ini')
parser.add_option('--with_coverage',
action='store_true',
dest='with_coverage',
default=False,
help=msg)
if '-A' in sys.argv:
k = sys.argv.index('-A')
elif '--args' in sys.argv:
k = sys.argv.index('--args')
else:
k = len(sys.argv)
sys.argv, other_args = sys.argv[:k], sys.argv[k + 1:]
(options, args) = parser.parse_args()
options.args = [options.run] + other_args
global_settings.cmd_options = options
global_settings.cmd_args = args
try:
options.ips = list(set( # no duplicates
[addrinfo[4][0] for addrinfo in getipaddrinfo(socket.getfqdn())
if not is_loopback_ip_address(addrinfo=addrinfo)]))
except socket.gaierror:
options.ips = []
if options.run_system_tests:
run_system_tests(options)
if options.quiet:
capture = cStringIO.StringIO()
sys.stdout = capture
logger.setLevel(logging.CRITICAL + 1)
else:
logger.setLevel(options.debuglevel)
if options.config[-3:] == '.py':
options.config = options.config[:-3]
if options.cronjob:
global_settings.cronjob = True # tell the world
options.plain = True # cronjobs use a plain shell
options.nobanner = True
options.nogui = True
options.folder = os.path.abspath(options.folder)
# accept --interfaces in the form
# "ip1:port1:key1:cert1:ca_cert1;[ip2]:port2;ip3:port3:key3:cert3"
# (no spaces; optional key:cert indicate SSL)
if isinstance(options.interfaces, str):
interfaces = options.interfaces.split(';')
options.interfaces = []
for interface in interfaces:
if interface.startswith('['): # IPv6
ip, if_remainder = interface.split(']', 1)
ip = ip[1:]
if_remainder = if_remainder[1:].split(':')
if_remainder[0] = int(if_remainder[0]) # numeric port
options.interfaces.append(tuple([ip] + if_remainder))
else: # IPv4
interface = interface.split(':')
interface[1] = int(interface[1]) # numeric port
options.interfaces.append(tuple(interface))
# accepts --scheduler in the form
# "app:group1,group2,app2:group1"
scheduler = []
options.scheduler_groups = None
if isinstance(options.scheduler, str):
if ':' in options.scheduler:
for opt in options.scheduler.split(','):
scheduler.append(opt.split(':'))
options.scheduler = ','.join([app[0] for app in scheduler])
options.scheduler_groups = scheduler
if options.numthreads is not None and options.minthreads is None:
options.minthreads = options.numthreads # legacy
create_welcome_w2p()
if not options.cronjob:
# If we have the applications package or if we should upgrade
if not os.path.exists('applications/__init__.py'):
write_file('applications/__init__.py', '')
return options, args
def check_existent_app(options, appname):
    """Return True if *appname* exists under the applications/ folder.

    options: parsed command-line options; only ``options.folder`` (the
        web2py installation folder) is read.
    appname: name of the application directory to look for.
    """
    # Return an explicit bool instead of True/None so callers always get
    # a boolean (None and False are both falsy, so this stays backward
    # compatible with existing truthiness checks).
    return os.path.isdir(os.path.join(options.folder, 'applications', appname))
def get_code_for_scheduler(app, options):
    # Build the Python code string that a scheduler worker process will
    # execute for this application.
    #
    # app: a sequence (appname, group1, group2, ...); items after the
    #      first are optional scheduler group names.
    # options: parsed command-line options (used to locate the app).
    #
    # Returns (appname, code), or (None, None) if the app does not exist.
    if len(app) == 1 or app[1] is None:
        code = "from gluon import current;current._scheduler.loop()"
    else:
        # Restrict the scheduler to the requested group names before
        # entering its loop.
        code = "from gluon import current;current._scheduler.group_names = ['%s'];"
        code += "current._scheduler.loop()"
        code = code % ("','".join(app[1:]))
    app_ = app[0]
    if not check_existent_app(options, app_):
        print "Application '%s' doesn't exist, skipping" % app_
        return None, None
    return app_, code
def start_schedulers(options):
    """Start the -K schedulers.

    With a single app (and no web server requested) the scheduler runs
    in this process; otherwise one multiprocessing.Process is spawned
    per application and this call blocks until they exit.
    """
    try:
        from multiprocessing import Process
    except:
        sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n')
        return
    processes = []
    apps = [(app.strip(), None) for app in options.scheduler.split(',')]
    if options.scheduler_groups:
        # "app:group1,app2:group2" syntax was parsed earlier into
        # per-app group lists; prefer that richer form.
        apps = options.scheduler_groups
    code = "from gluon import current;current._scheduler.loop()"
    logging.getLogger().setLevel(options.debuglevel)
    if len(apps) == 1 and not options.with_scheduler:
        # Single app and no embedded web server: run in-process.
        app_, code = get_code_for_scheduler(apps[0], options)
        if not app_:
            return
        print 'starting single-scheduler for "%s"...' % app_
        run(app_, True, True, None, False, code)
        return
    for app in apps:
        app_, code = get_code_for_scheduler(app, options)
        if not app_:
            continue
        print 'starting scheduler for "%s"...' % app_
        args = (app_, True, True, None, False, code)
        p = Process(target=run, args=args)
        processes.append(p)
        print "Currently running %s scheduler processes" % (len(processes))
        p.start()
        ##to avoid bashing the db at the same time
        time.sleep(0.7)
    print "Processes started"
    for p in processes:
        try:
            p.join()
        except (KeyboardInterrupt, SystemExit):
            print "Processes stopped"
        except:
            p.terminate()
            p.join()
def start(cron=True):
""" Start server """
# ## get command line arguments
(options, args) = console()
if not options.nobanner:
print ProgramName
print ProgramAuthor
print ProgramVersion
from dal import DRIVERS
if not options.nobanner:
print 'Database drivers available: %s' % ', '.join(DRIVERS)
# ## if -L load options from options.config file
if options.config:
try:
options2 = __import__(options.config, {}, {}, '')
except Exception:
try:
# Jython doesn't like the extra stuff
options2 = __import__(options.config)
except Exception:
print 'Cannot import config file [%s]' % options.config
sys.exit(1)
for key in dir(options2):
if hasattr(options, key):
setattr(options, key, getattr(options2, key))
logfile0 = os.path.join('extras','examples','logging.example.conf')
if not os.path.exists('logging.conf') and os.path.exists(logfile0):
import shutil
sys.stdout.write("Copying logging.conf.example to logging.conf ... ")
shutil.copyfile('logging.example.conf', logfile0)
sys.stdout.write("OK\n")
# ## if -T run doctests (no cron)
if hasattr(options, 'test') and options.test:
test(options.test, verbose=options.verbose)
return
# ## if -S start interactive shell (also no cron)
if options.shell:
if not options.args is None:
sys.argv[:] = options.args
run(options.shell, plain=options.plain, bpython=options.bpython,
import_models=options.import_models, startfile=options.run,
cronjob=options.cronjob)
return
# ## if -C start cron run (extcron) and exit
# ## -K specifies optional apps list (overloading scheduler)
if options.extcron:
logger.debug('Starting extcron...')
global_settings.web2py_crontype = 'external'
if options.scheduler: # -K
apps = [app.strip() for app in options.scheduler.split(
',') if check_existent_app(options, app.strip())]
else:
apps = None
extcron = newcron.extcron(options.folder, apps=apps)
extcron.start()
extcron.join()
return
# ## if -K
if options.scheduler and not options.with_scheduler:
try:
start_schedulers(options)
except KeyboardInterrupt:
pass
return
# ## if -H cron is enabled in this *process*
# ## if --softcron use softcron
# ## use hardcron in all other cases
if cron and options.runcron and options.softcron:
print 'Using softcron (but this is not very efficient)'
global_settings.web2py_crontype = 'soft'
elif cron and options.runcron:
logger.debug('Starting hardcron...')
global_settings.web2py_crontype = 'hard'
newcron.hardcron(options.folder).start()
# ## if no password provided and havetk start Tk interface
# ## or start interface if we want to put in taskbar (system tray)
try:
options.taskbar
except:
options.taskbar = False
if options.taskbar and os.name != 'nt':
print 'Error: taskbar not supported on this platform'
sys.exit(1)
root = None
if not options.nogui and options.password=='<ask>':
try:
import Tkinter
havetk = True
try:
root = Tkinter.Tk()
except:
pass
except (ImportError, OSError):
logger.warn(
'GUI not available because Tk library is not installed')
havetk = False
options.nogui = True
if root:
root.focus_force()
# Mac OS X - make the GUI window rise to the top
if os.path.exists("/usr/bin/osascript"):
applescript = """
tell application "System Events"
set proc to first process whose unix id is %d
set frontmost of proc to true
end tell
""" % (os.getpid())
os.system("/usr/bin/osascript -e '%s'" % applescript)
master = web2pyDialog(root, options)
signal.signal(signal.SIGTERM, lambda a, b: master.quit())
try:
root.mainloop()
except:
master.quit()
sys.exit()
# ## if no tk and no password, ask for a password
if not root and options.password == '<ask>':
options.password = getpass.getpass('choose a password:')
if not options.password and not options.nobanner:
print 'no password, no admin interface'
# ##-X (if no tk, the widget takes care of it himself)
if not root and options.scheduler and options.with_scheduler:
t = threading.Thread(target=start_schedulers, args=(options,))
t.start()
# ## start server
# Use first interface IP and port if interfaces specified, since the
# interfaces option overrides the IP (and related) options.
if not options.interfaces:
(ip, port) = (options.ip, int(options.port))
else:
first_if = options.interfaces[0]
(ip, port) = first_if[0], first_if[1]
# Check for non default value for ssl inputs
if (len(options.ssl_certificate) > 0) or (len(options.ssl_private_key) > 0):
proto = 'https'
else:
proto = 'http'
url = get_url(ip, proto=proto, port=port)
if not options.nobanner:
print 'please visit:'
print '\t', url
print 'use "kill -SIGTERM %i" to shutdown the web2py server' % os.getpid()
# enhance linecache.getline (used by debugger) to look at the source file
# if the line was not found (under py2exe & when file was modified)
import linecache
py2exe_getline = linecache.getline
def getline(filename, lineno, *args, **kwargs):
line = py2exe_getline(filename, lineno, *args, **kwargs)
if not line:
try:
f = open(filename, "r")
try:
for i, line in enumerate(f):
if lineno == i + 1:
break
else:
line = None
finally:
f.close()
except (IOError, OSError):
line = None
return line
linecache.getline = getline
server = main.HttpServer(ip=ip,
port=port,
password=options.password,
pid_filename=options.pid_filename,
log_filename=options.log_filename,
profiler_dir=options.profiler_dir,
ssl_certificate=options.ssl_certificate,
ssl_private_key=options.ssl_private_key,
ssl_ca_certificate=options.ssl_ca_certificate,
min_threads=options.minthreads,
max_threads=options.maxthreads,
server_name=options.server_name,
request_queue_size=options.request_queue_size,
timeout=options.timeout,
socket_timeout=options.socket_timeout,
shutdown_timeout=options.shutdown_timeout,
path=options.folder,
interfaces=options.interfaces)
try:
server.start()
except KeyboardInterrupt:
server.stop()
try:
t.join()
except:
pass
logging.shutdown()
| pouyana/teireader | webui/gluon/widget.py | Python | mit | 44,631 | [
"VisIt"
] | 45ef0088ddbf8414e31e371e624ff8ddda4e927b3dc7375657a675c8486ded1c |
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from peacock.utils import Testing
from peacock.Input.ParameterInfo import ParameterInfo
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from PyQt5 import QtWidgets
class Tests(Testing.PeacockTester):
    # Unit tests for peacock.Input.ParameterInfo.
    qapp = QtWidgets.QApplication([])
    def createData(self,
                   name,
                   default="",
                   cpp_type="string",
                   basic_type="String",
                   description="",
                   group_name="",
                   required=True,
                   options="",
                   ):
        # Build a raw parameter dict in the shape that
        # ParameterInfo.setFromData() consumes.
        return {"name": name,
                "default": default,
                "cpp_type": cpp_type,
                "basic_type": basic_type,
                "description": description,
                "group_name": group_name,
                "required": required,
                "options": options,
                }
    def checkParameter(self,
                       p,
                       name,
                       default="",
                       description="",
                       value="",
                       user_added=False,
                       required=False,
                       cpp_type="string",
                       group_name="Main",
                       comments="",
                       ):
        # Assert every public attribute of a ParameterInfo instance
        # against the expected values (keyword defaults mirror the
        # ParameterInfo defaults).
        self.assertEqual(p.name, name)
        self.assertEqual(p.default, default)
        self.assertEqual(p.description, description)
        self.assertEqual(p.value, value)
        self.assertEqual(p.user_added, user_added)
        self.assertEqual(p.required, required)
        self.assertEqual(p.cpp_type, cpp_type)
        self.assertEqual(p.group_name, group_name)
        self.assertEqual(p.comments, comments)
    def testBasic(self):
        # setFromData should copy every field from the raw dict.
        p = ParameterInfo(None, "p0")
        y = self.createData("p1", default="foo", cpp_type="some type", description="description", group_name="group", required=True)
        p.setFromData(y)
        y["default"] = "foo"
        self.checkParameter(p, "p1", value="foo", default="foo", cpp_type="some type", description="description", group_name="group", required=True)
    def testCopy(self):
        # copy() must produce an attribute-for-attribute clone.
        p = ParameterInfo(None, "p0")
        p1 = p.copy(None)
        self.assertEqual(p.__dict__, p1.__dict__)
    def testDump(self):
        # dump() writes a human-readable report to the given stream.
        p = ParameterInfo(None, "p0")
        o = StringIO()
        p.dump(o)
        val = o.getvalue()
        self.assertIn("Name", val)
    def testTypes(self):
        # Vector types need quoting in the input file; booleans are
        # normalized to "true"/"false"; a None default becomes "".
        p = ParameterInfo(None, "p0")
        y = self.createData("p1", cpp_type="vector<string>", basic_type="Array", default=None)
        p.setFromData(y)
        self.assertEqual(p.needsQuotes(), True)
        self.assertEqual(p.isVectorType(), True)
        self.assertEqual(p.default, "")
        p.value = "foo"
        self.assertEqual(p.inputFileValue(), "'foo'")
        y = self.createData("p1", cpp_type="bool", basic_type="Boolean", default="0")
        p.setFromData(y)
        self.assertEqual(p.value, "false")
        self.assertEqual(p.default, "false")
        self.assertEqual(p.needsQuotes(), False)
        self.assertEqual(p.isVectorType(), False)
        self.assertEqual(p.inputFileValue(), "false")
        y = self.createData("p1", cpp_type="bool", basic_type="Boolean", default="1")
        p.setFromData(y)
        self.assertEqual(p.value, "true")
        self.assertEqual(p.default, "true")
        y = self.createData("p1", cpp_type="bool")
        p.setFromData(y)
        self.assertEqual(p.value, "false")
        self.assertEqual(p.default, "false")
# Run this module's tests through peacock's test harness.
if __name__ == '__main__':
    Testing.run_tests()
| nuclear-wizard/moose | python/peacock/tests/input_tab/ParameterInfo/test_ParameterInfo.py | Python | lgpl-2.1 | 3,778 | [
"MOOSE"
] | fbfe55fa31f352ebb4876a3972a2081a0aacb0e26cdd444b689032a5d3ed816b |
"""
This example demonstrates using Mayavi as a component of a large Qt
application.
For this use, Mayavi is embedded in a QWidget. To understand this
example, please read section :ref:`builing-applications`.
"""
# First, and before importing any Enthought packages, set the ETS_TOOLKIT
# environment variable to qt4, to tell Traits that we will use Qt.
import os
os.environ['ETS_TOOLKIT'] = 'qt4'
# By default, the PySide binding will be used. If you want the PyQt bindings
# to be used, you need to set the QT_API environment variable to 'pyqt'
#os.environ['QT_API'] = 'pyqt'
# To be able to use PySide or PyQt4 and not run in conflicts with traits,
# we need to import QtGui and QtCore from pyface.qt
from pyface.qt import QtGui, QtCore
# Alternatively, you can bypass this line, but you need to make sure that
# the following lines are executed before the import of PyQT:
# import sip
# sip.setapi('QString', 2)
from traits.api import HasTraits, Instance, on_trait_change
from traitsui.api import View, Item
from mayavi.core.ui.api import MayaviScene, MlabSceneModel, \
SceneEditor
################################################################################
#The actual visualization
class Visualization(HasTraits):
    """Traits object holding the embedded Mayavi scene and its dialog layout."""
    # Empty-tuple argument tells Traits to instantiate the scene model
    # lazily with default arguments.
    scene = Instance(MlabSceneModel, ())
    @on_trait_change('scene.activated')
    def update_plot(self):
        # This function is called when the view is opened. We don't
        # populate the scene when the view is not yet open, as some
        # VTK features require a GLContext.
        # We can do normal mlab calls on the embedded scene.
        self.scene.mlab.test_points3d()
    # the layout of the dialog created
    view = View(Item('scene', editor=SceneEditor(scene_class=MayaviScene),
                     height=250, width=300, show_label=False),
                resizable=True # We need this to resize with the parent widget
                )
################################################################################
# The QWidget containing the visualization, this is pure PyQt4 code.
class MayaviQWidget(QtGui.QWidget):
    """QWidget that embeds a Visualization as a borderless subpanel."""
    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        layout = QtGui.QVBoxLayout(self)
        # Remove all padding so the Mayavi scene fills the widget.
        layout.setContentsMargins(0,0,0,0)
        layout.setSpacing(0)
        self.visualization = Visualization()
        # If you want to debug, beware that you need to remove the Qt
        # input hook.
        #QtCore.pyqtRemoveInputHook()
        #import pdb ; pdb.set_trace()
        #QtCore.pyqtRestoreInputHook()
        # The edit_traits call will generate the widget to embed.
        self.ui = self.visualization.edit_traits(parent=self,
                                                 kind='subpanel').control
        layout.addWidget(self.ui)
        self.ui.setParent(self)
if __name__ == "__main__":
# Don't create a new QApplication, it would unhook the Events
# set by Traits on the existing QApplication. Simply use the
# '.instance()' method to retrieve the existing one.
app = QtGui.QApplication.instance()
container = QtGui.QWidget()
container.setWindowTitle("Embedding Mayavi in a PyQt4 Application")
# define a "complex" layout to test the behaviour
layout = QtGui.QGridLayout(container)
# put some stuff around mayavi
label_list = []
for i in range(3):
for j in range(3):
if (i==1) and (j==1):continue
label = QtGui.QLabel(container)
label.setText("Your QWidget at (%d, %d)" % (i,j))
label.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter)
layout.addWidget(label, i, j)
label_list.append(label)
mayavi_widget = MayaviQWidget(container)
layout.addWidget(mayavi_widget, 1, 1)
container.show()
window = QtGui.QMainWindow()
window.setCentralWidget(container)
window.show()
# Start the main event loop.
app.exec_()
| dmsurti/mayavi | examples/mayavi/interactive/qt_embedding.py | Python | bsd-3-clause | 3,934 | [
"Mayavi",
"VTK"
] | 5c530fd13185b90309559c8a48728f9017e38dd2614aa30a09441fbff95728e9 |
from ..utils import *
##
# Hero Powers
# NOTE(review): in this card DSL, `activate` fires on hero-power use,
# `play` on card play, `events` registers triggered effects and
# `update` declares continuous effects/auras -- confirm against the
# fireplace documentation.
# Lesser Heal (Anduin Wrynn)
class CS1h_001:
	activate = Heal(TARGET, 2)
##
# Minions
# Northshire Cleric
class CS2_235:
	events = Heal(ALL_MINIONS).on(Draw(CONTROLLER))
# Lightwarden
class EX1_001:
	events = Heal().on(Buff(SELF, "EX1_001e"))
EX1_001e = buff(atk=2)
# Cabal Shadow Priest
class EX1_091:
	play = Steal(TARGET)
# Lightspawn
class EX1_335:
	# ATK continuously mirrors current health.
	update = Refresh(SELF, {GameTag.ATK: lambda self, i: self.health}, priority=100)
# Lightwell
class EX1_341:
	events = OWN_TURN_BEGIN.on(Heal(RANDOM(FRIENDLY + DAMAGED_CHARACTERS), 3))
# Prophet Velen
class EX1_350:
	update = Refresh(CONTROLLER, {
		GameTag.HEALING_DOUBLE: 1,
		GameTag.SPELLPOWER_DOUBLE: 1,
		GameTag.HERO_POWER_DOUBLE: 1,
	})
# Auchenai Soulpriest
class EX1_591:
	update = Refresh(CONTROLLER, {
		GameTag.OUTGOING_HEALING_ADJUSTMENT: -1,
	})
# Temple Enforcer
class EX1_623:
	play = Buff(TARGET, "EX1_623e")
EX1_623e = buff(health=3)
##
# Spells
# Power Word: Shield
class CS2_004:
	play = Buff(TARGET, "CS2_004e"), Draw(CONTROLLER)
CS2_004e = buff(health=2)
# Holy Nova
class CS1_112:
	play = Hit(ENEMY_CHARACTERS, 2), Heal(FRIENDLY_CHARACTERS, 2)
# Mind Control
class CS1_113:
	play = Steal(TARGET)
# Inner Fire
class CS1_129:
	play = Buff(TARGET, "CS1_129e")
class CS1_129e:
	# The buff captures the target's health at apply time and sets
	# ATK to that captured value.
	atk = lambda self, i: self._xatk
	def apply(self, target):
		self._xatk = target.health
# Holy Smite
class CS1_130:
	play = Hit(TARGET, 2)
# Mind Vision
class CS2_003:
	play = Give(CONTROLLER, Copy(RANDOM(ENEMY_HAND)))
# Shadow Word: Pain
class CS2_234:
	play = Destroy(TARGET)
# Divine Spirit
class CS2_236:
	play = Buff(TARGET, "CS2_236e")
class CS2_236e:
	# Max health is increased by the target's current health
	# (captured when the buff is applied).
	def apply(self, target):
		self.max_health = target.health
# Mind Blast
class DS1_233:
	play = Hit(ENEMY_HERO, 5)
# Silence
class EX1_332:
	play = Silence(TARGET)
# Shadow Madness
class EX1_334:
	play = Steal(TARGET), Buff(TARGET, "EX1_334e")
class EX1_334e:
	# The stolen minion goes back to its owner at end of turn, or
	# immediately if the buff is silenced.
	events = [
		TURN_END.on(Destroy(SELF), Steal(OWNER, OPPONENT)),
		Silence(OWNER).on(Steal(OWNER, OPPONENT))
	]
	tags = {GameTag.CHARGE: True}
# Thoughtsteal
class EX1_339:
	play = Give(CONTROLLER, Copy(RANDOM(ENEMY_DECK) * 2))
# Mindgames
class EX1_345:
	# Summon a copy of a random minion from the enemy deck; if the
	# deck holds no minion, summon the Shadow of Nothing token instead.
	play = (
		Find(ENEMY_DECK + MINION) &
		Summon(CONTROLLER, Copy(RANDOM(ENEMY_DECK + MINION))) |
		Summon(CONTROLLER, "EX1_345t")
	)
# Circle of Healing
class EX1_621:
	play = Heal(ALL_MINIONS, 4)
# Shadow Word: Death
class EX1_622:
	play = Destroy(TARGET)
# Holy Fire
class EX1_624:
	play = Hit(TARGET, 5), Heal(FRIENDLY_HERO, 5)
# Shadowform
class EX1_625:
	# Upgrade chain: default power -> Mind Spike -> Mind Shatter;
	# casting again at Mind Shatter does nothing.
	def play(self):
		if self.controller.hero.power.id == "EX1_625t":
			yield Summon(CONTROLLER, "EX1_625t2")
		elif self.controller.hero.power.id == "EX1_625t2":
			pass
		else:
			yield Summon(CONTROLLER, "EX1_625t")
# Mind Spike
class EX1_625t:
	activate = Hit(TARGET, 2)
	update = Refresh(CONTROLLER, {GameTag.SHADOWFORM: True})
# Mind Shatter
class EX1_625t2:
	activate = Hit(TARGET, 3)
	update = Refresh(CONTROLLER, {GameTag.SHADOWFORM: True})
# Mass Dispel
class EX1_626:
	play = Silence(ENEMY_MINIONS), Draw(CONTROLLER)
| amw2104/fireplace | fireplace/cards/classic/priest.py | Python | agpl-3.0 | 3,116 | [
"BLAST"
] | 9d8ea706b9774273e1185130b842118880a27f458a92b227717d594e6e467628 |
from distribute_jobs import (
SLURM, format_slurm_parameters, write_python_script)
from os.path import join
from qmflows import Settings
import fnmatch
import h5py
import os
def main():
    """Collect distributed CP2K results: merge the partial HDF5 files
    into one and emit a SLURM launch script that computes any missing
    couplings."""
    # Current working directory
    cwd = os.getcwd()
    # ========== Fill in the following variables
    # Variable to define the path where the CP2K jobs will be computed
    scratch = "/path/to/scratch"
    project_name = 'My_awesome_project'  # name used to create folders
    # Path to the basis set used by Cp2k
    basisCP2K = "/Path/to/CP2K/BASIS_MOLOPT"
    potCP2K = "/Path/to/CP2K/GTH_POTENTIALS"
    path_to_trajectory = 'Path/to/trajectory/in/XYZ'
    # Number of MO used to compute the coupling
    nHOMO = None
    couplings_range = None
    # Basis
    basis = "DZVP-MOLOPT-SR-GTH"
    # Algorithm to compute the NAC
    algorithm = 'levine'
    # Integration step used for the dynamics (femtoseconds)
    dt = 1
    # ============== End of User definitions ===================================
    cp2k_args = Settings()
    cp2k_args.basis = basis
    # Results folder
    results_dir = join(cwd, 'total_results')
    if not os.path.exists(results_dir):
        os.mkdir(results_dir)
    # Merge all the HDF5 files
    file_hdf5 = merge_hdf5(scratch, project_name, cwd, results_dir)
    # compute missing couplings
    script_name = "merge_data.py"
    write_python_script(scratch, 'total_results', path_to_trajectory, project_name,
                        basisCP2K, potCP2K, cp2k_args, Settings(), 0, script_name,
                        file_hdf5, nHOMO, couplings_range, algorithm, dt)
    # Script using SLURM
    write_slurm_script(scratch, results_dir, script_name)
def write_slurm_script(scratch, results_dir, script_name):
    """Write a SLURM launch script (``launchSlurm.sh``) into *results_dir*.

    The generated script runs *script_name* with python and then copies
    the computed hamiltonians from the scratch folder into the results
    folder.
    """
    slurm_params = SLURM(1, 24, "00:60:00", "merged_namd")
    script_body = (
        format_slurm_parameters(slurm_params)
        + f"python {script_name}\n"
        + f"cp -r {join(scratch, 'hamiltonians')} {results_dir}"
    )
    with open(join(results_dir, "launchSlurm.sh"), 'w') as handle:
        handle.write(script_body)
def merge_hdf5(scratch, project_name, cwd, results_dir):
    """Merge every ``*.hdf5`` file found in *scratch* into one file.

    Creates (or touches) ``<results_dir>/<project_name>.hdf5`` and folds
    each partial HDF5 file into it.  *cwd* is accepted for interface
    compatibility but is not used.  Returns the merged file's path.
    """
    target = join(results_dir, f'{project_name}.hdf5')
    # Make sure the destination file exists (equivalent to `touch`).
    with open(target, 'a'):
        os.utime(target)
    candidates = fnmatch.filter(os.listdir(scratch), '*.hdf5')
    print("files: ", candidates)
    for name in candidates:
        source = join(scratch, name)
        print("Merging file: ", source)
        merge_files(source, target)
    return target
def merge_files(file_inp, file_out):
    """Recursively merge the contents of HDF5 file *file_inp* into *file_out*.

    Only top-level groups and second-level entries missing from
    *file_out* are copied; existing entries are left untouched.
    """
    print("Merging files: ", file_inp, file_out)
    with h5py.File(file_inp, 'r') as f5, h5py.File(file_out, 'r+') as g5:
        for k in f5.keys():
            if k not in g5:
                g5.create_group(k)
            for name in f5[k].keys():
                if name not in g5[k]:
                    # HDF5 paths always use '/', so build the path
                    # explicitly instead of os.path.join (which would
                    # use '\\' on Windows and break the lookup).
                    f5.copy(f"{k}/{name}", g5[k])
if __name__ == "__main__":
main()
| felipeZ/nonAdiabaticCoupling | scripts/distribution/merge_job.py | Python | mit | 3,235 | [
"CP2K"
] | 74041d26cb7e16ccc4e19fc23917106bc8075db6d770617f93c4f25de4463ac6 |
# This file is part of cclib (http://cclib.sf.net), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2006, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
__revision__ = "$Revision$"
import random # For sometimes running the progress updater
import numpy
numpy.inv = numpy.linalg.inv
from .calculationmethod import *
class FragmentAnalysis(Method):
    """Convert a molecule's basis functions from atomic-based to fragment MO-based"""

    def __init__(self, data, progress=None, loglevel=logging.INFO,
                 logname="FragmentAnalysis of"):

        # Call the __init__ method of the superclass.
        super(FragmentAnalysis, self).__init__(data, progress, loglevel, logname)
        self.parsed = False

    def __str__(self):
        """Return a string representation of the object."""
        # Bug fix: the format string was missing the %s placeholder, so
        # str(self) raised TypeError ("not all arguments converted
        # during string formatting").
        return "Fragment molecule basis of %s" % (self.data)

    def __repr__(self):
        """Return a representation of the object."""
        return 'Fragment molecular basis("%s")' % (self.data)

    def calculate(self, fragments, cupdate=0.05):
        """Transform the molecule's MO coefficients into the basis of the
        given fragments' molecular orbitals.

        fragments -- list of parsed fragment data objects whose combined
            basis functions, electrons and atom coordinates must match
            the full molecule (checked below).
        cupdate -- unused; kept for interface compatibility.

        Returns True on success, False if a consistency check fails.
        Sets self.mocoeffs (and fooverlaps/fooverlaps2 when overlaps are
        available) on success.
        """
        nFragBasis = 0
        nFragAlpha = 0
        nFragBeta = 0
        self.fonames = []

        unrestricted = ( len(self.data.mocoeffs) == 2 )

        self.logger.info("Creating attribute fonames[]")

        # Collect basis info on the fragments.
        for j in range(len(fragments)):
            nFragBasis += fragments[j].nbasis
            nFragAlpha += fragments[j].homos[0] + 1
            if unrestricted and len(fragments[j].homos) == 1:
                nFragBeta += fragments[j].homos[0] + 1 #assume restricted fragment
            elif unrestricted and len(fragments[j].homos) == 2:
                nFragBeta += fragments[j].homos[1] + 1 #assume unrestricted fragment

            #assign fonames based on fragment name and MO number
            for i in range(fragments[j].nbasis):
                if hasattr(fragments[j],"name"):
                    self.fonames.append("%s_%i"%(fragments[j].name,i+1))
                else:
                    self.fonames.append("noname%i_%i"%(j,i+1))

        nBasis = self.data.nbasis
        nAlpha = self.data.homos[0] + 1
        if unrestricted:
            nBeta = self.data.homos[1] + 1

        # Check to make sure calcs have the right properties.
        if nBasis != nFragBasis:
            self.logger.error("Basis functions don't match")
            return False

        if nAlpha != nFragAlpha:
            self.logger.error("Alpha electrons don't match")
            return False

        if unrestricted and nBeta != nFragBeta:
            self.logger.error("Beta electrons don't match")
            return False

        if len(self.data.atomcoords) != 1:
            self.logger.warning("Molecule calc appears to be an optimization")

        for frag in fragments:
            if len(frag.atomcoords) != 1:
                self.logger.warning("One or more fragment appears to be an optimization")
                break

        # The fragments' coordinates, concatenated in order, must match
        # the molecule's coordinates exactly.
        last = 0
        for frag in fragments:
            size = frag.natom
            if self.data.atomcoords[0][last:last+size].tolist() != frag.atomcoords[0].tolist():
                self.logger.error("Atom coordinates aren't aligned")
                return False
            last += size

        # And let's begin!
        self.mocoeffs = []
        self.logger.info("Creating mocoeffs in new fragment MO basis: mocoeffs[]")

        for spin in range(len(self.data.mocoeffs)):
            blockMatrix = numpy.zeros((nBasis,nBasis), "d")
            pos = 0

            # Build up block-diagonal matrix from fragment mocoeffs.
            # Need to switch ordering from [mo,ao] to [ao,mo].
            for i in range(len(fragments)):
                size = fragments[i].nbasis
                if len(fragments[i].mocoeffs) == 1:
                    blockMatrix[pos:pos+size,pos:pos+size] = numpy.transpose(fragments[i].mocoeffs[0])
                else:
                    blockMatrix[pos:pos+size,pos:pos+size] = numpy.transpose(fragments[i].mocoeffs[spin])
                pos += size

            # Invert and mutliply to result in fragment MOs as basis.
            iBlockMatrix = numpy.inv(blockMatrix)
            results = numpy.transpose(numpy.dot(iBlockMatrix, numpy.transpose(self.data.mocoeffs[spin])))
            self.mocoeffs.append(results)

            if hasattr(self.data, "aooverlaps"):
                # Transform the AO overlap matrix into the fragment MO
                # basis: B^T * S * B.
                tempMatrix = numpy.dot(self.data.aooverlaps, blockMatrix)
                tBlockMatrix = numpy.transpose(blockMatrix)
                if spin == 0:
                    self.fooverlaps = numpy.dot(tBlockMatrix, tempMatrix)
                    self.logger.info("Creating fooverlaps: array[x,y]")
                elif spin == 1:
                    self.fooverlaps2 = numpy.dot(tBlockMatrix, tempMatrix)
                    self.logger.info("Creating fooverlaps (beta): array[x,y]")
            else:
                self.logger.warning("Overlap matrix missing")

        self.parsed = True
        self.nbasis = nBasis
        self.homos = self.data.homos
        return True
| Clyde-fare/cclib_bak | src/cclib/method/fragments.py | Python | lgpl-2.1 | 5,424 | [
"cclib"
] | 4310bb9d162b46e4c41b8d65a8148832cf9a1995a6a8be00e77dbc9d8347f6f8 |
"""K-means clustering"""
# Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Thomas Rueckstiess <ruecksti@in.tum.de>
# James Bergstra <james.bergstra@umontreal.ca>
# Jan Schlueter <scikit-learn@jan-schlueter.de>
# Nelle Varoquaux
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..utils.extmath import row_norms, squared_norm
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.fixes import astype
from ..utils import check_array
from ..utils import check_random_state
from ..utils import as_float_array
from ..utils import gen_batches
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..utils.random import choice
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from . import _k_means
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
    """Init n_clusters seeds according to k-means++

    Parameters
    -----------
    X: array or sparse matrix, shape (n_samples, n_features)
        The data to pick seeds for. To avoid memory copy, the input data
        should be double precision (dtype=np.float64).

    n_clusters: integer
        The number of seeds to choose

    x_squared_norms: array, shape (n_samples,)
        Squared Euclidean norm of each data point.

    random_state: numpy.RandomState
        The generator used to initialize the centers.

    n_local_trials: integer, optional
        The number of seeding trials for each center (except the first),
        of which the one reducing inertia the most is greedily chosen.
        Set to None to make the number of trials depend logarithmically
        on the number of seeds (2+log(k)); this is the default.

    Returns
    -------
    centers: array, shape (n_clusters, n_features)
        The initial centers for k-means.

    Notes
    -----
    Selects initial cluster centers for k-mean clustering in a smart way
    to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
    "k-means++: the advantages of careful seeding". ACM-SIAM symposium
    on Discrete algorithms. 2007

    Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
    which is the implementation used in the aforementioned paper.
    """
    n_samples, n_features = X.shape

    centers = np.empty((n_clusters, n_features))

    assert x_squared_norms is not None, 'x_squared_norms None in _k_init'

    # Set the number of local seeding trials if none is given
    if n_local_trials is None:
        # This is what Arthur/Vassilvitskii tried, but did not report
        # specific results for other than mentioning in the conclusion
        # that it helped.
        n_local_trials = 2 + int(np.log(n_clusters))

    # Pick first center randomly
    center_id = random_state.randint(n_samples)
    if sp.issparse(X):
        centers[0] = X[center_id].toarray()
    else:
        centers[0] = X[center_id]

    # Initialize list of closest distances and calculate current potential
    closest_dist_sq = euclidean_distances(
        centers[0], X, Y_norm_squared=x_squared_norms, squared=True)
    current_pot = closest_dist_sq.sum()

    # Pick the remaining n_clusters-1 points
    for c in range(1, n_clusters):
        # Choose center candidates by sampling with probability proportional
        # to the squared distance to the closest existing center
        rand_vals = random_state.random_sample(n_local_trials) * current_pot
        candidate_ids = np.searchsorted(closest_dist_sq.cumsum(), rand_vals)

        # Compute distances to center candidates
        distance_to_candidates = euclidean_distances(
            X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)

        # Decide which candidate is the best
        best_candidate = None
        best_pot = None
        best_dist_sq = None
        for trial in range(n_local_trials):
            # Compute potential when including center candidate
            new_dist_sq = np.minimum(closest_dist_sq,
                                     distance_to_candidates[trial])
            new_pot = new_dist_sq.sum()

            # Store result if it is the best local trial so far
            if (best_candidate is None) or (new_pot < best_pot):
                best_candidate = candidate_ids[trial]
                best_pot = new_pot
                best_dist_sq = new_dist_sq

        # Permanently add best center candidate found in local tries
        if sp.issparse(X):
            centers[c] = X[best_candidate].toarray()
        else:
            centers[c] = X[best_candidate]
        current_pot = best_pot
        closest_dist_sq = best_dist_sq

    return centers
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
            n_init=10, max_iter=300, verbose=False,
            tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
            return_n_iter=False):
    """K-means clustering algorithm.
    Read more in the :ref:`User Guide <k_means>`.
    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        The observations to cluster.
    n_clusters : int
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter : int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.
    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.
    init : {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
        If a callable is passed, it should take arguments X, k and
        and a random state and return an initialization.
    precompute_distances : {'auto', True, False}
        Precompute distances (faster but takes more memory).
        'auto' : do not precompute distances if n_samples * n_clusters > 12
        million. This corresponds to about 100MB overhead per job using
        double precision.
        True : always precompute distances
        False : never precompute distances
    tol : float, optional
        The relative increment in the results before declaring convergence.
    verbose : boolean, optional
        Verbosity mode.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    copy_x : boolean, optional
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    return_n_iter : bool, optional
        Whether or not to return the number of iterations.
    Returns
    -------
    centroid : float ndarray with shape (k, n_features)
        Centroids found at the last iteration of k-means.
    label : integer ndarray with shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.
    inertia : float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).
    best_n_iter: int
        Number of iterations corresponding to the best results.
        Returned only if `return_n_iter` is set to True.
    """
    if n_init <= 0:
        raise ValueError("Invalid number of initializations."
                         " n_init=%d must be bigger than zero." % n_init)
    random_state = check_random_state(random_state)
    # NOTE: the previous version assigned best_inertia = np.infty here; the
    # value was dead code, unconditionally overwritten below before any read.
    X = as_float_array(X, copy=copy_x)
    tol = _tolerance(X, tol)
    # If the distances are precomputed every job will create a matrix of shape
    # (n_clusters, n_samples). To stop KMeans from eating up memory we only
    # activate this if the created matrix is guaranteed to be under 100MB. 12
    # million entries consume a little under 100MB if they are of type double.
    if precompute_distances == 'auto':
        n_samples = X.shape[0]
        precompute_distances = (n_clusters * n_samples) < 12e6
    elif isinstance(precompute_distances, bool):
        pass
    else:
        raise ValueError("precompute_distances should be 'auto' or True/False"
                         ", but a value of %r was passed" %
                         precompute_distances)
    # subtract of mean of x for more accurate distance computations
    if not sp.issparse(X) or hasattr(init, '__array__'):
        X_mean = X.mean(axis=0)
    if not sp.issparse(X):
        # The copy was already done above
        X -= X_mean
    if hasattr(init, '__array__'):
        # Explicit centers must live in the same (centered) coordinate
        # system as X, and make multiple inits pointless.
        init = np.asarray(init).copy()
        init -= X_mean
        if n_init != 1:
            warnings.warn(
                'Explicit initial center position passed: '
                'performing only one init in k-means instead of n_init=%d'
                % n_init, RuntimeWarning, stacklevel=2)
            n_init = 1
    # precompute squared norms of data points
    x_squared_norms = row_norms(X, squared=True)
    best_labels, best_inertia, best_centers = None, None, None
    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # run a k-means once
            labels, inertia, centers, n_iter_ = _kmeans_single(
                X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
                precompute_distances=precompute_distances, tol=tol,
                x_squared_norms=x_squared_norms, random_state=random_state)
            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_labels = labels.copy()
                best_centers = centers.copy()
                best_inertia = inertia
                best_n_iter = n_iter_
    else:
        # parallelisation of k-means runs
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_kmeans_single)(X, n_clusters, max_iter=max_iter,
                                    init=init, verbose=verbose, tol=tol,
                                    precompute_distances=precompute_distances,
                                    x_squared_norms=x_squared_norms,
                                    # Change seed to ensure variety
                                    random_state=seed)
            for seed in seeds)
        # Get results with the lowest inertia
        labels, inertia, centers, n_iters = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_n_iter = n_iters[best]
    if not sp.issparse(X):
        if not copy_x:
            # Undo the in-place centering so the caller's array is restored.
            X += X_mean
        best_centers += X_mean
    if return_n_iter:
        return best_centers, best_labels, best_inertia, best_n_iter
    else:
        return best_centers, best_labels, best_inertia
def _kmeans_single(X, n_clusters, x_squared_norms, max_iter=300,
                   init='k-means++', verbose=False, random_state=None,
                   tol=1e-4, precompute_distances=True):
    """A single run of k-means, assumes preparation completed prior.
    Parameters
    ----------
    X: array-like of floats, shape (n_samples, n_features)
        The observations to cluster.
    n_clusters: int
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter: int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.
    init: {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.
        If an ndarray is passed, it should be of shape (k, p) and gives
        the initial centers.
        If a callable is passed, it should take arguments X, k and
        and a random state and return an initialization.
    tol: float, optional
        The relative increment in the results before declaring convergence.
    verbose: boolean, optional
        Verbosity mode
    x_squared_norms: array
        Precomputed x_squared_norms.
    precompute_distances : boolean, default: True
        Precompute distances (faster but takes more memory).
    random_state: integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    Returns
    -------
    centroid: float ndarray with shape (k, n_features)
        Centroids found at the last iteration of k-means.
    label: integer ndarray with shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.
    inertia: float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).
    n_iter : int
        Number of iterations run.
    """
    random_state = check_random_state(random_state)
    best_labels, best_inertia, best_centers = None, None, None
    # init
    centers = _init_centroids(X, n_clusters, init, random_state=random_state,
                              x_squared_norms=x_squared_norms)
    if verbose:
        print("Initialization complete")
    # Allocate memory to store the distances for each sample to its
    # closer center for reallocation in case of ties
    distances = np.zeros(shape=(X.shape[0],), dtype=np.float64)
    # iterations
    for i in range(max_iter):
        # Snapshot the centers so movement can be measured after the update.
        centers_old = centers.copy()
        # labels assignment is also called the E-step of EM
        labels, inertia = \
            _labels_inertia(X, x_squared_norms, centers,
                            precompute_distances=precompute_distances,
                            distances=distances)
        # computation of the means is also called the M-step of EM
        if sp.issparse(X):
            centers = _k_means._centers_sparse(X, labels, n_clusters,
                                               distances)
        else:
            centers = _k_means._centers_dense(X, labels, n_clusters, distances)
        if verbose:
            print("Iteration %2d, inertia %.3f" % (i, inertia))
        # Keep the lowest-inertia state seen so far; note the reported
        # inertia is computed against the centers *before* this M-step.
        if best_inertia is None or inertia < best_inertia:
            best_labels = labels.copy()
            best_centers = centers.copy()
            best_inertia = inertia
        # Converged once the total squared center movement drops below tol.
        if squared_norm(centers_old - centers) <= tol:
            if verbose:
                print("Converged at iteration %d" % i)
            break
    return best_labels, best_inertia, best_centers, i + 1
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
    """Assign samples to their nearest center using a full distance matrix.

    Overwrites ``distances`` in-place when it has one slot per sample.

    Parameters
    ----------
    X : numpy array, shape (n_samples, n_features)
        Input data.
    x_squared_norms : numpy array, shape (n_samples,)
        Precomputed squared norms of X.
    centers : numpy array, shape (n_clusters, n_features)
        Cluster centers which data is assigned to.
    distances : numpy array, shape (n_samples,)
        Pre-allocated array in which distances are stored.

    Returns
    -------
    labels : numpy array, dtype=np.int32, shape (n_samples,)
        Indices of clusters that samples are assigned to.
    inertia : float
        Sum of distances of samples to their closest cluster center.
    """
    n_samples = X.shape[0]
    n_clusters = centers.shape[0]
    # One row of squared distances per center, one column per sample.
    dist_matrix = euclidean_distances(centers, X, x_squared_norms,
                                      squared=True)
    labels = np.full(n_samples, -1, dtype=np.int32)
    best_dist = np.full(n_samples, np.infty)
    for cluster_idx in range(n_clusters):
        row = dist_matrix[cluster_idx]
        # Claim every sample strictly closer to this center than to any
        # center seen so far (ties keep the earlier center).
        labels[row < best_dist] = cluster_idx
        np.minimum(row, best_dist, out=best_dist)
    if n_samples == distances.shape[0]:
        # Caller supplied a full-size buffer: report per-sample distances.
        distances[:] = best_dist
    return labels, best_dist.sum()
def _labels_inertia(X, x_squared_norms, centers,
                    precompute_distances=True, distances=None):
    """E step of the K-means EM algorithm.

    Assign every sample to its nearest center and accumulate the inertia
    (sum of squared distances to the assigned centers). ``distances`` is
    filled in-place when provided.

    Parameters
    ----------
    X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
        The input samples to assign to the labels.
    x_squared_norms: array, shape (n_samples,)
        Precomputed squared euclidean norm of each data point, to speed up
        computations.
    centers: float64 array, shape (k, n_features)
        The cluster centers.
    precompute_distances : boolean, default: True
        Precompute distances (faster but takes more memory).
    distances: float64 array, shape (n_samples,)
        Pre-allocated array to be filled in with each sample's distance
        to the closest center.

    Returns
    -------
    labels: int array of shape(n)
        The resulting assignment
    inertia : float
        Sum of distances of samples to their closest cluster center.
    """
    if distances is None:
        # Zero-length buffer: the cython kernels treat it as "don't store".
        distances = np.zeros(shape=(0,), dtype=np.float64)
    # Dense input with precomputation delegates to the full-matrix helper.
    if not sp.issparse(X) and precompute_distances:
        return _labels_inertia_precompute_dense(X, x_squared_norms,
                                                centers, distances)
    # Initialize every label to the sentinel -1 so that any sample left
    # unassigned by the kernel is easy to detect.
    labels = -np.ones(X.shape[0], np.int32)
    if sp.issparse(X):
        assign = _k_means._assign_labels_csr
    else:
        assign = _k_means._assign_labels_array
    inertia = assign(X, x_squared_norms, centers, labels,
                     distances=distances)
    return labels, inertia
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
                    init_size=None):
    """Compute the initial centroids
    Parameters
    ----------
    X: array, shape (n_samples, n_features)
    k: int
        number of centroids
    init: {'k-means++', 'random' or ndarray or callable} optional
        Method for initialization
    random_state: integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    x_squared_norms: array, shape (n_samples,), optional
        Squared euclidean norm of each data point. Pass it if you have it at
        hands already to avoid it being recomputed here. Default: None
    init_size : int, optional
        Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        only algorithm is initialized by running a batch KMeans on a
        random subset of the data. This needs to be larger than k.
    Returns
    -------
    centers: array, shape(k, n_features)
    """
    random_state = check_random_state(random_state)
    n_samples = X.shape[0]
    if init_size is not None and init_size < n_samples:
        if init_size < k:
            warnings.warn(
                "init_size=%d should be larger than k=%d. "
                "Setting it to 3*k" % (init_size, k),
                RuntimeWarning, stacklevel=2)
            init_size = 3 * k
        # Subsample the data to speed up initialization. randint draws from
        # the half-open interval [0, n_samples), which matches (and yields
        # the identical random stream as) the deprecated
        # random_integers(0, n_samples - 1, init_size) call it replaces.
        init_indices = random_state.randint(0, n_samples, init_size)
        X = X[init_indices]
        x_squared_norms = x_squared_norms[init_indices]
        n_samples = X.shape[0]
    elif n_samples < k:
        raise ValueError(
            "n_samples=%d should be larger than k=%d" % (n_samples, k))
    if init == 'k-means++':
        centers = _k_init(X, k, random_state=random_state,
                          x_squared_norms=x_squared_norms)
    elif init == 'random':
        # Sample k distinct rows uniformly without replacement.
        seeds = random_state.permutation(n_samples)[:k]
        centers = X[seeds]
    elif hasattr(init, '__array__'):
        centers = init
    elif callable(init):
        centers = init(X, k, random_state=random_state)
    else:
        raise ValueError("the init parameter for the k-means should "
                         "be 'k-means++' or 'random' or an ndarray, "
                         "'%s' (type '%s') was passed." % (init, type(init)))
    if sp.issparse(centers):
        centers = centers.toarray()
    if len(centers) != k:
        raise ValueError('The shape of the initial centers (%s) '
                         'does not match the number of clusters %i'
                         % (centers.shape, k))
    return centers
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
    """K-Means clustering
    Read more in the :ref:`User Guide <k_means>`.
    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter : int, default: 300
        Maximum number of iterations of the k-means algorithm for a
        single run.
    n_init : int, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.
    init : {'k-means++', 'random' or an ndarray}
        Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': choose k observations (rows) at random from data for
        the initial centroids.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
    precompute_distances : {'auto', True, False}
        Precompute distances (faster but takes more memory).
        'auto' : do not precompute distances if n_samples * n_clusters > 12
        million. This corresponds to about 100MB overhead per job using
        double precision.
        True : always precompute distances
        False : never precompute distances
    tol : float, default: 1e-4
        Relative tolerance with regards to inertia to declare convergence
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    verbose : int, default 0
        Verbosity mode.
    copy_x : boolean, default True
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.
    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers
    labels_ :
        Labels of each point
    inertia_ : float
        Sum of distances of samples to their closest cluster center.
    Notes
    ------
    The k-means problem is solved using Lloyd's algorithm.
    The average complexity is given by O(k n T), were n is the number of
    samples and T is the number of iteration.
    The worst case complexity is given by O(n^(k+2/p)) with
    n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
    'How slow is the k-means method?' SoCG2006)
    In practice, the k-means algorithm is very fast (one of the fastest
    clustering algorithms available), but it falls in local minima. That's why
    it can be useful to restart it several times.
    See also
    --------
    MiniBatchKMeans:
        Alternative online implementation that does incremental updates
        of the centers positions using mini-batches.
        For large scale learning (say n_samples > 10k) MiniBatchKMeans is
        probably much faster to than the default batch implementation.
    """
    def __init__(self, n_clusters=8, init='k-means++', n_init=10, max_iter=300,
                 tol=1e-4, precompute_distances='auto',
                 verbose=0, random_state=None, copy_x=True, n_jobs=1):
        # An explicit array of initial centers fixes n_clusters to the
        # number of rows supplied.
        if hasattr(init, '__array__'):
            n_clusters = init.shape[0]
            init = np.asarray(init, dtype=np.float64)
        self.n_clusters = n_clusters
        self.init = init
        self.max_iter = max_iter
        self.tol = tol
        self.precompute_distances = precompute_distances
        self.n_init = n_init
        self.verbose = verbose
        self.random_state = random_state
        self.copy_x = copy_x
        self.n_jobs = n_jobs
    def _check_fit_data(self, X):
        """Verify that the number of samples given is larger than k"""
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        if X.shape[0] < self.n_clusters:
            raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
                X.shape[0], self.n_clusters))
        return X
    def _check_test_data(self, X):
        # Validate data passed to transform/predict/score: must have the
        # same number of features as the fitted centers.
        X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES,
                        warn_on_dtype=True)
        n_samples, n_features = X.shape
        expected_n_features = self.cluster_centers_.shape[1]
        if not n_features == expected_n_features:
            raise ValueError("Incorrect number of features. "
                             "Got %d features, expected %d" % (
                                 n_features, expected_n_features))
        return X
    def fit(self, X, y=None):
        """Compute k-means clustering.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
        """
        random_state = check_random_state(self.random_state)
        X = self._check_fit_data(X)
        # Delegate the actual optimization to the module-level k_means
        # function; the estimator only stores its results.
        self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
            k_means(
                X, n_clusters=self.n_clusters, init=self.init,
                n_init=self.n_init, max_iter=self.max_iter,
                verbose=self.verbose, return_n_iter=True,
                precompute_distances=self.precompute_distances,
                tol=self.tol, random_state=random_state, copy_x=self.copy_x,
                n_jobs=self.n_jobs)
        return self
    def fit_predict(self, X, y=None):
        """Compute cluster centers and predict cluster index for each sample.
        Convenience method; equivalent to calling fit(X) followed by
        predict(X).
        """
        return self.fit(X).labels_
    def fit_transform(self, X, y=None):
        """Compute clustering and transform X to cluster-distance space.
        Equivalent to fit(X).transform(X), but more efficiently implemented.
        """
        # Currently, this just skips a copy of the data if it is not in
        # np.array or CSR format already.
        # XXX This skips _check_test_data, which may change the dtype;
        # we should refactor the input validation.
        X = self._check_fit_data(X)
        return self.fit(X)._transform(X)
    def transform(self, X, y=None):
        """Transform X to a cluster-distance space.
        In the new space, each dimension is the distance to the cluster
        centers. Note that even if X is sparse, the array returned by
        `transform` will typically be dense.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to transform.
        Returns
        -------
        X_new : array, shape [n_samples, k]
            X transformed in the new space.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        return self._transform(X)
    def _transform(self, X):
        """guts of transform method; no input validation"""
        # Note: returns plain (non-squared) euclidean distances.
        return euclidean_distances(X, self.cluster_centers_)
    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.
        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.
        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        x_squared_norms = row_norms(X, squared=True)
        # Only the labels (first element) of the E-step result are needed.
        return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
    def score(self, X, y=None):
        """Opposite of the value of X on the K-means objective.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data.
        Returns
        -------
        score : float
            Opposite of the value of X on the K-means objective.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        x_squared_norms = row_norms(X, squared=True)
        # Negated inertia: higher is better, per the scikit-learn scoring
        # convention.
        return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
def _mini_batch_step(X, x_squared_norms, centers, counts,
                     old_center_buffer, compute_squared_diff,
                     distances, random_reassign=False,
                     random_state=None, reassignment_ratio=.01,
                     verbose=False):
    """Incremental update of the centers for the Minibatch K-Means algorithm.
    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        The original data array.
    x_squared_norms : array, shape (n_samples,)
        Squared euclidean norm of each data point.
    centers : array, shape (k, n_features)
        The cluster centers. This array is MODIFIED IN PLACE
    counts : array, shape (k,)
        The vector in which we keep track of the numbers of elements in a
        cluster. This array is MODIFIED IN PLACE
    distances : array, dtype float64, shape (n_samples), optional
        If not None, should be a pre-allocated array that will be used to store
        the distances of each sample to its closest center.
        May not be None when random_reassign is True.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    random_reassign : boolean, optional
        If True, centers with very low counts are randomly reassigned
        to observations.
    reassignment_ratio : float, optional
        Control the fraction of the maximum number of counts for a
        center to be reassigned. A higher value means that low count
        centers are more likely to be reassigned, which means that the
        model will take longer to converge, but should converge in a
        better clustering.
    verbose : bool, optional, default False
        Controls the verbosity.
    compute_squared_diff : bool
        If set to False, the squared diff computation is skipped.
    old_center_buffer : int
        Copy of old centers for monitoring convergence.
    Returns
    -------
    inertia : float
        Sum of distances of samples to their closest cluster center.
    squared_diff : numpy array, shape (n_clusters,)
        Squared distances between previous and updated cluster centers.
    """
    # Perform label assignment to nearest centers
    nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
                                              distances=distances)
    if random_reassign and reassignment_ratio > 0:
        random_state = check_random_state(random_state)
        # Reassign clusters that have very low counts
        to_reassign = counts < reassignment_ratio * counts.max()
        # pick at most .5 * batch_size samples as new centers
        if to_reassign.sum() > .5 * X.shape[0]:
            # Keep the highest-count candidates; only reassign the rest.
            indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):]
            to_reassign[indices_dont_reassign] = False
        n_reassigns = to_reassign.sum()
        if n_reassigns:
            # Pick new clusters amongst observations with uniform probability
            new_centers = choice(X.shape[0], replace=False, size=n_reassigns,
                                 random_state=random_state)
            if verbose:
                print("[MiniBatchKMeans] Reassigning %i cluster centers."
                      % n_reassigns)
            if sp.issparse(X) and not sp.issparse(centers):
                # Copy the chosen sparse rows into the dense centers array.
                assign_rows_csr(X,
                                astype(new_centers, np.intp),
                                astype(np.where(to_reassign)[0], np.intp),
                                centers)
            else:
                centers[to_reassign] = X[new_centers]
        # reset counts of reassigned centers, but don't reset them too small
        # to avoid instant reassignment. This is a pretty dirty hack as it
        # also modifies the learning rates.
        counts[to_reassign] = np.min(counts[~to_reassign])
    # implementation for the sparse CSR representation completely written in
    # cython
    if sp.issparse(X):
        return inertia, _k_means._mini_batch_update_csr(
            X, x_squared_norms, centers, counts, nearest_center,
            old_center_buffer, compute_squared_diff)
    # dense variant in mostly numpy (not as memory efficient though)
    k = centers.shape[0]
    squared_diff = 0.0
    for center_idx in range(k):
        # find points from minibatch that are assigned to this center
        center_mask = nearest_center == center_idx
        count = center_mask.sum()
        if count > 0:
            if compute_squared_diff:
                old_center_buffer[:] = centers[center_idx]
            # inplace remove previous count scaling
            centers[center_idx] *= counts[center_idx]
            # inplace sum with new points members of this cluster
            centers[center_idx] += np.sum(X[center_mask], axis=0)
            # update the count statistics for this center
            counts[center_idx] += count
            # inplace rescale to compute mean of all points (old and new)
            centers[center_idx] /= counts[center_idx]
            # update the squared diff if necessary
            if compute_squared_diff:
                diff = centers[center_idx].ravel() - old_center_buffer.ravel()
                squared_diff += np.dot(diff, diff)
    return inertia, squared_diff
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulte the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
class MiniBatchKMeans(KMeans):
"""Mini-Batch K-Means clustering
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion heuristics.
max_no_improvement : int, default: 10
Control early stopping based on the consecutive number of mini
batches that does not yield an improvement on the smoothed inertia.
To disable convergence detection based on inertia, set
max_no_improvement to None.
tol : float, default: 0.0
Control early stopping based on the relative center changes as
measured by a smoothed, variance-normalized of the mean center
squared position changes. This early stopping heuristics is
closer to the one used for the batch variant of the algorithms
but induces a slight computational and memory overhead over the
inertia heuristic.
To disable convergence detection based on normalized center
change, set tol to 0.0 (default).
batch_size : int, optional, default: 100
Size of the mini batches.
init_size : int, optional, default: 3 * batch_size
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy): the
only algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than n_clusters.
init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
Method for initialization, defaults to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
n_init : int, default=3
Number of random initializations that are tried.
In contrast to KMeans, the algorithm is only run once, using the
best of the ``n_init`` initializations as measured by inertia.
compute_labels : boolean, default=True
Compute label assignment and inertia for the complete dataset
once the minibatch optimization has converged in fit.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
reassignment_ratio : float, default: 0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more easily reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : boolean, optional
Verbosity mode.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point (if compute_labels is set to True).
inertia_ : float
The value of the inertia criterion associated with the chosen
partition (if compute_labels is set to True). The inertia is
defined as the sum of square distances of samples to their nearest
neighbor.
Notes
-----
See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
"""
    def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
                 batch_size=100, verbose=0, compute_labels=True,
                 random_state=None, tol=0.0, max_no_improvement=10,
                 init_size=None, n_init=3, reassignment_ratio=0.01):
        # Hyper-parameters shared with batch k-means are handled by the
        # KMeans base class; only mini-batch specific settings are stored
        # directly on this instance.
        super(MiniBatchKMeans, self).__init__(
            n_clusters=n_clusters, init=init, max_iter=max_iter,
            verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)
        # early-stopping patience on the smoothed inertia (None disables it)
        self.max_no_improvement = max_no_improvement
        # number of samples drawn per incremental update
        self.batch_size = batch_size
        # whether fit() does a final full-dataset labeling pass
        self.compute_labels = compute_labels
        # subsample size used for initialization (defaults to 3 * batch_size
        # in fit when left as None)
        self.init_size = init_size
        # fraction controlling random reassignment of low-count centers
        self.reassignment_ratio = reassignment_ratio
def fit(self, X, y=None):
    """Compute the centroids on X by chunking it into mini-batches.

    Runs ``n_init`` random initializations, keeps the one with the lowest
    inertia on a fixed validation subset, then iterates mini-batch updates
    until convergence or ``max_iter`` passes over the data.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Coordinates of the data points to cluster.
    y : Ignored.
        Present for API consistency.

    Returns
    -------
    self : the fitted estimator.
    """
    random_state = check_random_state(self.random_state)
    X = check_array(X, accept_sparse="csr", order='C', dtype=np.float64)
    n_samples, n_features = X.shape
    if n_samples < self.n_clusters:
        raise ValueError("Number of samples smaller than number "
                         "of clusters.")
    n_init = self.n_init
    if hasattr(self.init, '__array__'):
        # Explicit centers were passed: there is nothing random to retry,
        # so force a single initialization.
        self.init = np.ascontiguousarray(self.init, dtype=np.float64)
        if n_init != 1:
            warnings.warn(
                'Explicit initial center position passed: '
                'performing only one init in MiniBatchKMeans instead of '
                'n_init=%d'
                % self.n_init, RuntimeWarning, stacklevel=2)
            n_init = 1
    x_squared_norms = row_norms(X, squared=True)
    if self.tol > 0.0:
        tol = _tolerance(X, self.tol)
        # using tol-based early stopping needs the allocation of a
        # dedicated buffer, which can be expensive for high dim data:
        # hence we allocate it outside of the main loop
        old_center_buffer = np.zeros(n_features, np.double)
    else:
        tol = 0.0
        # no need for the center buffer if tol-based early stopping is
        # disabled
        old_center_buffer = np.zeros(0, np.double)
    distances = np.zeros(self.batch_size, dtype=np.float64)
    n_batches = int(np.ceil(float(n_samples) / self.batch_size))
    n_iter = int(self.max_iter * n_batches)
    init_size = self.init_size
    if init_size is None:
        init_size = 3 * self.batch_size
    if init_size > n_samples:
        init_size = n_samples
    self.init_size_ = init_size
    # Fixed validation subset shared by all inits so their inertias are
    # comparable.
    # NOTE(review): random_integers is deprecated in modern NumPy
    # (use randint(0, n_samples, size) instead) — confirm before upgrading.
    validation_indices = random_state.random_integers(
        0, n_samples - 1, init_size)
    X_valid = X[validation_indices]
    x_squared_norms_valid = x_squared_norms[validation_indices]
    # perform several inits with random sub-sets
    best_inertia = None
    for init_idx in range(n_init):
        if self.verbose:
            print("Init %d/%d with method: %s"
                  % (init_idx + 1, n_init, self.init))
        counts = np.zeros(self.n_clusters, dtype=np.int32)
        # TODO: once the `k_means` function works with sparse input we
        # should refactor the following init to use it instead.
        # Initialize the centers using only a fraction of the data as we
        # expect n_samples to be very large when using MiniBatchKMeans
        cluster_centers = _init_centroids(
            X, self.n_clusters, self.init,
            random_state=random_state,
            x_squared_norms=x_squared_norms,
            init_size=init_size)
        # Compute the label assignment on the init dataset
        batch_inertia, centers_squared_diff = _mini_batch_step(
            X_valid, x_squared_norms[validation_indices],
            cluster_centers, counts, old_center_buffer, False,
            distances=None, verbose=self.verbose)
        # Keep only the best cluster centers across independent inits on
        # the common validation set
        _, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
                                     cluster_centers)
        if self.verbose:
            print("Inertia for init %d/%d: %f"
                  % (init_idx + 1, n_init, inertia))
        if best_inertia is None or inertia < best_inertia:
            self.cluster_centers_ = cluster_centers
            self.counts_ = counts
            best_inertia = inertia
    # Empty context to be used inplace by the convergence check routine
    convergence_context = {}
    # Perform the iterative optimization until the final convergence
    # criterion
    for iteration_idx in range(n_iter):
        # Sample a minibatch from the full dataset
        minibatch_indices = random_state.random_integers(
            0, n_samples - 1, self.batch_size)
        # Perform the actual update step on the minibatch data
        batch_inertia, centers_squared_diff = _mini_batch_step(
            X[minibatch_indices], x_squared_norms[minibatch_indices],
            self.cluster_centers_, self.counts_,
            old_center_buffer, tol > 0.0, distances=distances,
            # Here we randomly choose whether to perform
            # random reassignment: the choice is done as a function
            # of the iteration index, and the minimum number of
            # counts, in order to force this reassignment to happen
            # every once in a while
            random_reassign=((iteration_idx + 1)
                             % (10 + self.counts_.min()) == 0),
            random_state=random_state,
            reassignment_ratio=self.reassignment_ratio,
            verbose=self.verbose)
        # Monitor convergence and do early stopping if necessary
        if _mini_batch_convergence(
                self, iteration_idx, n_iter, tol, n_samples,
                centers_squared_diff, batch_inertia, convergence_context,
                verbose=self.verbose):
            break
    self.n_iter_ = iteration_idx + 1
    if self.compute_labels:
        self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)
    return self
def _labels_inertia_minibatch(self, X):
    """Compute labels and inertia batch-by-batch.

    Slightly slower than a single full pass, but keeps peak memory
    bounded and prevents memory errors / segfaults on large inputs.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input data.

    Returns
    -------
    labels : array, shape (n_samples,)
        Cluster labels for each point.
    inertia : float
        Sum of squared distances of points to nearest cluster.
    """
    if self.verbose:
        print('Computing label assignment and total inertia')
    x_squared_norms = row_norms(X, squared=True)
    labels_parts = []
    inertia_parts = []
    # Assign labels one batch_size-sized slice at a time.
    for batch in gen_batches(X.shape[0], self.batch_size):
        batch_labels, batch_inertia = _labels_inertia(
            X[batch], x_squared_norms[batch], self.cluster_centers_)
        labels_parts.append(batch_labels)
        inertia_parts.append(batch_inertia)
    return np.hstack(labels_parts), np.sum(inertia_parts)
def partial_fit(self, X, y=None):
    """Update k means estimate on a single mini-batch X.

    On the first call, initializes ``cluster_centers_`` and ``counts_``;
    on subsequent calls performs one mini-batch update step on X.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Coordinates of the data points to cluster.
    y : Ignored.
        Present for API consistency.

    Returns
    -------
    self : the (partially) fitted estimator.
    """
    X = check_array(X, accept_sparse="csr")
    n_samples, n_features = X.shape
    if hasattr(self.init, '__array__'):
        self.init = np.ascontiguousarray(self.init, dtype=np.float64)
    # An empty batch is a no-op.
    if n_samples == 0:
        return self
    x_squared_norms = row_norms(X, squared=True)
    # Persist the RNG across calls so repeated partial_fit calls continue
    # the same random stream.
    self.random_state_ = getattr(self, "random_state_",
                                 check_random_state(self.random_state))
    if (not hasattr(self, 'counts_')
            or not hasattr(self, 'cluster_centers_')):
        # this is the first call partial_fit on this object:
        # initialize the cluster centers
        self.cluster_centers_ = _init_centroids(
            X, self.n_clusters, self.init,
            random_state=self.random_state_,
            x_squared_norms=x_squared_norms, init_size=self.init_size)
        self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
        random_reassign = False
        distances = None
    else:
        # The lower the minimum count is, the more we do random
        # reassignment, however, we don't want to do random
        # reassignment too often, to allow for building up counts
        random_reassign = self.random_state_.randint(
            10 * (1 + self.counts_.min())) == 0
        distances = np.zeros(X.shape[0], dtype=np.float64)
    _mini_batch_step(X, x_squared_norms, self.cluster_centers_,
                     self.counts_, np.zeros(0, np.double), 0,
                     random_reassign=random_reassign, distances=distances,
                     random_state=self.random_state_,
                     reassignment_ratio=self.reassignment_ratio,
                     verbose=self.verbose)
    if self.compute_labels:
        self.labels_, self.inertia_ = _labels_inertia(
            X, x_squared_norms, self.cluster_centers_)
    return self
def predict(self, X):
    """Predict the closest cluster each sample in X belongs to.

    In the vector quantization literature, `cluster_centers_` is called
    the code book and each value returned by `predict` is the index of
    the closest code in the code book.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        New data to predict.

    Returns
    -------
    labels : array, shape [n_samples,]
        Index of the cluster each sample belongs to.
    """
    # Refuse to predict before fit/partial_fit has set the centers.
    check_is_fitted(self, 'cluster_centers_')
    X = self._check_test_data(X)
    # Batched label assignment; the inertia component is not needed here.
    labels, _ = self._labels_inertia_minibatch(X)
    return labels
| WangWenjun559/Weiss | summary/sumy/sklearn/cluster/k_means_.py | Python | apache-2.0 | 54,694 | [
"Gaussian"
] | 2912b2ab6bd732b699a8f3512b5bc2da328c22c7b608f0a244bb2c0ef0b35ee4 |
# The 1,000 most frequent English words, in alphabetical order.
# Stored as one whitespace-separated literal and split into a list of str
# at import time; the result is element-for-element identical to spelling
# the 1,000 string literals out individually.
vocab1k = (
    "a ability able about above accept according account across act "
    "action activity actually add address administration admit adult affect after "
    "again against age agency agent ago agree agreement ahead air "
    "all allow almost alone along already also although always american "
    "among amount analysis and animal another answer any anyone anything "
    "appear apply approach area argue arm around arrive art article "
    "artist as ask assume at attack attention attorney audience author "
    "authority available avoid away baby back bad bag ball bank "
    "bar base be beat beautiful because become bed before begin "
    "behavior behind believe benefit best better between beyond big bill "
    "billion bit black blood blue board body book born both "
    "box boy break bring brother budget build building business but "
    "buy by call camera campaign can cancer candidate capital car "
    "card care career carry case catch cause cell center central "
    "century certain certainly chair challenge chance change character charge check "
    "child choice choose church citizen city civil claim class clear "
    "clearly close coach cold collection college color come commercial common "
    "community company compare computer concern condition conference congress consider consumer "
    "contain continue control cost could country couple course court cover "
    "create crime cultural culture cup current customer cut dark data "
    "daughter day dead deal death debate decade decide decision deep "
    "defense degree democrat democratic describe design despite detail determine develop "
    "development die difference different difficult dinner direction director discover discuss "
    "discussion disease do doctor dog door down draw dream drive "
    "drop drug during each early east easy eat economic economy "
    "edge education effect effort eight either election else employee end "
    "energy enjoy enough enter entire environment environmental especially establish even "
    "evening event ever every everybody everyone everything evidence exactly example "
    "executive exist expect experience expert explain eye face fact factor "
    "fail fall family far fast father fear federal feel feeling "
    "few field fight figure fill film final finally financial find "
    "fine finger finish fire firm first fish five floor fly "
    "focus follow food foot for force foreign forget form former "
    "forward four free friend from front full fund future game "
    "garden gas general generation get girl give glass go goal "
    "good government great green ground group grow growth guess gun "
    "guy hair half hand hang happen happy hard have he "
    "head health hear heart heat heavy help her here herself "
    "high him himself his history hit hold home hope hospital "
    "hot hotel hour house how however huge human hundred husband "
    "i idea identify if image imagine impact important improve in "
    "include including increase indeed indicate individual industry information inside instead "
    "institution interest interesting international interview into investment involve issue it "
    "item its itself job join just keep key kid kill "
    "kind kitchen know knowledge land language large last late later "
    "laugh law lawyer lay lead leader learn least leave left "
    "leg legal less let letter level lie life light like "
    "likely line list listen little live local long look lose "
    "loss lot love low machine magazine main maintain major majority "
    "make man manage management manager many market marriage material matter "
    "may maybe me mean measure media medical meet meeting member "
    "memory mention message method middle might military million mind minute "
    "miss mission model modern moment money month more morning most "
    "mother mouth move movement movie mr mrs much music must "
    "my myself name nation national natural nature near nearly necessary "
    "need network never new news newspaper next nice night no "
    "none nor north not note nothing notice now n't number "
    "occur of off offer office officer official often oh oil "
    "ok old on once one only onto open operation opportunity "
    "option or order organization other others our out outside over "
    "own owner page pain painting paper parent part participant particular "
    "particularly partner party pass past patient pattern pay peace people "
    "per perform performance perhaps period person personal phone physical pick "
    "picture piece place plan plant play player pm point police "
    "policy political politics poor popular population position positive possible power "
    "practice prepare present president pressure pretty prevent price private probably "
    "problem process produce product production professional professor program project property "
    "protect prove provide public pull purpose push put quality question "
    "quickly quite race radio raise range rate rather reach read "
    "ready real reality realize really reason receive recent recently recognize "
    "record red reduce reflect region relate relationship religious remain remember "
    "remove report represent republican require research resource respond response responsibility "
    "rest result return reveal rich right rise risk road rock "
    "role room rule run safe same save say scene school "
    "science scientist score sea season seat second section security see "
    "seek seem sell send senior sense series serious serve service "
    "set seven several sex sexual shake share she shoot short "
    "shot should shoulder show side sign significant similar simple simply "
    "since sing single sister sit site situation six size skill "
    "skin small smile so social society soldier some somebody someone "
    "something sometimes son song soon sort sound source south southern "
    "space speak special specific speech spend sport spring staff stage "
    "stand standard star start state statement station stay step still "
    "stock stop store story strategy street strong structure student study "
    "stuff style subject success successful such suddenly suffer suggest summer "
    "support sure surface system table take talk task tax teach "
    "teacher team technology television tell ten tend term test than "
    "thank that the their them themselves then theory there these "
    "they thing think third this those though thought thousand threat "
    "three through throughout throw thus time to today together tonight "
    "too top total tough toward town trade traditional training travel "
    "treat treatment tree trial trip trouble true truth try turn "
    "tv two type under understand unit until up upon us "
    "use usually value various very victim view violence visit voice "
    "vote wait walk wall want war watch water way we "
    "weapon wear week weight well west western what whatever when "
    "where whether which while white who whole whom whose why "
    "wide wife will win wind window wish with within without "
    "woman wonder word work worker world worry would write writer "
    "wrong yard yeah year yes yet you young your yourself"
).split()
| HazyResearch/metal | synthetic/words1k.py | Python | apache-2.0 | 13,536 | [
"VisIt"
] | 12e78dcd667a1940afc64ed0a4a45f24809dab4a8ed1e77744a918edd06d185d |
# Filter step of a remapping pipeline: keep an original read only if every
# one of its remapped alternative versions mapped back to the original
# coordinates with a clean CIGAR.
import sys, pysam, gzip, pdb, argparse, pdb  # NOTE(review): 'pdb' imported twice (debugging leftover)

# Command line: -p flags paired-end data; positional args are input BAMs,
# the output BAM, and a gzipped file of per-read expected remap counts.
parser = argparse.ArgumentParser()
parser.add_argument("-p", action='store_true', dest='is_paired_end', default=False)
parser.add_argument("orig_bam")
parser.add_argument("remap_bam")
parser.add_argument("keep_bam")
parser.add_argument("orig_num_file")
options= parser.parse_args()

orig_bam=pysam.Samfile(options.orig_bam,"rb")
remap_bam=pysam.Samfile(options.remap_bam,"rb")
# output BAM reuses the header of the original BAM
keep_bam=pysam.Samfile(options.keep_bam,"wb",template=orig_bam)
orig_num_file=gzip.open(options.orig_num_file)

correct_maps=[]
end_of_file=False

# Get a list of reads that remapped correctly.
# Each remapped read's qname encodes colon-separated fields:
# field 0 = original read number, field 1 = chromosome,
# fields 2/3 = expected positions (field 3 is used for reverse-strand reads).
remap_read=remap_bam.next()
while not end_of_file:
    chrm=remap_read.qname.strip().split(":")[1]
    if remap_read.is_reverse:
        pos=int(remap_read.qname.strip().split(":")[3])
    else:
        pos=int(remap_read.qname.strip().split(":")[2])
    read_num=int(remap_read.qname.strip().split(":")[0])
    # read must be mapped (tid != -1) at the expected position and chromosome
    if remap_read.tid != -1 and remap_read.pos==pos and remap_bam.getrname(remap_read.tid)==chrm:
        dels=0 #Throw out the remapped read if it remapped with a deletion...for now
        for cig in remap_read.cigar:
            # only CIGAR op codes 0 (M), 3 (N) and 4 (S) are tolerated
            if not cig[0] in (0,3,4):
                dels+=1
        if dels==0:
            correct_maps.append(read_num)
    try:
        remap_read=remap_bam.next()
    except:  # NOTE(review): bare except used to catch end-of-BAM (StopIteration)
        end_of_file=True

# Sort this list (read numbers can arrive out of order in the remapped BAM)
correct_maps.sort()

sys.stderr.write(str(len(correct_maps))+" reads remapped to the correct position\n")

# Pull out original aligned reads if all of the alternatives mapped correctly.
# Walk the original BAM (line_num) and the expected-count file in lockstep
# with the sorted list of correctly remapped read numbers.
orig_read=orig_bam.next()
orig_num=int(orig_num_file.readline().strip())
line_num=1
map_indx=0
correct=0
end_of_file=False
while not end_of_file and map_indx< len(correct_maps) and line_num <= correct_maps[-1]:
    if line_num < correct_maps[map_indx]:
        # done counting hits for this original read: keep it only when the
        # number of correct remaps equals the expected count
        if orig_num==correct:
            keep_bam.write(orig_read)
        if options.is_paired_end:
            # paired-end: the mate follows immediately; keep or drop it
            # together with its pair
            try:
                orig_read=orig_bam.next()
            except:
                sys.stderr.write("File ended unexpectedly (no pair found)")
                exit()
            if orig_num==correct:
                keep_bam.write(orig_read)
        line_num+=1
        correct=0
        try:
            orig_read=orig_bam.next()
            orig_num=int(orig_num_file.readline().strip())
        except:  # end of original BAM or of the count file
            end_of_file=True
    elif line_num == correct_maps[map_indx]:
        # one more alternative version of read line_num remapped correctly
        correct+=1
        map_indx+=1
    else:
        # correct_maps is sorted, so line_num should never overshoot
        sys.stderr.write("There was a problem with the index sorting\n")
        exit()
| PrincetonUniversity/WASP | mapping/filter_remapped_reads.py | Python | apache-2.0 | 2,576 | [
"pysam"
] | 9bdffb5a5d2d12fa4d4656ed4d50af2b100e69df16a8d9a0e20efabb7622dc9a |
##
# Copyright 2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for installing FoldX, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import os
import shutil
import stat
from easybuild.easyblocks.generic.tarball import Tarball
from easybuild.tools.filetools import adjust_permissions
class EB_FoldX(Tarball):
    """
    Support for installing FoldX
    """

    def install_step(self):
        """Install by copying files to install dir and binary to 'bin' install subdir, and fixing permissions."""
        bindir = os.path.join(self.installdir, 'bin')
        # known FoldX binary names across supported versions
        binaries = [
            'foldx_%s.linux' % self.version,  # FoldX v2.x
            'FoldX.linux64',  # FoldX 3.x
            'foldx64Linux',  # FoldX 3.0-beta6
        ]
        try:
            os.makedirs(bindir)
            for item in os.listdir(self.cfg['start_dir']):
                # Bug fix: resolve entries against start_dir rather than
                # relying on the current working directory; the original
                # isfile() check used a cwd-relative name while the copies
                # used start_dir-joined paths, which breaks when the cwd is
                # not start_dir.
                item_path = os.path.join(self.cfg['start_dir'], item)
                if os.path.isfile(item_path):
                    if item in binaries:
                        shutil.copy2(item_path, bindir)
                        # make sure binary has executable permissions
                        adjust_permissions(os.path.join(bindir, item), stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH, add=True)
                        self.log.debug("Copied %s to %s and fixed permissions" % (item, bindir))
                    else:
                        # copy everything else straight into install directory
                        shutil.copy2(item_path, self.installdir)
                        self.log.debug("Copied %s to install dir %s" % (item, self.installdir))
                else:
                    self.log.warning("Skipping non-file %s in %s, not copying it." % (item, self.cfg['start_dir']))
        except OSError as err:
            # log.exception reports the failure (and raises in EasyBuild)
            self.log.exception("Copying binaries in %s to install dir 'bin' failed: %s" % (self.cfg['start_dir'], err))
| geimer/easybuild-easyblocks | easybuild/easyblocks/f/foldx.py | Python | gpl-2.0 | 2,924 | [
"FoldX"
] | 7468b6ba35187f0400f5135973e40acae3312e2d72fe8ea7eb8b79f3ed035fbc |
# coding: utf-8
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import random
import re
import socket
import sys
import time
import math
from ..compat import (
compat_cookiejar,
compat_cookies,
compat_etree_fromstring,
compat_getpass,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
compat_xml_parse_error,
)
from ..downloader.f4m import remove_encrypted_media
from ..utils import (
NO_DEFAULT,
age_restricted,
base_url,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
determine_protocol,
error_to_compat_str,
ExtractorError,
extract_attributes,
fix_xml_ampersands,
float_or_none,
GeoRestrictedError,
GeoUtils,
int_or_none,
js_to_json,
mimetype2ext,
orderedSet,
parse_codecs,
parse_duration,
parse_iso8601,
parse_m3u8_attributes,
RegexNotFoundError,
sanitized_Request,
sanitize_filename,
unescapeHTML,
unified_strdate,
unified_timestamp,
update_Request,
update_url_query,
urljoin,
url_basename,
xpath_element,
xpath_text,
xpath_with_ns,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url Mandatory. The URL of the video file
* manifest_url
The URL of the manifest file in case of
fragmented media (DASH, hls, hds)
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height.
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", "m3u8_native" or "http_dash_segments".
* fragment_base_url
Base URL for fragments. Each fragment's path
value (if present) will be relative to
this URL.
* fragments A list of fragments of a fragmented media.
Each fragment entry must contain either an url
or a path. If an url is present it should be
considered by a client. Otherwise both path and
fragment_base_url must be present. Here is
the list of all potential fields:
* "url" - fragment's URL
* "path" - fragment's path relative to
fragment_base_url
* "duration" (optional, int or float)
* "filesize" (optional, int)
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height}",
deprecated)
* "filesize" (optional, int)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{tag: subformats}. "tag" is usually a language code, and
"subformats" is a list sorted from lower to higher
preference, each element is a dictionary with the "ext"
entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
average_rating: Average rating give by users, the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to youtube-dl it
should allow to get the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
chapters: A list of dictionaries, with the following entries:
* "start_time" - The start time of the chapter in seconds
* "end_time" - The end time of the chapter in seconds
* "title" (optional, string)
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series, programme or podcast:
series: Title of the series or programme the video episode belongs to.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
album_artist: List of all artists appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "title", "description" and "id" attributes
with the same semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
form a single show, for examples multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
_GEO_BYPASS attribute may be set to False in order to disable
geo restriction bypass mechanisms for a particular extractor.
Though it won't disable explicit geo restriction bypass based on
country code provided with geo_bypass_country. (experimental)
_GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
countries for this extractor. One of these countries will be used by
geo restriction bypass mechanism right away in order to bypass
geo restriction, of course, if the mechanism is not disabled. (experimental)
NB: both these geo attributes are experimental and may change in future
or be completely removed.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False                # True once _real_initialize() has run
_downloader = None            # YoutubeDL instance, set via set_downloader()
_x_forwarded_for_ip = None    # faked X-Forwarded-For IP for geo bypass
_GEO_BYPASS = True            # subclasses set False to disable geo bypass
_GEO_COUNTRIES = None         # presumed geo-unrestricted countries, if any
_WORKING = True               # set False for broken IEs to warn users/skip tests
    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader (a YoutubeDL instance)."""
        # Per-instance state must exist before set_downloader() runs, in case
        # a subclass override inspects it.
        self._ready = False
        self._x_forwarded_for_ip = None
        self.set_downloader(downloader)
    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        # This does not use has/getattr intentionally - we want to know whether
        # we have cached the regexp for *this* class, whereas getattr would also
        # match the superclass
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        return cls._VALID_URL_RE.match(url) is not None
    @classmethod
    def _match_id(cls, url):
        """Extract the 'id' named group from _VALID_URL for *url*.

        The URL is assumed to already match _VALID_URL (asserted below);
        callers should have gone through suitable() first.
        """
        # Same per-class regexp cache as suitable(); see the comment there.
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        m = cls._VALID_URL_RE.match(url)
        assert m
        return compat_str(m.group('id'))
    @classmethod
    def working(cls):
        """Getter method for _WORKING (False marks a known-broken extractor)."""
        return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
self._initialize_geo_bypass(self._GEO_COUNTRIES)
if not self._ready:
self._real_initialize()
self._ready = True
    def _initialize_geo_bypass(self, countries):
        """
        Initialize geo restriction bypass mechanism.
        This method is used to initialize geo bypass mechanism based on faking
        X-Forwarded-For HTTP header. A random country from provided country list
        is selected and a random IP belonging to this country is generated. This
        IP will be passed as X-Forwarded-For HTTP header in all subsequent
        HTTP requests.
        This method will be used for initial geo bypass mechanism initialization
        during the instance initialization with _GEO_COUNTRIES.
        You may also manually call it from extractor's code if geo countries
        information is not available beforehand (e.g. obtained during
        extraction) or due to some another reason.
        """
        # Only pick an IP once per instance; repeated calls are no-ops.
        if not self._x_forwarded_for_ip:
            country_code = self._downloader.params.get('geo_bypass_country', None)
            # If there is no explicit country for geo bypass specified and
            # the extractor is known to be geo restricted let's fake IP
            # as X-Forwarded-For right away.
            if (not country_code and
                    self._GEO_BYPASS and
                    self._downloader.params.get('geo_bypass', True) and
                    countries):
                country_code = random.choice(countries)
            if country_code:
                self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
                if self._downloader.params.get('verbose', False):
                    self._downloader.to_screen(
                        '[debug] Using fake IP %s (%s) as X-Forwarded-For.'
                        % (self._x_forwarded_for_ip, country_code.upper()))
    def extract(self, url):
        """Extracts URL information and returns it in list of dicts."""
        try:
            # At most two attempts: the original one, plus a single retry with
            # a freshly faked X-Forwarded-For IP on geo restriction errors.
            for _ in range(2):
                try:
                    self.initialize()
                    ie_result = self._real_extract(url)
                    if self._x_forwarded_for_ip:
                        # Record the faked IP so the downloader can reuse it.
                        ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
                    return ie_result
                except GeoRestrictedError as e:
                    if self.__maybe_fake_ip_and_retry(e.countries):
                        continue
                    raise
        # ExtractorError passes through unchanged; only low-level network and
        # generic programming errors get wrapped below.
        except ExtractorError:
            raise
        except compat_http_client.IncompleteRead as e:
            raise ExtractorError('A network error has occurred.', cause=e, expected=True)
        except (KeyError, StopIteration) as e:
            raise ExtractorError('An extractor error has occurred.', cause=e)
    def __maybe_fake_ip_and_retry(self, countries):
        """Fake an X-Forwarded-For IP for one of *countries* if allowed.

        Returns True when a fake IP was set (caller should retry extraction),
        False otherwise. Never retries if a fake IP is already in place or the
        user disabled/overrode geo bypass.
        """
        if (not self._downloader.params.get('geo_bypass_country', None) and
                self._GEO_BYPASS and
                self._downloader.params.get('geo_bypass', True) and
                not self._x_forwarded_for_ip and
                countries):
            country_code = random.choice(countries)
            self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
            if self._x_forwarded_for_ip:
                self.report_warning(
                    'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
                    % (self._x_forwarded_for_ip, country_code.upper()))
                return True
        return False
    def set_downloader(self, downloader):
        """Sets the downloader (YoutubeDL instance) for this IE."""
        self._downloader = downloader
    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        pass
    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        pass
    @classmethod
    def ie_key(cls):
        """A string for getting the InfoExtractor with get_info_extractor"""
        # Class name minus the conventional trailing "IE" suffix.
        return compat_str(cls.__name__[:-2])
    @property
    def IE_NAME(self):
        # Human-readable extractor name; same derivation as ie_key().
        return compat_str(type(self).__name__[:-2])
    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
        """ Returns the response handle.

        note=None prints the default download message, note=False is silent.
        On network errors: raises ExtractorError when fatal, otherwise warns
        (using errnote, unless errnote is False) and returns False.
        """
        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            if video_id is None:
                self.to_screen('%s' % (note,))
            else:
                self.to_screen('%s: %s' % (video_id, note))
        # Merge data/headers/query into whichever request form we were given.
        if isinstance(url_or_request, compat_urllib_request.Request):
            url_or_request = update_Request(
                url_or_request, data=data, headers=headers, query=query)
        else:
            if query:
                url_or_request = update_url_query(url_or_request, query)
            if data is not None or headers:
                url_or_request = sanitized_Request(url_or_request, data, headers)
        try:
            return self._downloader.urlopen(url_or_request)
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            if errnote is False:
                return False
            if errnote is None:
                errnote = 'Unable to download webpage'
            errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
            if fatal:
                # Keep the original traceback for debugging.
                raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
            else:
                self._downloader.report_warning(errmsg)
                return False
    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}):
        """ Returns a tuple (page content as string, URL handle),
        or False on non-fatal download failure. """
        # Strip hashes from the URL (#1038)
        if isinstance(url_or_request, (compat_str, str)):
            url_or_request = url_or_request.partition('#')[0]
        # Some sites check X-Forwarded-For HTTP header in order to figure out
        # the origin of the client behind proxy. This allows bypassing geo
        # restriction by faking this header's value to IP that belongs to some
        # geo unrestricted country. We will do so once we encounter any
        # geo restriction error.
        if self._x_forwarded_for_ip:
            if 'X-Forwarded-For' not in headers:
                headers['X-Forwarded-For'] = self._x_forwarded_for_ip
        urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query)
        if urlh is False:
            # _request_webpage only returns False in non-fatal mode.
            assert not fatal
            return False
        content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
        return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
    def __check_blocked(self, content):
        """Raise ExtractorError if *content* is a known censorship/block page.

        Recognizes Websense filtering pages, Indian government blocks and
        Russian (RKN) blocklist pages by their literal title/marker strings.
        """
        first_block = content[:512]
        if ('<title>Access to this site is blocked</title>' in content and
                'Websense' in first_block):
            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
            blocked_iframe = self._html_search_regex(
                r'<iframe src="([^"]+)"', content,
                'Websense information URL', default=None)
            if blocked_iframe:
                msg += ' Visit %s for more details' % blocked_iframe
            raise ExtractorError(msg, expected=True)
        if '<title>The URL you requested has been blocked</title>' in first_block:
            msg = (
                'Access to this webpage has been blocked by Indian censorship. '
                'Use a VPN or proxy server (with --proxy) to route around it.')
            block_msg = self._html_search_regex(
                r'</h1><p>(.*?)</p>',
                content, 'block message', default=None)
            if block_msg:
                msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
            raise ExtractorError(msg, expected=True)
        if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content and
                'blocklist.rkn.gov.ru' in content):
            raise ExtractorError(
                'Access to this webpage has been blocked by decision of the Russian government. '
                'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
                expected=True)
    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
        """Read and decode the body of an open URL handle.

        Honors --dump-pages and --write-pages debug options, decodes using the
        given *encoding* (or a guess from headers/content), and raises on
        known censorship block pages.
        """
        content_type = urlh.headers.get('Content-Type', '')
        webpage_bytes = urlh.read()
        if prefix is not None:
            webpage_bytes = prefix + webpage_bytes
        if not encoding:
            encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
        if self._downloader.params.get('dump_intermediate_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            self.to_screen('Dumping request to ' + url)
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        if self._downloader.params.get('write_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            basen = '%s_%s' % (video_id, url)
            # Keep dump filenames bounded; replace the tail with an md5 digest.
            if len(basen) > 240:
                h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
                basen = basen[:240 - len(h)] + h
            raw_filename = basen + '.dump'
            filename = sanitize_filename(raw_filename, restricted=True)
            self.to_screen('Saving request to ' + filename)
            # Working around MAX_PATH limitation on Windows (see
            # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
            if compat_os_name == 'nt':
                absfilepath = os.path.abspath(filename)
                if len(absfilepath) > 259:
                    filename = '\\\\?\\' + absfilepath
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)
        try:
            content = webpage_bytes.decode(encoding, 'replace')
        except LookupError:
            # Unknown/bogus codec name in the page; fall back to UTF-8.
            content = webpage_bytes.decode('utf-8', 'replace')
        self.__check_blocked(content)
        return content
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None, data=None, headers={}, query={}):
""" Returns the data of the page as a string """
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding, data=data, headers=headers, query=query)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
def _download_xml(self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None,
data=None, headers={}, query={}):
"""Return the xml as an xml.etree.ElementTree.Element"""
xml_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query)
if xml_string is False:
return xml_string
return self._parse_xml(
xml_string, video_id, transform_source=transform_source,
fatal=fatal)
def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True):
if transform_source:
xml_string = transform_source(xml_string)
try:
return compat_etree_fromstring(xml_string.encode('utf-8'))
except compat_xml_parse_error as ve:
errmsg = '%s: Failed to parse XML ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def _download_json(self, url_or_request, video_id,
note='Downloading JSON metadata',
errnote='Unable to download JSON metadata',
transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={}):
json_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query)
if (not fatal) and json_string is False:
return None
return self._parse_json(
json_string, video_id, transform_source=transform_source, fatal=fatal)
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
    def to_screen(self, msg):
        """Print msg to screen, prefixing it with '[ie_name]'"""
        self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
    def report_extraction(self, id_or_name):
        """Report information extraction."""
        self.to_screen('%s: Extracting information' % id_or_name)
    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self.to_screen('%s: Downloading webpage' % video_id)
    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self.to_screen('Confirming age')
    def report_login(self):
        """Report attempt to log in."""
        self.to_screen('Logging in')
@staticmethod
def raise_login_required(msg='This video is only available for registered users'):
raise ExtractorError(
'%s. Use --username and --password or --netrc to provide account credentials.' % msg,
expected=True)
    @staticmethod
    def raise_geo_restricted(msg='This video is not available from your location due to geo restriction', countries=None):
        """Abort extraction with a GeoRestrictedError; *countries* feeds the geo bypass retry."""
        raise GeoRestrictedError(msg, countries=countries)
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
def playlist_from_matches(self, matches, playlist_id=None, playlist_title=None, getter=None, ie=None):
urls = orderedSet(
self.url_result(self._proto_relative_url(getter(m) if getter else m), ie)
for m in matches)
return self.playlist_result(
urls, playlist_id=playlist_id, playlist_title=playlist_title)
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
    def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Perform a regex search on the given string, using a single or a list of
        patterns returning the first matching group.
        In case of failure return a default value or raise a WARNING or a
        RegexNotFoundError, depending on fatal, specifying the field name.
        """
        if isinstance(pattern, (str, compat_str, compiled_regex_type)):
            mobj = re.search(pattern, string, flags)
        else:
            # List of patterns: the first one that matches wins.
            # NOTE(review): an empty pattern list would leave mobj unbound —
            # callers always pass at least one pattern.
            for p in pattern:
                mobj = re.search(p, string, flags)
                if mobj:
                    break
        # Colorize the field name in error output on capable terminals.
        if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
            _name = '\033[0;34m%s\033[0m' % name
        else:
            _name = name
        if mobj:
            if group is None:
                # return the first matching group
                return next(g for g in mobj.groups() if g is not None)
            else:
                return mobj.group(group)
        elif default is not NO_DEFAULT:
            return default
        elif fatal:
            raise RegexNotFoundError('Unable to extract %s' % _name)
        else:
            self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
            return None
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_netrc_login_info(self, netrc_machine=None):
username = None
password = None
netrc_machine = netrc_machine or self._NETRC_MACHINE
if self._downloader.params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(netrc_machine)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError(
'No authenticators for %s' % netrc_machine)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning(
'parsing .netrc: %s' % error_to_compat_str(err))
return username, password
def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
"""
Get the login info as (username, password)
First look for the manually specified credentials using username_option
and password_option as keys in params dictionary. If no such credentials
available look in the netrc file using the netrc_machine or _NETRC_MACHINE
value.
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get(username_option) is not None:
username = downloader_params[username_option]
password = downloader_params[password_option]
else:
username, password = self._get_netrc_login_info(netrc_machine)
return username, password
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor') is not None:
return downloader_params['twofactor']
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
% {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
    @staticmethod
    def _meta_regex(prop):
        """Regex matching a <meta> tag whose itemprop/name/property/id/http-equiv
        equals *prop*, capturing its content attribute in group 'content'."""
        return r'''(?isx)<meta
                    (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
                    [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
if not isinstance(prop, (list, tuple)):
prop = [prop]
if name is None:
name = 'OpenGraph %s' % prop[0]
og_regexes = []
for p in prop:
og_regexes.extend(self._og_regexes(p))
escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
    def _og_search_thumbnail(self, html, **kargs):
        # og:image; non-fatal since many pages lack it.
        return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
    def _og_search_description(self, html, **kargs):
        # og:description; non-fatal.
        return self._og_search_property('description', html, fatal=False, **kargs)
    def _og_search_title(self, html, **kargs):
        # og:title; fatal by default (titles are expected to exist).
        return self._og_search_property('title', html, **kargs)
    def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
        # Prefer og:video:secure_url (when secure=True) over og:video/og:video:url.
        regexes = self._og_regexes('video') + self._og_regexes('video:url')
        if secure:
            regexes = self._og_regexes('video:secure_url') + regexes
        return self._html_search_regex(regexes, html, name, **kargs)
    def _og_search_url(self, html, **kargs):
        # og:url (canonical page URL).
        return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if not isinstance(name, (list, tuple)):
name = [name]
if display_name is None:
display_name = name[0]
return self._html_search_regex(
[self._meta_regex(n) for n in name],
html, display_name, fatal=fatal, group='content', **kwargs)
    def _dc_search_uploader(self, html):
        # Dublin Core creator metadata as the uploader name.
        return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower())
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta(
'isFamilyFriendly', html, default=None)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower())
    def _twitter_search_player(self, html):
        # Twitter card player URL (twitter:player meta tag).
        return self._html_search_meta('twitter:player', html,
                                      'twitter card player')
def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
json_ld = self._search_regex(
r'(?s)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
html, 'JSON-LD', group='json_ld', **kwargs)
default = kwargs.get('default', NO_DEFAULT)
if not json_ld:
return default if default is not NO_DEFAULT else {}
# JSON-LD may be malformed and thus `fatal` should be respected.
# At the same time `default` may be passed that assumes `fatal=False`
# for _search_regex. Let's simulate the same behavior here as well.
fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False
return self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
    def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
        """Convert parsed (or raw-string) JSON-LD data into an info dict.

        Handles TVEpisode/Episode, Article and VideoObject items (plus a
        nested 'video' VideoObject); keys with None values are dropped.
        """
        if isinstance(json_ld, compat_str):
            json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
        if not json_ld:
            return {}
        info = {}
        if not isinstance(json_ld, (list, tuple, dict)):
            return info
        # Normalize a single dict to a one-element list.
        if isinstance(json_ld, dict):
            json_ld = [json_ld]
        def extract_video_object(e):
            # Fill info from a schema.org VideoObject node (mutates `info`).
            assert e['@type'] == 'VideoObject'
            info.update({
                'url': e.get('contentUrl'),
                'title': unescapeHTML(e.get('name')),
                'description': unescapeHTML(e.get('description')),
                'thumbnail': e.get('thumbnailUrl') or e.get('thumbnailURL'),
                'duration': parse_duration(e.get('duration')),
                'timestamp': unified_timestamp(e.get('uploadDate')),
                'filesize': float_or_none(e.get('contentSize')),
                'tbr': int_or_none(e.get('bitrate')),
                'width': int_or_none(e.get('width')),
                'height': int_or_none(e.get('height')),
                'view_count': int_or_none(e.get('interactionCount')),
            })
        for e in json_ld:
            if e.get('@context') == 'http://schema.org':
                item_type = e.get('@type')
                # Bail out early when the caller expects a specific type.
                if expected_type is not None and expected_type != item_type:
                    return info
                if item_type in ('TVEpisode', 'Episode'):
                    info.update({
                        'episode': unescapeHTML(e.get('name')),
                        'episode_number': int_or_none(e.get('episodeNumber')),
                        'description': unescapeHTML(e.get('description')),
                    })
                    part_of_season = e.get('partOfSeason')
                    if isinstance(part_of_season, dict) and part_of_season.get('@type') in ('TVSeason', 'Season', 'CreativeWorkSeason'):
                        info['season_number'] = int_or_none(part_of_season.get('seasonNumber'))
                    part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
                    if isinstance(part_of_series, dict) and part_of_series.get('@type') in ('TVSeries', 'Series', 'CreativeWorkSeries'):
                        info['series'] = unescapeHTML(part_of_series.get('name'))
                elif item_type == 'Article':
                    info.update({
                        'timestamp': parse_iso8601(e.get('datePublished')),
                        'title': unescapeHTML(e.get('headline')),
                        'description': unescapeHTML(e.get('articleBody')),
                    })
                elif item_type == 'VideoObject':
                    extract_video_object(e)
                    continue
                # Non-video item may still embed a VideoObject under 'video'.
                video = e.get('video')
                if isinstance(video, dict) and video.get('@type') == 'VideoObject':
                    extract_video_object(video)
                break
        return dict((k, v) for k, v in info.items() if v is not None)
@staticmethod
def _hidden_inputs(html):
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
hidden_inputs = {}
for input in re.findall(r'(?i)(<input[^>]+>)', html):
attrs = extract_attributes(input)
if not input:
continue
if attrs.get('type') not in ('hidden', 'submit'):
continue
name = attrs.get('name') or attrs.get('id')
value = attrs.get('value')
if name and value is not None:
hidden_inputs[name] = value
return hidden_inputs
    def _form_hidden_inputs(self, form_id, html):
        """Return hidden input name -> value pairs from the <form> with id *form_id*."""
        form = self._search_regex(
            r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
            html, '%s form' % form_id, group='form')
        return self._hidden_inputs(form)
    def _sort_formats(self, formats, field_preference=None):
        """Sort *formats* in place from worst to best quality.

        When *field_preference* (list/tuple of field names) is given, formats
        are compared field by field in that order; otherwise a built-in
        multi-criteria key (preference, language, quality, tbr, ...) is used.
        Raises ExtractorError when the list is empty.
        """
        if not formats:
            raise ExtractorError('No video formats found')
        for f in formats:
            # Automatically determine tbr when missing based on abr and vbr (improves
            # formats sorting in some cases)
            if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
                f['tbr'] = f['abr'] + f['vbr']
        def _formats_key(f):
            # TODO remove the following workaround
            from ..utils import determine_ext
            if not f.get('ext') and 'url' in f:
                f['ext'] = determine_ext(f['url'])
            # Caller-specified field ordering takes precedence over the
            # built-in heuristics below.
            if isinstance(field_preference, (list, tuple)):
                return tuple(
                    f.get(field)
                    if f.get(field) is not None
                    else ('' if field == 'format_id' else -1)
                    for field in field_preference)
            preference = f.get('preference')
            if preference is None:
                preference = 0
                if f.get('ext') in ['f4f', 'f4m']:  # Not yet supported
                    preference -= 0.5
            protocol = f.get('protocol') or determine_protocol(f)
            proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)
            if f.get('vcodec') == 'none':  # audio only
                preference -= 50
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
                else:
                    ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
                ext_preference = 0
                try:
                    audio_ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    audio_ext_preference = -1
            else:
                if f.get('acodec') == 'none':  # video only
                    preference -= 40
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['flv', 'mp4', 'webm']
                else:
                    ORDER = ['webm', 'flv', 'mp4']
                try:
                    ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    ext_preference = -1
                audio_ext_preference = 0
            # Missing numeric fields sort as -1, missing format_id as ''.
            return (
                preference,
                f.get('language_preference') if f.get('language_preference') is not None else -1,
                f.get('quality') if f.get('quality') is not None else -1,
                f.get('tbr') if f.get('tbr') is not None else -1,
                f.get('filesize') if f.get('filesize') is not None else -1,
                f.get('vbr') if f.get('vbr') is not None else -1,
                f.get('height') if f.get('height') is not None else -1,
                f.get('width') if f.get('width') is not None else -1,
                proto_preference,
                ext_preference,
                f.get('abr') if f.get('abr') is not None else -1,
                audio_ext_preference,
                f.get('fps') if f.get('fps') is not None else -1,
                f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
                f.get('source_preference') if f.get('source_preference') is not None else -1,
                f.get('format_id') if f.get('format_id') is not None else '',
            )
        formats.sort(key=_formats_key)
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats
    def _is_valid_url(self, url, video_id, item='video', headers={}):
        """Probe *url* with a request; return False (and log) only on URL errors.

        Non-HTTP(S) URLs are assumed valid without probing; other extraction
        errors are re-raised.
        """
        url = self._proto_relative_url(url, scheme='http:')
        # For now assume non HTTP(S) URLs always valid
        if not (url.startswith('http://') or url.startswith('https://')):
            return True
        try:
            self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
            return True
        except ExtractorError as e:
            if isinstance(e.cause, compat_urllib_error.URLError):
                self.to_screen(
                    '%s: %s URL is invalid, skipping' % (video_id, item))
                return False
            raise
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
    def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
                             transform_source=lambda s: fix_xml_ampersands(s).strip(),
                             fatal=True, m3u8_id=None):
        """Download an Adobe F4M manifest and return its parsed format list
        (empty list on non-fatal download failure)."""
        manifest = self._download_xml(
            manifest_url, video_id, 'Downloading f4m manifest',
            'Unable to download f4m manifest',
            # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
            # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
            transform_source=transform_source,
            fatal=fatal)
        if manifest is False:
            return []
        return self._parse_f4m_formats(
            manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
            transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
    def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
                           transform_source=lambda s: fix_xml_ampersands(s).strip(),
                           fatal=True, m3u8_id=None):
        """Turn a parsed F4M manifest element into a list of format dicts.

        Handles both 1.0 and 2.0 manifest namespaces, skips DRM-protected
        renditions, and recurses into nested f4m/m3u8 sub-manifests found in
        set-level manifests.
        """
        # currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
        akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
        if akamai_pv is not None and ';' in akamai_pv.text:
            playerVerificationChallenge = akamai_pv.text.split(';')[0]
            if playerVerificationChallenge.strip() != '':
                return []
        formats = []
        manifest_version = '1.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
        if not media_nodes:
            manifest_version = '2.0'
            media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
        # Remove unsupported DRM protected media from final formats
        # rendition (see https://github.com/rg3/youtube-dl/issues/8573).
        media_nodes = remove_encrypted_media(media_nodes)
        if not media_nodes:
            return formats
        base_url = xpath_text(
            manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
            'base URL', default=None)
        if base_url:
            base_url = base_url.strip()
        bootstrap_info = xpath_element(
            manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
            'bootstrap info', default=None)
        vcodec = None
        mime_type = xpath_text(
            manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
            'base URL', default=None)
        if mime_type and mime_type.startswith('audio/'):
            vcodec = 'none'
        for i, media_el in enumerate(media_nodes):
            tbr = int_or_none(media_el.attrib.get('bitrate'))
            width = int_or_none(media_el.attrib.get('width'))
            height = int_or_none(media_el.attrib.get('height'))
            # format_id falls back to the node index when no bitrate is given.
            format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
            # If <bootstrapInfo> is present, the specified f4m is a
            # stream-level manifest, and only set-level manifests may refer to
            # external resources. See section 11.4 and section 4 of F4M spec
            if bootstrap_info is None:
                media_url = None
                # @href is introduced in 2.0, see section 11.6 of F4M spec
                if manifest_version == '2.0':
                    media_url = media_el.attrib.get('href')
                if media_url is None:
                    media_url = media_el.attrib.get('url')
                if not media_url:
                    continue
                manifest_url = (
                    media_url if media_url.startswith('http://') or media_url.startswith('https://')
                    else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
                # If media_url is itself a f4m manifest do the recursive extraction
                # since bitrates in parent manifest (this one) and media_url manifest
                # may differ leading to inability to resolve the format by requested
                # bitrate in f4m downloader
                ext = determine_ext(manifest_url)
                if ext == 'f4m':
                    f4m_formats = self._extract_f4m_formats(
                        manifest_url, video_id, preference=preference, f4m_id=f4m_id,
                        transform_source=transform_source, fatal=fatal)
                    # Sometimes stream-level manifest contains single media entry that
                    # does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
                    # At the same time parent's media entry in set-level manifest may
                    # contain it. We will copy it from parent in such cases.
                    if len(f4m_formats) == 1:
                        f = f4m_formats[0]
                        f.update({
                            'tbr': f.get('tbr') or tbr,
                            'width': f.get('width') or width,
                            'height': f.get('height') or height,
                            'format_id': f.get('format_id') if not tbr else format_id,
                            'vcodec': vcodec,
                        })
                    formats.extend(f4m_formats)
                    continue
                elif ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        manifest_url, video_id, 'mp4', preference=preference,
                        m3u8_id=m3u8_id, fatal=fatal))
                    continue
            formats.append({
                'format_id': format_id,
                'url': manifest_url,
                'manifest_url': manifest_url,
                'ext': 'flv' if bootstrap_info is not None else None,
                'tbr': tbr,
                'width': width,
                'height': height,
                'vcodec': vcodec,
                'preference': preference,
            })
        return formats
def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
return {
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 100 if preference else -100,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, note=None, errnote=None,
fatal=True, live=False):
res = self._download_webpage_handle(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal)
if res is False:
return []
m3u8_doc, urlh = res
m3u8_url = urlh.geturl()
return self._parse_m3u8_formats(
m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol,
preference=preference, m3u8_id=m3u8_id, live=live)
    def _parse_m3u8_formats(self, m3u8_doc, m3u8_url, ext=None,
                            entry_protocol='m3u8', preference=None,
                            m3u8_id=None, live=False):
        """Parse an HLS playlist document into a list of format dicts.

        m3u8_doc is the playlist text; m3u8_url is its URL (used to resolve
        relative entries). For a media playlist a single format pointing at
        the playlist itself is returned; for a master playlist one format per
        EXT-X-STREAM-INF entry (plus per-rendition formats from EXT-X-MEDIA
        entries that carry a URI) is returned. Returns [] for DRM-protected
        (Adobe Flash Access) playlists.
        """
        if '#EXT-X-FAXS-CM:' in m3u8_doc:  # Adobe Flash Access
            return []
        formats = []
        # Resolve playlist entries relative to the playlist's own URL
        format_url = lambda u: (
            u
            if re.match(r'^https?://', u)
            else compat_urlparse.urljoin(m3u8_url, u))
        # References:
        # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
        # 2. https://github.com/rg3/youtube-dl/issues/12211
        # We should try extracting formats only from master playlists [1, 4.3.4],
        # i.e. playlists that describe available qualities. On the other hand
        # media playlists [1, 4.3.3] should be returned as is since they contain
        # just the media without qualities renditions.
        # Fortunately, master playlist can be easily distinguished from media
        # playlist based on particular tags availability. As of [1, 4.3.3, 4.3.4]
        # master playlist tags MUST NOT appear in a media playist and vice versa.
        # As of [1, 4.3.3.1] #EXT-X-TARGETDURATION tag is REQUIRED for every
        # media playlist and MUST NOT appear in master playlist thus we can
        # clearly detect media playlist with this criterion.
        if '#EXT-X-TARGETDURATION' in m3u8_doc:  # media playlist, return as is
            return [{
                'url': m3u8_url,
                'format_id': m3u8_id,
                'ext': ext,
                'protocol': entry_protocol,
                'preference': preference,
            }]
        # Rendition groups keyed by GROUP-ID, collected from EXT-X-MEDIA tags
        groups = {}
        # Attributes of the most recent EXT-X-STREAM-INF tag; they apply to
        # the URI line that follows it [1, 4.3.4.2]
        last_stream_inf = {}
        def extract_media(x_media_line):
            # Register an EXT-X-MEDIA rendition; if it carries its own URI,
            # also emit it as a standalone format.
            media = parse_m3u8_attributes(x_media_line)
            # As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
            media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
            if not (media_type and group_id and name):
                return
            groups.setdefault(group_id, []).append(media)
            if media_type not in ('VIDEO', 'AUDIO'):
                return
            media_url = media.get('URI')
            if media_url:
                format_id = []
                for v in (group_id, name):
                    if v:
                        format_id.append(v)
                f = {
                    'format_id': '-'.join(format_id),
                    'url': format_url(media_url),
                    'manifest_url': m3u8_url,
                    'language': media.get('LANGUAGE'),
                    'ext': ext,
                    'protocol': entry_protocol,
                    'preference': preference,
                }
                if media_type == 'AUDIO':
                    # Audio-only rendition: mark as video-less
                    f['vcodec'] = 'none'
                formats.append(f)
        def build_stream_name():
            # Despite specification does not mention NAME attribute for
            # EXT-X-STREAM-INF tag it still sometimes may be present (see [1]
            # or vidio test in TestInfoExtractor.test_parse_m3u8_formats)
            # 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
            stream_name = last_stream_inf.get('NAME')
            if stream_name:
                return stream_name
            # If there is no NAME in EXT-X-STREAM-INF it will be obtained
            # from corresponding rendition group
            stream_group_id = last_stream_inf.get('VIDEO')
            if not stream_group_id:
                return
            stream_group = groups.get(stream_group_id)
            if not stream_group:
                return stream_group_id
            rendition = stream_group[0]
            return rendition.get('NAME') or stream_group_id
        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-STREAM-INF:'):
                last_stream_inf = parse_m3u8_attributes(line)
            elif line.startswith('#EXT-X-MEDIA:'):
                extract_media(line)
            elif line.startswith('#') or not line.strip():
                # Skip comments/other tags and blank lines
                continue
            else:
                # A non-tag line is the URI for the preceding EXT-X-STREAM-INF
                tbr = float_or_none(
                    last_stream_inf.get('AVERAGE-BANDWIDTH') or
                    last_stream_inf.get('BANDWIDTH'), scale=1000)
                format_id = []
                if m3u8_id:
                    format_id.append(m3u8_id)
                stream_name = build_stream_name()
                # Bandwidth of live streams may differ over time thus making
                # format_id unpredictable. So it's better to keep provided
                # format_id intact.
                if not live:
                    format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
                manifest_url = format_url(line.strip())
                f = {
                    'format_id': '-'.join(format_id),
                    'url': manifest_url,
                    'manifest_url': m3u8_url,
                    'tbr': tbr,
                    'ext': ext,
                    'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
                    'protocol': entry_protocol,
                    'preference': preference,
                }
                resolution = last_stream_inf.get('RESOLUTION')
                if resolution:
                    mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
                    if mobj:
                        f['width'] = int(mobj.group('width'))
                        f['height'] = int(mobj.group('height'))
                # Unified Streaming Platform encodes abr/vbr into the URL
                mobj = re.search(
                    r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
                if mobj:
                    abr, vbr = mobj.groups()
                    abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
                    f.update({
                        'vbr': vbr,
                        'abr': abr,
                    })
                codecs = parse_codecs(last_stream_inf.get('CODECS'))
                f.update(codecs)
                audio_group_id = last_stream_inf.get('AUDIO')
                # As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
                # references a rendition group MUST have a CODECS attribute.
                # However, this is not always respected, for example, [2]
                # contains EXT-X-STREAM-INF tag which references AUDIO
                # rendition group but does not have CODECS and despite
                # referencing audio group an audio group, it represents
                # a complete (with audio and video) format. So, for such cases
                # we will ignore references to rendition groups and treat them
                # as complete formats.
                if audio_group_id and codecs and f.get('vcodec') != 'none':
                    audio_group = groups.get(audio_group_id)
                    if audio_group and audio_group[0].get('URI'):
                        # TODO: update acodec for audio only formats with
                        # the same GROUP-ID
                        f['acodec'] = 'none'
                formats.append(f)
                # The STREAM-INF attributes applied only to this URI line
                last_stream_inf = {}
        return formats
@staticmethod
def _xpath_ns(path, namespace=None):
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
if smil is False:
assert not fatal
return []
namespace = self._parse_smil_namespace(smil)
return self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
return {}
return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
return self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
    def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
        """Parse a SMIL document into an info dict (formats, subtitles, metadata).

        smil is the parsed XML tree, smil_url its source URL (used to
        resolve relative media and to derive the returned id).
        """
        namespace = self._parse_smil_namespace(smil)
        formats = self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
        subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
        # NOTE(review): the video_id argument is discarded here in favor of
        # the SMIL URL basename — confirm this shadowing is intended
        video_id = os.path.splitext(url_basename(smil_url))[0]
        title = None
        description = None
        upload_date = None
        # Pull title/description/date from <head><meta> entries; only the
        # first occurrence of each wins
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            name = meta.attrib.get('name')
            content = meta.attrib.get('content')
            if not name or not content:
                continue
            if not title and name == 'title':
                title = content
            elif not description and name in ('description', 'abstract'):
                description = content
            elif not upload_date and name == 'date':
                upload_date = unified_strdate(content)
        # Every <image> element with a src becomes a thumbnail candidate
        thumbnails = [{
            'id': image.get('type'),
            'url': image.get('src'),
            'width': int_or_none(image.get('width')),
            'height': int_or_none(image.get('height')),
        } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
        return {
            'id': video_id,
            'title': title or video_id,
            'description': description,
            'upload_date': upload_date,
            'thumbnails': thumbnails,
            'formats': formats,
            'subtitles': subtitles,
        }
    def _parse_smil_namespace(self, smil):
        """Extract the XML namespace from the root <smil> tag's Clark notation.

        Returns None when the root tag carries no '{namespace}' prefix.
        """
        return self._search_regex(
            r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
        """Extract formats from the <video>/<audio> elements of a SMIL tree.

        Handles four source kinds: RTMP streams, m3u8 (HLS) playlists,
        f4m (HDS) manifests and plain HTTP files. transform_rtmp_url, when
        given, may rewrite (streamer, play_path) pairs for RTMP entries.
        """
        # A <meta base=...> (or httpBase) in <head> overrides the SMIL URL
        # as the base for resolving relative sources
        base = smil_url
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            b = meta.get('base') or meta.get('httpBase')
            if b:
                base = b
                break
        formats = []
        rtmp_count = 0
        http_count = 0
        m3u8_count = 0
        # Track seen src values to skip duplicate entries
        srcs = []
        media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
        for medium in media:
            src = medium.get('src')
            if not src or src in srcs:
                continue
            srcs.append(src)
            # Both hyphenated and camelCase attribute spellings occur in the wild
            bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
            filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
            width = int_or_none(medium.get('width'))
            height = int_or_none(medium.get('height'))
            proto = medium.get('proto')
            ext = medium.get('ext')
            src_ext = determine_ext(src)
            streamer = medium.get('streamer') or base
            if proto == 'rtmp' or streamer.startswith('rtmp'):
                rtmp_count += 1
                formats.append({
                    'url': streamer,
                    'play_path': src,
                    'ext': 'flv',
                    'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                if transform_rtmp_url:
                    # Let the caller rewrite streamer/play_path in place
                    streamer, src = transform_rtmp_url(streamer, src)
                    formats[-1].update({
                        'url': streamer,
                        'play_path': src,
                    })
                continue
            src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
            src_url = src_url.strip()
            if proto == 'm3u8' or src_ext == 'm3u8':
                m3u8_formats = self._extract_m3u8_formats(
                    src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
                # A single-entry HLS result inherits quality metadata from
                # this SMIL medium since the playlist itself had none
                if len(m3u8_formats) == 1:
                    m3u8_count += 1
                    m3u8_formats[0].update({
                        'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
                        'tbr': bitrate,
                        'width': width,
                        'height': height,
                    })
                formats.extend(m3u8_formats)
                continue
            if src_ext == 'f4m':
                f4m_url = src_url
                if not f4m_params:
                    # Default Adobe HDS query parameters
                    f4m_params = {
                        'hdcore': '3.2.0',
                        'plugin': 'flowplayer-3.2.0.1',
                    }
                f4m_url += '&' if '?' in f4m_url else '?'
                f4m_url += compat_urllib_parse_urlencode(f4m_params)
                formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
                continue
            # NOTE(review): validity is checked against the raw src, not the
            # resolved src_url that is actually used below — confirm intended
            if src_url.startswith('http') and self._is_valid_url(src, video_id):
                http_count += 1
                formats.append({
                    'url': src_url,
                    'ext': ext or src_ext or 'flv',
                    'format_id': 'http-%d' % (bitrate or http_count),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                continue
        return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
urls = []
subtitles = {}
for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
src = textstream.get('src')
if not src or src in urls:
continue
urls.append(src)
ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
subtitles.setdefault(lang, []).append({
'url': src,
'ext': ext,
})
return subtitles
def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
xspf = self._download_xml(
playlist_url, playlist_id, 'Downloading xpsf playlist',
'Unable to download xspf manifest', fatal=fatal)
if xspf is False:
return []
return self._parse_xspf(xspf, playlist_id)
    def _parse_xspf(self, playlist, playlist_id):
        """Parse an XSPF playlist tree into a list of entry info dicts.

        Note that every entry reuses playlist_id as its 'id'; per-location
        format metadata comes from StreamOne's 's1:' extension attributes.
        """
        NS_MAP = {
            'xspf': 'http://xspf.org/ns/0/',
            's1': 'http://static.streamone.nl/player/ns/0',
        }
        entries = []
        for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
            title = xpath_text(
                track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
            description = xpath_text(
                track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
            thumbnail = xpath_text(
                track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
            # <duration> is expressed in milliseconds
            duration = float_or_none(
                xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
            formats = [{
                'url': location.text,
                'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
                'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
                'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
            } for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
            self._sort_formats(formats)
            entries.append({
                'id': playlist_id,
                'title': title,
                'description': description,
                'thumbnail': thumbnail,
                'duration': duration,
                'formats': formats,
            })
        return entries
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}):
res = self._download_webpage_handle(
mpd_url, video_id,
note=note or 'Downloading MPD manifest',
errnote=errnote or 'Failed to download MPD manifest',
fatal=fatal)
if res is False:
return []
mpd, urlh = res
mpd_base_url = base_url(urlh.geturl())
return self._parse_mpd_formats(
compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url,
formats_dict=formats_dict, mpd_url=mpd_url)
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):
"""
Parse formats from MPD manifest.
References:
1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
"""
if mpd_doc.get('type') == 'dynamic':
return []
namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
def _add_ns(path):
return self._xpath_ns(path, namespace)
def is_drm_protected(element):
return element.find(_add_ns('ContentProtection')) is not None
def extract_multisegment_info(element, ms_parent_info):
ms_info = ms_parent_info.copy()
# As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
# common attributes and elements. We will only extract relevant
# for us.
def extract_common(source):
segment_timeline = source.find(_add_ns('SegmentTimeline'))
if segment_timeline is not None:
s_e = segment_timeline.findall(_add_ns('S'))
if s_e:
ms_info['total_number'] = 0
ms_info['s'] = []
for s in s_e:
r = int(s.get('r', 0))
ms_info['total_number'] += 1 + r
ms_info['s'].append({
't': int(s.get('t', 0)),
# @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
'd': int(s.attrib['d']),
'r': r,
})
start_number = source.get('startNumber')
if start_number:
ms_info['start_number'] = int(start_number)
timescale = source.get('timescale')
if timescale:
ms_info['timescale'] = int(timescale)
segment_duration = source.get('duration')
if segment_duration:
ms_info['segment_duration'] = float(segment_duration)
def extract_Initialization(source):
initialization = source.find(_add_ns('Initialization'))
if initialization is not None:
ms_info['initialization_url'] = initialization.attrib['sourceURL']
segment_list = element.find(_add_ns('SegmentList'))
if segment_list is not None:
extract_common(segment_list)
extract_Initialization(segment_list)
segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
if segment_urls_e:
ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
else:
segment_template = element.find(_add_ns('SegmentTemplate'))
if segment_template is not None:
extract_common(segment_template)
media = segment_template.get('media')
if media:
ms_info['media'] = media
initialization = segment_template.get('initialization')
if initialization:
ms_info['initialization'] = initialization
else:
extract_Initialization(segment_template)
return ms_info
mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats = []
for period in mpd_doc.findall(_add_ns('Period')):
period_duration = parse_duration(period.get('duration')) or mpd_duration
period_ms_info = extract_multisegment_info(period, {
'start_number': 1,
'timescale': 1,
})
for adaptation_set in period.findall(_add_ns('AdaptationSet')):
if is_drm_protected(adaptation_set):
continue
adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
for representation in adaptation_set.findall(_add_ns('Representation')):
if is_drm_protected(representation):
continue
representation_attrib = adaptation_set.attrib.copy()
representation_attrib.update(representation.attrib)
# According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
mime_type = representation_attrib['mimeType']
content_type = mime_type.split('/')[0]
if content_type == 'text':
# TODO implement WebVTT downloading
pass
elif content_type in ('video', 'audio'):
base_url = ''
for element in (representation, adaptation_set, period, mpd_doc):
base_url_e = element.find(_add_ns('BaseURL'))
if base_url_e is not None:
base_url = base_url_e.text + base_url
if re.match(r'^https?://', base_url):
break
if mpd_base_url and not re.match(r'^https?://', base_url):
if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
mpd_base_url += '/'
base_url = mpd_base_url + base_url
representation_id = representation_attrib.get('id')
lang = representation_attrib.get('lang')
url_el = representation.find(_add_ns('BaseURL'))
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
bandwidth = int_or_none(representation_attrib.get('bandwidth'))
f = {
'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
'url': base_url,
'manifest_url': mpd_url,
'ext': mimetype2ext(mime_type),
'width': int_or_none(representation_attrib.get('width')),
'height': int_or_none(representation_attrib.get('height')),
'tbr': float_or_none(bandwidth, 1000),
'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
'fps': int_or_none(representation_attrib.get('frameRate')),
'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
'format_note': 'DASH %s' % content_type,
'filesize': filesize,
}
f.update(parse_codecs(representation_attrib.get('codecs')))
representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
def prepare_template(template_name, identifiers):
t = representation_ms_info[template_name]
t = t.replace('$RepresentationID$', representation_id)
t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
t.replace('$$', '$')
return t
# @initialization is a regular template like @media one
# so it should be handled just the same way (see
# https://github.com/rg3/youtube-dl/issues/11605)
if 'initialization' in representation_ms_info:
initialization_template = prepare_template(
'initialization',
# As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
# $Time$ shall not be included for @initialization thus
# only $Bandwidth$ remains
('Bandwidth', ))
representation_ms_info['initialization_url'] = initialization_template % {
'Bandwidth': bandwidth,
}
def location_key(location):
return 'url' if re.match(r'^https?://', location) else 'path'
if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:
media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
media_location_key = location_key(media_template)
# As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
# can't be used at the same time
if '%(Number' in media_template and 's' not in representation_ms_info:
segment_duration = None
if 'total_number' not in representation_ms_info and 'segment_duration':
segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
representation_ms_info['fragments'] = [{
media_location_key: media_template % {
'Number': segment_number,
'Bandwidth': bandwidth,
},
'duration': segment_duration,
} for segment_number in range(
representation_ms_info['start_number'],
representation_ms_info['total_number'] + representation_ms_info['start_number'])]
else:
# $Number*$ or $Time$ in media template with S list available
# Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
# Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
representation_ms_info['fragments'] = []
segment_time = 0
segment_d = None
segment_number = representation_ms_info['start_number']
def add_segment_url():
segment_url = media_template % {
'Time': segment_time,
'Bandwidth': bandwidth,
'Number': segment_number,
}
representation_ms_info['fragments'].append({
media_location_key: segment_url,
'duration': float_or_none(segment_d, representation_ms_info['timescale']),
})
for num, s in enumerate(representation_ms_info['s']):
segment_time = s.get('t') or segment_time
segment_d = s['d']
add_segment_url()
segment_number += 1
for r in range(s.get('r', 0)):
segment_time += segment_d
add_segment_url()
segment_number += 1
segment_time += segment_d
elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
# No media template
# Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
# or any YouTube dashsegments video
fragments = []
segment_index = 0
timescale = representation_ms_info['timescale']
for s in representation_ms_info['s']:
duration = float_or_none(s['d'], timescale)
for r in range(s.get('r', 0) + 1):
segment_uri = representation_ms_info['segment_urls'][segment_index]
fragments.append({
location_key(segment_uri): segment_uri,
'duration': duration,
})
segment_index += 1
representation_ms_info['fragments'] = fragments
# NB: MPD manifest may contain direct URLs to unfragmented media.
# No fragments key is present in this case.
if 'fragments' in representation_ms_info:
f.update({
'fragment_base_url': base_url,
'fragments': [],
'protocol': 'http_dash_segments',
})
if 'initialization_url' in representation_ms_info:
initialization_url = representation_ms_info['initialization_url']
if not f.get('url'):
f['url'] = initialization_url
f['fragments'].append({location_key(initialization_url): initialization_url})
f['fragments'].extend(representation_ms_info['fragments'])
try:
existing_format = next(
fo for fo in formats
if fo['format_id'] == representation_id)
except StopIteration:
full_info = formats_dict.get(representation_id, {}).copy()
full_info.update(f)
formats.append(full_info)
else:
existing_format.update(f)
else:
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
return formats
def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True):
res = self._download_webpage_handle(
ism_url, video_id,
note=note or 'Downloading ISM manifest',
errnote=errnote or 'Failed to download ISM manifest',
fatal=fatal)
if res is False:
return []
ism, urlh = res
return self._parse_ism_formats(
compat_etree_fromstring(ism.encode('utf-8')), urlh.geturl(), ism_id)
def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
"""
Parse formats from ISM manifest.
References:
1. [MS-SSTR]: Smooth Streaming Protocol,
https://msdn.microsoft.com/en-us/library/ff469518.aspx
"""
if ism_doc.get('IsLive') == 'TRUE' or ism_doc.find('Protection') is not None:
return []
duration = int(ism_doc.attrib['Duration'])
timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000
formats = []
for stream in ism_doc.findall('StreamIndex'):
stream_type = stream.get('Type')
if stream_type not in ('video', 'audio'):
continue
url_pattern = stream.attrib['Url']
stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
stream_name = stream.get('Name')
for track in stream.findall('QualityLevel'):
fourcc = track.get('FourCC')
# TODO: add support for WVC1 and WMAP
if fourcc not in ('H264', 'AVC1', 'AACL'):
self.report_warning('%s is not a supported codec' % fourcc)
continue
tbr = int(track.attrib['Bitrate']) // 1000
# [1] does not mention Width and Height attributes. However,
# they're often present while MaxWidth and MaxHeight are
# missing, so should be used as fallbacks
width = int_or_none(track.get('MaxWidth') or track.get('Width'))
height = int_or_none(track.get('MaxHeight') or track.get('Height'))
sampling_rate = int_or_none(track.get('SamplingRate'))
track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
fragments = []
fragment_ctx = {
'time': 0,
}
stream_fragments = stream.findall('c')
for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
if not fragment_ctx['duration']:
try:
next_fragment_time = int(stream_fragment[stream_fragment_index + 1].attrib['t'])
except IndexError:
next_fragment_time = duration
fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
for _ in range(fragment_repeat):
fragments.append({
'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
'duration': fragment_ctx['duration'] / stream_timescale,
})
fragment_ctx['time'] += fragment_ctx['duration']
format_id = []
if ism_id:
format_id.append(ism_id)
if stream_name:
format_id.append(stream_name)
format_id.append(compat_str(tbr))
formats.append({
'format_id': '-'.join(format_id),
'url': ism_url,
'manifest_url': ism_url,
'ext': 'ismv' if stream_type == 'video' else 'isma',
'width': width,
'height': height,
'tbr': tbr,
'asr': sampling_rate,
'vcodec': 'none' if stream_type == 'audio' else fourcc,
'acodec': 'none' if stream_type == 'video' else fourcc,
'protocol': 'ism',
'fragments': fragments,
'_download_params': {
'duration': duration,
'timescale': stream_timescale,
'width': width or 0,
'height': height or 0,
'fourcc': fourcc,
'codec_private_data': track.get('CodecPrivateData'),
'sampling_rate': sampling_rate,
'channels': int_or_none(track.get('Channels', 2)),
'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
},
})
return formats
    def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None):
        """Extract media entries from HTML5 <video>/<audio> (and AMP) tags.

        Scans webpage for <video>, <audio>, <amp-video> and <amp-audio>
        tags, collecting formats from their src attribute and nested
        <source> elements and subtitles from nested <track> elements.
        Relative URLs are resolved against base_url. Returns a list of
        partial info dicts with 'formats', 'subtitles' and 'thumbnail'.
        """
        def absolute_url(video_url):
            return compat_urlparse.urljoin(base_url, video_url)
        def parse_content_type(content_type):
            # Derive {'ext', 'vcodec', 'acodec'} hints from a MIME type
            # such as 'video/mp4; codecs="avc1.42E01E, mp4a.40.2"'
            if not content_type:
                return {}
            ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
            if ctr:
                mimetype, codecs = ctr.groups()
                f = parse_codecs(codecs)
                f['ext'] = mimetype2ext(mimetype)
                return f
            return {}
        def _media_formats(src, cur_media_type, type_info={}):
            # Returns (is_plain_url, formats): m3u8/mpd sources expand into
            # multiple formats, plain URLs yield a single-entry list
            full_url = absolute_url(src)
            ext = type_info.get('ext') or determine_ext(full_url)
            if ext == 'm3u8':
                is_plain_url = False
                formats = self._extract_m3u8_formats(
                    full_url, video_id, ext='mp4',
                    entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
                    preference=preference, fatal=False)
            elif ext == 'mpd':
                is_plain_url = False
                formats = self._extract_mpd_formats(
                    full_url, video_id, mpd_id=mpd_id, fatal=False)
            else:
                is_plain_url = True
                formats = [{
                    'url': full_url,
                    'vcodec': 'none' if cur_media_type == 'audio' else None,
                }]
            return is_plain_url, formats
        entries = []
        # amp-video and amp-audio are very similar to their HTML5 counterparts
        # so we will include them right here (see
        # https://www.ampproject.org/docs/reference/components/amp-video)
        # Self-closing tags first (they have no inner content)
        media_tags = [(media_tag, media_type, '')
                      for media_tag, media_type
                      in re.findall(r'(?s)(<(?:amp-)?(video|audio)[^>]*/>)', webpage)]
        media_tags.extend(re.findall(
            # We only allow video|audio followed by a whitespace or '>'.
            # Allowing more characters may end up in significant slow down (see
            # https://github.com/rg3/youtube-dl/issues/11979, example URL:
            # http://www.porntrex.com/maps/videositemap.xml).
            r'(?s)(<(?P<tag>(?:amp-)?(?:video|audio))(?:\s+[^>]*)?>)(.*?)</(?P=tag)>', webpage))
        for media_tag, media_type, media_content in media_tags:
            media_info = {
                'formats': [],
                'subtitles': {},
            }
            media_attributes = extract_attributes(media_tag)
            src = media_attributes.get('src')
            if src:
                _, formats = _media_formats(src, media_type)
                media_info['formats'].extend(formats)
            media_info['thumbnail'] = media_attributes.get('poster')
            if media_content:
                # Nested <source> elements provide alternative formats
                for source_tag in re.findall(r'<source[^>]+>', media_content):
                    source_attributes = extract_attributes(source_tag)
                    src = source_attributes.get('src')
                    if not src:
                        continue
                    f = parse_content_type(source_attributes.get('type'))
                    is_plain_url, formats = _media_formats(src, media_type, f)
                    if is_plain_url:
                        # res attribute is not standard but seen several times
                        # in the wild
                        f.update({
                            'height': int_or_none(source_attributes.get('res')),
                            'format_id': source_attributes.get('label'),
                        })
                        f.update(formats[0])
                        media_info['formats'].append(f)
                    else:
                        media_info['formats'].extend(formats)
                # Nested <track> elements provide subtitles/captions
                for track_tag in re.findall(r'<track[^>]+>', media_content):
                    track_attributes = extract_attributes(track_tag)
                    kind = track_attributes.get('kind')
                    if not kind or kind in ('subtitles', 'captions'):
                        src = track_attributes.get('src')
                        if not src:
                            continue
                        lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
                        media_info['subtitles'].setdefault(lang, []).append({
                            'url': absolute_url(src),
                        })
            if media_info['formats'] or media_info['subtitles']:
                entries.append(media_info)
        return entries
def _extract_akamai_formats(self, manifest_url, video_id, hosts={}):
formats = []
hdcore_sign = 'hdcore=3.7.0'
f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
hds_host = hosts.get('hds')
if hds_host:
f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
if 'hdcore=' not in f4m_url:
f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
f4m_formats = self._extract_f4m_formats(
f4m_url, video_id, f4m_id='hds', fatal=False)
for entry in f4m_formats:
entry.update({'extra_param_to_segment_url': hdcore_sign})
formats.extend(f4m_formats)
m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
hls_host = hosts.get('hls')
if hls_host:
m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
return formats
    def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
        """Probe the standard Wowza endpoints (HLS/HDS/DASH/SMIL-RTMP) for formats.

        url is any Wowza stream URL; its manifest suffix is stripped and the
        well-known per-protocol manifest paths are tried. Protocols listed
        in skip_protocols are not probed (the default [] is never mutated).
        """
        # Strip any trailing manifest filename to get the stream base URL
        url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
        # Scheme-relative base ('//host/path'), so any protocol can be prefixed
        url_base = self._search_regex(
            r'(?:(?:https?|rtmp|rtsp):)?(//[^?]+)', url, 'format url')
        http_base_url = '%s:%s' % ('http', url_base)
        formats = []
        if 'm3u8' not in skip_protocols:
            formats.extend(self._extract_m3u8_formats(
                http_base_url + '/playlist.m3u8', video_id, 'mp4',
                m3u8_entry_protocol, m3u8_id='hls', fatal=False))
        if 'f4m' not in skip_protocols:
            formats.extend(self._extract_f4m_formats(
                http_base_url + '/manifest.f4m',
                video_id, f4m_id='hds', fatal=False))
        if 'dash' not in skip_protocols:
            formats.extend(self._extract_mpd_formats(
                http_base_url + '/manifest.mpd',
                video_id, mpd_id='dash', fatal=False))
        if re.search(r'(?:/smil:|\.smil)', url_base):
            # SMIL stream: derive RTMP formats from the SMIL manifest and
            # clone each one as an RTSP variant
            if 'smil' not in skip_protocols:
                rtmp_formats = self._extract_smil_formats(
                    http_base_url + '/jwplayer.smil',
                    video_id, fatal=False)
                for rtmp_format in rtmp_formats:
                    rtsp_format = rtmp_format.copy()
                    # RTSP uses a single URL instead of streamer + play_path
                    rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
                    del rtsp_format['play_path']
                    del rtsp_format['ext']
                    rtsp_format.update({
                        'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
                        'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
                        'protocol': 'rtsp',
                    })
                    formats.extend([rtmp_format, rtsp_format])
        else:
            # Non-SMIL stream: offer plain RTMP/RTSP endpoints directly
            for protocol in ('rtmp', 'rtsp'):
                if protocol not in skip_protocols:
                    formats.append({
                        'url': '%s:%s' % (protocol, url_base),
                        'format_id': protocol,
                        'protocol': protocol,
                    })
        return formats
def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
    """Locate an inline ``jwplayer(...).setup({...})`` call in *webpage*
    and return its parsed options dict, or None when absent/unparsable."""
    match = re.search(
        r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)(?!</script>).*?\.setup\s*\((?P<options>[^)]+)\)',
        webpage)
    if not match:
        return None
    try:
        options = self._parse_json(
            match.group('options'), video_id=video_id,
            transform_source=transform_source)
    except ExtractorError:
        return None
    # Only a JSON object is a valid jwplayer configuration.
    return options if isinstance(options, dict) else None
def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
    """Find the jwplayer config embedded in *webpage* and parse it into
    one info dict (single video) or a playlist result."""
    data = self._find_jwplayer_data(webpage, video_id,
                                    transform_source=js_to_json)
    return self._parse_jwplayer_data(data, video_id, *args, **kwargs)
def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
                         m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
    """Turn a jwplayer configuration dict into info dict(s).

    Normalizes several historical jwplayer config layouts (flattened
    playlist, single playlist item, flattened sources), extracts formats
    and caption tracks per item, and returns either a single info dict
    (one entry) or a playlist result (several entries).

    require_title -- when True, a missing 'title' raises KeyError.
    """
    # JWPlayer backward compatibility: flattened playlists
    # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
    if 'playlist' not in jwplayer_data:
        jwplayer_data = {'playlist': [jwplayer_data]}
    entries = []
    # JWPlayer backward compatibility: single playlist item
    # https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
    if not isinstance(jwplayer_data['playlist'], list):
        jwplayer_data['playlist'] = [jwplayer_data['playlist']]
    for video_data in jwplayer_data['playlist']:
        # JWPlayer backward compatibility: flattened sources
        # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
        if 'sources' not in video_data:
            video_data['sources'] = [video_data]
        this_video_id = video_id or video_data['mediaid']
        formats = self._parse_jwplayer_formats(
            video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
            mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)
        self._sort_formats(formats)
        # Collect caption tracks; other track kinds (chapters, thumbnails)
        # are ignored.
        subtitles = {}
        tracks = video_data.get('tracks')
        if tracks and isinstance(tracks, list):
            for track in tracks:
                if not isinstance(track, dict):
                    continue
                if track.get('kind') != 'captions':
                    continue
                track_url = urljoin(base_url, track.get('file'))
                if not track_url:
                    continue
                subtitles.setdefault(track.get('label') or 'en', []).append({
                    'url': self._proto_relative_url(track_url)
                })
        entries.append({
            'id': this_video_id,
            'title': video_data['title'] if require_title else video_data.get('title'),
            'description': video_data.get('description'),
            'thumbnail': self._proto_relative_url(video_data.get('image')),
            'timestamp': int_or_none(video_data.get('pubdate')),
            'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
            'subtitles': subtitles,
            'formats': formats,
        })
    if len(entries) == 1:
        return entries[0]
    else:
        return self.playlist_result(entries)
def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
                            m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
    """Convert a jwplayer 'sources' list into youtube-dl format dicts.

    Dispatches on the source's MIME type / URL extension: HLS, DASH and
    SMIL sources are expanded via the dedicated extractors, audio-only
    and progressive sources become single format dicts, and rtmp:// URLs
    are split into url + play_path.  Duplicate source URLs are skipped.
    """
    urls = []
    formats = []
    for source in jwplayer_sources_data:
        if not isinstance(source, dict):
            continue
        source_url = self._proto_relative_url(source.get('file'))
        if not source_url:
            continue
        if base_url:
            source_url = compat_urlparse.urljoin(base_url, source_url)
        # Skip sources already seen in this list.
        if source_url in urls:
            continue
        urls.append(source_url)
        source_type = source.get('type') or ''
        ext = mimetype2ext(source_type) or determine_ext(source_url)
        if source_type == 'hls' or ext == 'm3u8':
            formats.extend(self._extract_m3u8_formats(
                source_url, video_id, 'mp4', entry_protocol='m3u8_native',
                m3u8_id=m3u8_id, fatal=False))
        elif ext == 'mpd':
            formats.extend(self._extract_mpd_formats(
                source_url, video_id, mpd_id=mpd_id, fatal=False))
        elif ext == 'smil':
            formats.extend(self._extract_smil_formats(
                source_url, video_id, fatal=False))
        # https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
        elif source_type.startswith('audio') or ext in (
                'oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
            formats.append({
                'url': source_url,
                'vcodec': 'none',
                'ext': ext,
            })
        else:
            height = int_or_none(source.get('height'))
            if height is None:
                # Often no height is provided but there is a label in
                # format like "1080p", "720p SD", or 1080.
                height = int_or_none(self._search_regex(
                    r'^(\d{3,4})[pP]?(?:\b|$)', compat_str(source.get('label') or ''),
                    'height', default=None))
            a_format = {
                'url': source_url,
                'width': int_or_none(source.get('width')),
                'height': height,
                'tbr': int_or_none(source.get('bitrate')),
                'ext': ext,
            }
            if source_url.startswith('rtmp'):
                a_format['ext'] = 'flv'
                # See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
                # of jwplayer.flash.swf
                rtmp_url_parts = re.split(
                    r'((?:mp4|mp3|flv):)', source_url, 1)
                if len(rtmp_url_parts) == 3:
                    rtmp_url, prefix, play_path = rtmp_url_parts
                    a_format.update({
                        'url': rtmp_url,
                        'play_path': prefix + play_path,
                    })
                if rtmp_params:
                    a_format.update(rtmp_params)
            formats.append(a_format)
    return formats
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
    """Coerce *v* to an int via int_or_none().

    name -- human-readable description used in the failure message
    fatal -- raise ExtractorError on parse failure instead of warning
    Extra keyword arguments are forwarded to int_or_none().
    Returns the parsed int, or None on failure when not fatal.
    """
    # Fix: removed leftover debug "print(getattr(v, kwargs['get_attr']))"
    # which polluted stdout and crashed when v lacked the attribute.
    res = int_or_none(v, **kwargs)
    if res is None:
        msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
        if fatal:
            raise ExtractorError(msg)
        else:
            self._downloader.report_warning(msg)
    return res
def _float(self, v, name, fatal=False, **kwargs):
    """Coerce *v* to a float via float_or_none(), reporting failures
    (raise when *fatal*, otherwise warn) and returning None on failure."""
    res = float_or_none(v, **kwargs)
    if res is not None:
        return res
    msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
    if fatal:
        raise ExtractorError(msg)
    self._downloader.report_warning(msg)
    return res
def _set_cookie(self, domain, name, value, expire_time=None, port=None,
                path='/', secure=False, discard=False, rest=None, **kwargs):
    """Install a cookie into the downloader's cookie jar.

    domain/name/value -- standard cookie fields; a domain starting with
    '.' is marked as domain-specified.
    rest -- dict of non-standard cookie attributes (e.g. {'HttpOnly': None}).
    """
    # Fix: 'rest' used a mutable default argument ({}), shared between
    # calls; normalize to a fresh dict instead (backward compatible).
    if rest is None:
        rest = {}
    cookie = compat_cookiejar.Cookie(
        0, name, value, port, port is not None, domain, True,
        domain.startswith('.'), path, True, secure, expire_time,
        discard, None, None, rest)
    self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
    """Return a compat_cookies.SimpleCookie holding the cookies the jar
    would send for *url*."""
    # Build a throwaway request so the cookiejar computes the header.
    request = sanitized_Request(url)
    self._downloader.cookiejar.add_cookie_header(request)
    return compat_cookies.SimpleCookie(request.get_header('Cookie'))
def get_testcases(self, include_onlymatching=False):
    """Yield this extractor's test-case dicts.

    Accepts either a single _TEST dict or a _TESTS list (having both is
    an error).  'only_matching' cases are skipped unless requested.
    Each yielded dict gets a 'name' key derived from the class name.
    """
    single = getattr(self, '_TEST', None)
    if single:
        assert not hasattr(self, '_TESTS'), \
            '%s has _TEST and _TESTS' % type(self).__name__
        cases = [single]
    else:
        cases = getattr(self, '_TESTS', [])
    for case in cases:
        if case.get('only_matching', False) and not include_onlymatching:
            continue
        case['name'] = type(self).__name__[:-len('IE')]
        yield case
def is_suitable(self, age_limit):
    """ Test whether the extractor is generally suitable for the given
    age limit (i.e. pornographic sites are not, all others usually are) """
    saw_restricted = False
    for case in self.get_testcases(include_onlymatching=False):
        # For playlist tests, judge by the first contained entry.
        if case.get('playlist', []):
            case = case['playlist'][0]
        restricted = age_restricted(
            case.get('info_dict', {}).get('age_limit'), age_limit)
        if not restricted:
            # One unrestricted test case is enough to be suitable.
            return True
        saw_restricted = True
    # Suitable when there were no test cases at all; unsuitable when
    # every test case was age restricted.
    return not saw_restricted
def extract_subtitles(self, *args, **kwargs):
    """Return subtitles when the user asked to write or list them,
    otherwise an empty dict (skipping the network work)."""
    params = self._downloader.params
    wanted = params.get('writesubtitles', False) or params.get('listsubtitles')
    if wanted:
        return self._get_subtitles(*args, **kwargs)
    return {}
def _get_subtitles(self, *args, **kwargs):
    # Hook for subclasses: return a subtitles dict mapping language codes
    # to lists of subtitle item dicts.
    raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs
will be dropped. """
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
return ret
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
    """Merge two {lang: [items]} subtitle dictionaries language by
    language, dropping second-dict items with duplicated URLs."""
    merged = dict(subtitle_dict1)
    for lang, items in subtitle_dict2.items():
        merged[lang] = cls._merge_subtitle_items(
            subtitle_dict1.get(lang, []), items)
    return merged
def extract_automatic_captions(self, *args, **kwargs):
    """Return automatic captions when the user asked to write or list
    subtitles, otherwise an empty dict."""
    params = self._downloader.params
    if params.get('writeautomaticsub', False) or params.get('listsubtitles'):
        return self._get_automatic_captions(*args, **kwargs)
    return {}
def _get_automatic_captions(self, *args, **kwargs):
    # Hook for subclasses that support automatically generated captions.
    raise NotImplementedError('This method must be implemented by subclasses')
def mark_watched(self, *args, **kwargs):
    # Only mark videos watched when the user opted in AND credentials are
    # available (login or cookie file), since the service call needs them.
    if (self._downloader.params.get('mark_watched', False) and
            (self._get_login_info()[0] is not None or
                self._downloader.params.get('cookiefile') is not None)):
        self._mark_watched(*args, **kwargs)
def _mark_watched(self, *args, **kwargs):
    # Hook for subclasses that support marking videos as watched.
    raise NotImplementedError('This method must be implemented by subclasses')
def geo_verification_headers(self):
    """Return HTTP headers advertising the configured geo verification
    proxy, or an empty dict when none is configured."""
    proxy = self._downloader.params.get('geo_verification_proxy')
    if proxy:
        return {'Ytdl-request-proxy': proxy}
    return {}
def _generic_id(self, url):
    """Derive a fallback id from the last path component of *url*
    (percent-decoded, extension stripped)."""
    last_component = url.rstrip('/').split('/')[-1]
    return compat_urllib_parse_unquote(os.path.splitext(last_component)[0])
def _generic_title(self, url):
    """Derive a fallback title from the basename of *url*
    (percent-decoded, extension stripped)."""
    basename = url_basename(url)
    return compat_urllib_parse_unquote(os.path.splitext(basename)[0])
class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search queries extractors.
    They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
    """

    @classmethod
    def _make_valid_url(cls):
        # Prefix is empty (first result only), a positive integer, or 'all'.
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

    @classmethod
    def suitable(cls, url):
        return re.match(cls._make_valid_url(), url) is not None

    def _real_extract(self, query):
        # Parse the "<key><prefix>:<query>" pseudo-URL and dispatch on the
        # requested number of results.
        mobj = re.match(self._make_valid_url(), query)
        if mobj is None:
            raise ExtractorError('Invalid search query "%s"' % query)

        prefix = mobj.group('prefix')
        query = mobj.group('query')
        if prefix == '':
            return self._get_n_results(query, 1)
        elif prefix == 'all':
            return self._get_n_results(query, self._MAX_RESULTS)
        else:
            n = int(prefix)
            if n <= 0:
                raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
            elif n > self._MAX_RESULTS:
                # Clamp the request to the extractor's maximum, warning the user.
                self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                n = self._MAX_RESULTS
            return self._get_n_results(query, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        raise NotImplementedError('This method must be implemented by subclasses')

    @property
    def SEARCH_KEY(self):
        # Public accessor for the class-level search key.
        return self._SEARCH_KEY
| Ballz0fSteel/Umeko | lib/youtube_dl/extractor/common.py | Python | gpl-3.0 | 121,355 | [
"VisIt"
] | 8b38a59a96781f4b2462721ef87750129051b72c8d983d78c0cd69aa8677c508 |
from __future__ import absolute_import
import os.path as op
import numpy as nm
import sfepy
from sfepy.discrete import FieldVariable
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.base.base import assert_
from sfepy.base.testing import TestCommon
class Test(TestCommon):
    """Tests of L2 projections between FEM and IGA fields and of the
    mass matrix of a linear FEM field (sfepy test suite)."""

    @staticmethod
    def from_conf(conf, options):
        # Build a common P1 scalar field on the unit-square triangle mesh,
        # shared by the individual tests below.
        mesh = Mesh.from_file('meshes/2d/square_unit_tri.mesh',
                              prefix_dir=sfepy.data_dir)
        domain = FEDomain('domain', mesh)

        omega = domain.create_region('Omega', 'all')
        field = Field.from_args('linear', nm.float64, 'scalar', omega,
                                approx_order=1)

        test = Test(conf=conf, options=options, omega=omega, field=field)
        return test

    def test_mass_matrix(self):
        # A partition-of-unity basis implies the mass matrix entries sum
        # to the domain volume (1.0 for the unit square).
        from sfepy.discrete.projections import create_mass_matrix

        field = self.field

        mtx = create_mass_matrix(field)

        assert_(mtx.shape == (field.n_nod, field.n_nod))
        assert_(abs(mtx.sum() - 1.0) < 1e-14)

        return True

    def test_projection_tri_quad(self):
        # Project a sine field from a triangular mesh onto a quadrilateral
        # mesh and compare point-wise evaluations of source and target.
        from sfepy.discrete.projections import make_l2_projection

        source = FieldVariable('us', 'unknown', self.field)

        coors = self.field.get_coor()
        vals = nm.sin(2.0 * nm.pi * coors[:,0] * coors[:,1])
        source.set_data(vals)

        name = op.join(self.options.out_dir,
                       'test_projection_tri_quad_source.vtk')
        source.save_as_mesh(name)

        mesh = Mesh.from_file('meshes/2d/square_quad.mesh',
                              prefix_dir=sfepy.data_dir)
        domain = FEDomain('domain', mesh)

        omega = domain.create_region('Omega', 'all')

        field = Field.from_args('bilinear', nm.float64, 'scalar', omega,
                                approx_order=1)

        target = FieldVariable('ut', 'unknown', field)

        make_l2_projection(target, source)

        name = op.join(self.options.out_dir,
                       'test_projection_tri_quad_target.vtk')
        target.save_as_mesh(name)

        # Sample both fields on a regular grid strictly inside the domain
        # (offset by 0.001 to avoid boundary evaluation issues).
        bbox = self.field.domain.get_mesh_bounding_box()
        x = nm.linspace(bbox[0, 0] + 0.001, bbox[1, 0] - 0.001, 20)
        y = nm.linspace(bbox[0, 1] + 0.001, bbox[1, 1] - 0.001, 20)

        xx, yy = nm.meshgrid(x, y)
        test_coors = nm.c_[xx.ravel(), yy.ravel()].copy()

        vec1 = source.evaluate_at(test_coors)
        vec2 = target.evaluate_at(test_coors)

        ok = (nm.abs(vec1 - vec2) < 0.01).all()

        return ok

    def test_projection_iga_fem(self):
        # Project an analytic function onto an IGA field, then L2-project
        # that onto a FEM field and check both agree at random points.
        from sfepy.discrete import FieldVariable
        from sfepy.discrete.fem import FEDomain, Field
        from sfepy.discrete.iga.domain import IGDomain
        from sfepy.mesh.mesh_generators import gen_block_mesh
        from sfepy.discrete.iga.domain_generators import gen_patch_block_domain
        from sfepy.discrete.projections import (make_l2_projection,
                                                make_l2_projection_data)

        shape = [10, 12, 12]
        dims = [5, 6, 6]
        centre = [0, 0, 0]
        degrees = [2, 2, 2]

        nurbs, bmesh, regions = gen_patch_block_domain(dims, shape, centre,
                                                       degrees,
                                                       cp_mode='greville',
                                                       name='iga')
        ig_domain = IGDomain('iga', nurbs, bmesh, regions=regions)

        ig_omega = ig_domain.create_region('Omega', 'all')
        ig_field = Field.from_args('iga', nm.float64, 1, ig_omega,
                                   approx_order='iga', poly_space_base='iga')
        ig_u = FieldVariable('ig_u', 'parameter', ig_field,
                             primary_var_name='(set-to-None)')

        mesh = gen_block_mesh(dims, shape, centre, name='fem')
        fe_domain = FEDomain('fem', mesh)

        fe_omega = fe_domain.create_region('Omega', 'all')
        fe_field = Field.from_args('fem', nm.float64, 1, fe_omega,
                                   approx_order=2)
        fe_u = FieldVariable('fe_u', 'parameter', fe_field,
                             primary_var_name='(set-to-None)')

        def _eval_data(ts, coors, mode, **kwargs):
            # Analytic test function: product of squared coordinates.
            return nm.prod(coors**2, axis=1)[:, None, None]

        make_l2_projection_data(ig_u, _eval_data)

        make_l2_projection(fe_u, ig_u) # This calls ig_u.evaluate_at().

        coors = 0.5 * nm.random.rand(20, 3) * dims

        ig_vals = ig_u.evaluate_at(coors)
        fe_vals = fe_u.evaluate_at(coors)

        ok = nm.allclose(ig_vals, fe_vals, rtol=0.0, atol=1e-12)
        if not ok:
            self.report('iga-fem projection failed!')
            self.report('coors:')
            self.report(coors)
            self.report('iga fem diff:')
            self.report(nm.c_[ig_vals, fe_vals, nm.abs(ig_vals - fe_vals)])

        return ok

    def test_project_tensors(self):
        # Check that project_by_component() reproduces a constant field and
        # its (zero) gradient.
        from sfepy.discrete import FieldVariable
        from sfepy.discrete.projections import project_by_component

        ok = True

        u = FieldVariable('u', 'parameter', self.field,
                          primary_var_name='(set-to-None)')
        u.set_constant(1.0)

        component = FieldVariable('component', 'parameter', self.field,
                                  primary_var_name='(set-to-None)')

        nls_options = {'eps_a' : 1e-16, 'i_max' : 1}

        u_qp = u.evaluate()
        u2 = FieldVariable('u2', 'parameter', self.field,
                           primary_var_name='(set-to-None)')
        project_by_component(u2, u_qp, component, self.field.approx_order,
                             nls_options=nls_options)

        _ok = self.compare_vectors(u(), u2())
        ok = ok and _ok

        gu_qp = u.evaluate(mode='grad')

        gfield = Field.from_args('gu', nm.float64, 2, self.field.region,
                                 approx_order=self.field.approx_order)
        gu = FieldVariable('gu', 'parameter', gfield,
                           primary_var_name='(set-to-None)')

        project_by_component(gu, gu_qp, component, gfield.approx_order,
                             nls_options=nls_options)

        _ok = self.compare_vectors(gu(), nm.zeros_like(gu()))
        ok = ok and _ok

        return ok
| lokik/sfepy | tests/test_projections.py | Python | bsd-3-clause | 6,322 | [
"VTK"
] | d209273effc198302a575d8c98116b74dc0f22b8a20bf0210f045567d68bb27c |
# Optional-dependency check for the ASE test suite: attempt to import
# matplotlib only when a display is available, printing installation hints
# on failure.  NOTE: Python 2 syntax (print >> stream).
import sys

# User-facing installation hint, assembled piece by piece.
msg = "\nThe matplotlib python module is missing or not installed properly.\n"
msg += "Is the PYTHONPATH environment variable set correctly?\n"
msg += "Please verify your installation by running on the command line:\n"
msg += "python -c 'import matplotlib'\n"
msg += "\n"
msg += "This module is optional and required in order to use "
msg += "ASE's simple GUI (ase-gui).\n"
msg += "If you don't wish to use ase-gui ignore this error, otherwise\n"
msg += "please install the package using "
msg += "your distribution package manager, i.e.:\n"
msg += "\n"
msg += " Debian/Ubuntu: sudo apt-get python-matplotlib\n"
msg += "\n"
msg += " OpenSUSE: yast -i python-matplotlib\n"
msg += "\n"
msg += " Red Hat/Fedora: yum install python-matplotlib\n"
msg += "\n"
msg += "or perform manual installation, preferably as non-root user,\n"
msg += "following http://matplotlib.sourceforge.net/users/installing.html."
# 'display' is presumably injected into this module's namespace by the test
# runner before execution -- TODO confirm against the ASE test driver.
if locals().get('display'):
    try:
        import matplotlib
    except ImportError:
        print >> sys.stderr, msg
        raise
| grhawk/ASE | tools/ase/test/dependency_matplotlib.py | Python | gpl-2.0 | 1,052 | [
"ASE"
] | 7791dded95e1bc63479bb2b44d3a4f132d4f869b2a317a4792e83d57a99ba1fe |
# -*- coding: utf-8 -*-
# Copyright (C) 2003 CAMP
# Please see the accompanying LICENSE file for further information.
"""This module defines a Hamiltonian."""
from math import pi, sqrt
import numpy as np
from gpaw.poisson import PoissonSolver
from gpaw.transformers import Transformer
from gpaw.lfc import LFC
from gpaw.utilities import pack2,unpack,unpack2
from gpaw.utilities.tools import tri2full
class Hamiltonian:
    """Hamiltonian object.

    Attributes:
     =============== =====================================================
     ``xc``          ``XC3DGrid`` object.
     ``poisson``     ``PoissonSolver``.
     ``gd``          Grid descriptor for coarse grids.
     ``finegd``      Grid descriptor for fine grids.
     ``restrict``    Function for restricting the effective potential.
     =============== =====================================================

    Soft and smooth pseudo functions on uniform 3D grids:
     ========== =========================================
     ``vHt_g``  Hartree potential on the fine grid.
     ``vt_sG``  Effective potential on the coarse grid.
     ``vt_sg``  Effective potential on the fine grid.
     ========== =========================================

    Energy contributions and forces:
     =========== ==========================================
                 Description
     =========== ==========================================
     ``Ekin``    Kinetic energy.
     ``Epot``    Potential energy.
     ``Etot``    Total energy.
     ``Exc``     Exchange-Correlation energy.
     ``Eext``    Energy of external potential
     ``Eref``    Reference energy for all-electron atoms.
     ``S``       Entropy.
     ``Ebar``    Should be close to zero!
     =========== ==========================================
    """

    def __init__(self, gd, finegd, nspins, setups, timer, xc,
                 vext=None, collinear=True):
        """Create the Hamiltonian."""
        self.gd = gd
        self.finegd = finegd
        self.nspins = nspins
        self.setups = setups
        self.timer = timer
        self.xc = xc
        self.collinear = collinear
        # Non-collinear calculations carry 2x2 spin components per density.
        self.ncomp = 2 - int(collinear)
        self.dH_asp = None

        # The external potential
        self.vext = vext

        self.vt_sG = None
        self.vHt_g = None
        self.vt_sg = None

        # Domain-decomposition rank owning each atom (set in set_positions).
        self.rank_a = None

        # Energy contributions; filled in by update()/get_energy().
        self.Ekin0 = None
        self.Ekin = None
        self.Epot = None
        self.Ebar = None
        self.Eext = None
        self.Exc = None
        self.Etot = None
        self.S = None

    def summary(self, fd):
        # One-line description of the fine-grid size, written to *fd*.
        fd.write('XC and Coulomb potentials evaluated on a %d*%d*%d grid\n' %
                 tuple(self.finegd.N_c))

    def set_positions(self, spos_ac, rank_a=None):
        """Update atomic positions and redistribute dH_asp matrices when
        the domain decomposition (atom ownership) changes."""
        self.spos_ac = spos_ac
        self.vbar.set_positions(spos_ac)
        self.xc.set_positions(spos_ac)

        # If both old and new atomic ranks are present, start a blank dict if
        # it previously didn't exist but it will needed for the new atoms.
        if (self.rank_a is not None and rank_a is not None and
            self.dH_asp is None and (rank_a == self.gd.comm.rank).any()):
            self.dH_asp = {}

        if self.rank_a is not None and self.dH_asp is not None:
            self.timer.start('Redistribute')
            requests = []
            flags = (self.rank_a != rank_a)
            # Atoms newly owned by this rank / atoms leaving this rank.
            my_incoming_atom_indices = np.argwhere(np.bitwise_and(flags, \
                rank_a == self.gd.comm.rank)).ravel()
            my_outgoing_atom_indices = np.argwhere(np.bitwise_and(flags, \
                self.rank_a == self.gd.comm.rank)).ravel()

            for a in my_incoming_atom_indices:
                # Get matrix from old domain:
                ni = self.setups[a].ni
                dH_sp = np.empty((self.nspins * self.ncomp**2,
                                  ni * (ni + 1) // 2))
                requests.append(self.gd.comm.receive(dH_sp, self.rank_a[a],
                                                     tag=a, block=False))
                assert a not in self.dH_asp
                self.dH_asp[a] = dH_sp

            for a in my_outgoing_atom_indices:
                # Send matrix to new domain:
                dH_sp = self.dH_asp.pop(a)
                requests.append(self.gd.comm.send(dH_sp, rank_a[a],
                                                  tag=a, block=False))
            self.gd.comm.waitall(requests)
            self.timer.stop('Redistribute')

        self.rank_a = rank_a

    def aoom(self, DM, a, l, scale=1):
        """Atomic Orbital Occupation Matrix.

        Determine the Atomic Orbital Occupation Matrix (aoom) for a
        given l-quantum number.

        This operation, takes the density matrix (DM), which for
        example is given by unpack2(D_asq[i][spin]), and corrects for
        the overlap between the selected orbitals (l) upon which the
        the density is expanded (ex <p|p*>,<p|p>,<p*|p*> ).

        Returned is only the "corrected" part of the density matrix,
        which represents the orbital occupation matrix for l=2 this is
        a 5x5 matrix.
        """
        S=self.setups[a]
        l_j = S.l_j
        n_j = S.n_j
        lq  = S.lq
        nl  = np.where(np.equal(l_j, l))[0]
        V = np.zeros(np.shape(DM))
        if len(nl) == 2:
            # Two projectors with this l: correct all four sub-blocks.
            aa = (nl[0])*len(l_j)-((nl[0]-1)*(nl[0])/2)
            bb = (nl[1])*len(l_j)-((nl[1]-1)*(nl[1])/2)
            ab = aa+nl[1]-nl[0]

            if(scale==0 or scale=='False' or scale =='false'):
                lq_a  = lq[aa]
                lq_ab = lq[ab]
                lq_b  = lq[bb]
            else:
                # Scale overlaps relative to the first projector.
                lq_a  = 1
                lq_ab = lq[ab]/lq[aa]
                lq_b  = lq[bb]/lq[aa]

            # and the correct entrances in the DM
            nn = (2*np.array(l_j)+1)[0:nl[0]].sum()
            mm = (2*np.array(l_j)+1)[0:nl[1]].sum()

            # finally correct and add the four submatrices of NC_DM
            A = DM[nn:nn+2*l+1,nn:nn+2*l+1]*(lq_a)
            B = DM[nn:nn+2*l+1,mm:mm+2*l+1]*(lq_ab)
            C = DM[mm:mm+2*l+1,nn:nn+2*l+1]*(lq_ab)
            D = DM[mm:mm+2*l+1,mm:mm+2*l+1]*(lq_b)

            V[nn:nn+2*l+1,nn:nn+2*l+1]=+(lq_a)
            V[nn:nn+2*l+1,mm:mm+2*l+1]=+(lq_ab)
            V[mm:mm+2*l+1,nn:nn+2*l+1]=+(lq_ab)
            V[mm:mm+2*l+1,mm:mm+2*l+1]=+(lq_b)

            return  A+B+C+D, V
        else:
            # Single projector with this l: one diagonal block.
            nn =(2*np.array(l_j)+1)[0:nl[0]].sum()
            A=DM[nn:nn+2*l+1,nn:nn+2*l+1]*lq[-1]
            V[nn:nn+2*l+1,nn:nn+2*l+1]=+lq[-1]
            return A,V

    def update(self, density):
        """Calculate effective potential.

        The XC-potential and the Hartree potential are evaluated on
        the fine grid, and the sum is then restricted to the coarse
        grid."""

        self.timer.start('Hamiltonian')

        if self.vt_sg is None:
            self.timer.start('Initialize Hamiltonian')
            self.vt_sg = self.finegd.empty(self.nspins * self.ncomp**2)
            self.vHt_g = self.finegd.zeros()
            self.vt_sG = self.gd.empty(self.nspins * self.ncomp**2)
            self.poisson.initialize()
            self.timer.stop('Initialize Hamiltonian')

        # Smooth-part energies; atomic (PAW) corrections are added below.
        Ekin, Epot, Ebar, Eext, Exc, W_aL = \
            self.update_pseudo_potential(density)

        self.timer.start('Atomic')
        self.dH_asp = {}
        for a, D_sp in density.D_asp.items():
            W_L = W_aL[a]
            setup = self.setups[a]

            D_p = D_sp[:self.nspins].sum(0)
            # Atomic Hamiltonian: kinetic, electrostatic, zero-potential
            # and compensation-charge contributions.
            dH_p = (setup.K_p + setup.M_p +
                    setup.MB_p + 2.0 * np.dot(setup.M_pp, D_p) +
                    np.dot(setup.Delta_pL, W_L))
            Ekin += np.dot(setup.K_p, D_p) + setup.Kc
            Ebar += setup.MB + np.dot(setup.MB_p, D_p)
            Epot += setup.M + np.dot(D_p, (setup.M_p +
                                           np.dot(setup.M_pp, D_p)))

            if (self.vext is not None
                and hasattr(self.vext, 'get_dVext_p')
                and self.vext.use_dVext_p(a)):
                _dVext_p = self.vext.get_dVext_p(a)
                # RTXS modification
                Eext += np.dot(D_p, _dVext_p)
                dH_p += _dVext_p
            elif self.vext is not None:
                vext = self.vext.get_taylor(spos_c=self.spos_ac[a, :])
                # Tailor expansion to the zeroth order
                Eext += vext[0][0] * (sqrt(4 * pi) * density.Q_aL[a][0]
                                      + setup.Z)
                dH_p += vext[0][0] * sqrt(4 * pi) * setup.Delta_pL[:, 0]
                if len(vext) > 1:
                    # Tailor expansion to the first order
                    Eext += sqrt(4 * pi / 3) * np.dot(vext[1],
                                                      density.Q_aL[a][1:4])
                    # there must be a better way XXXX
                    Delta_p1 = np.array([setup.Delta_pL[:, 1],
                                          setup.Delta_pL[:, 2],
                                          setup.Delta_pL[:, 3]])
                    dH_p += sqrt(4 * pi / 3) * np.dot(vext[1], Delta_p1)

            self.dH_asp[a] = dH_sp = np.zeros_like(D_sp)
            self.timer.start('XC Correction')
            Exc += self.xc.calculate_paw_correction(setup, D_sp, dH_sp, a=a)
            self.timer.stop('XC Correction')

            if setup.HubU is not None:
                # DFT+U correction on the selected l-shell (Hubl/HubU).
                assert self.collinear
                nspins = len(D_sp)

                l_j = setup.l_j
                l   = setup.Hubl
                nl  = np.where(np.equal(l_j,l))[0]
                nn  = (2*np.array(l_j)+1)[0:nl[0]].sum()

                for D_p, H_p in zip(D_sp, self.dH_asp[a]):
                    [N_mm,V] =self.aoom(unpack2(D_p),a,l)
                    N_mm = N_mm / 2 * nspins

                    Eorb = setup.HubU / 2. * (N_mm - np.dot(N_mm,N_mm)).trace()
                    Vorb = setup.HubU * (0.5 * np.eye(2*l+1) - N_mm)
                    Exc += Eorb
                    if nspins == 1:
                        # add contribution of other spin manyfold
                        Exc += Eorb

                    if len(nl)==2:
                        mm  = (2*np.array(l_j)+1)[0:nl[1]].sum()

                        V[nn:nn+2*l+1,nn:nn+2*l+1] *= Vorb
                        V[mm:mm+2*l+1,nn:nn+2*l+1] *= Vorb
                        V[nn:nn+2*l+1,mm:mm+2*l+1] *= Vorb
                        V[mm:mm+2*l+1,mm:mm+2*l+1] *= Vorb
                    else:
                        V[nn:nn+2*l+1,nn:nn+2*l+1] *= Vorb

                    Htemp = unpack(H_p)
                    Htemp += V
                    H_p[:] = pack2(Htemp)

            dH_sp[:self.nspins] += dH_p
            Ekin -= (D_sp * dH_sp).sum()  # NCXXX
        self.timer.stop('Atomic')

        # Make corrections due to non-local xc:
        #xcfunc = self.xc.xcfunc
        self.Enlxc = 0.0#XXXxcfunc.get_non_local_energy()
        Ekin += self.xc.get_kinetic_energy_correction() / self.gd.comm.size

        # Reduce energy contributions over the domain communicator.
        energies = np.array([Ekin, Epot, Ebar, Eext, Exc])
        self.timer.start('Communicate energies')
        self.gd.comm.sum(energies)
        self.timer.stop('Communicate energies')
        (self.Ekin0, self.Epot, self.Ebar, self.Eext, self.Exc) = energies

        #self.Exc += self.Enlxc
        #self.Ekin0 += self.Enlkin

        self.timer.stop('Hamiltonian')

    def get_energy(self, occupations):
        # Band energy completes the kinetic term; entropy from occupations.
        self.Ekin = self.Ekin0 + occupations.e_band
        self.S = occupations.e_entropy

        # Total free energy:
        self.Etot = (self.Ekin + self.Epot + self.Eext +
                     self.Ebar + self.Exc - self.S)

        return self.Etot

    def calculate_forces(self, dens, F_av):
        """Accumulate Hamiltonian force contributions into F_av."""
        ghat_aLv = dens.ghat.dict(derivative=True)
        nct_av = dens.nct.dict(derivative=True)
        vbar_av = self.vbar.dict(derivative=True)

        self.calculate_forces2(dens, ghat_aLv, nct_av, vbar_av)

        # Force from compensation charges:
        for a, dF_Lv in ghat_aLv.items():
            F_av[a] += np.dot(dens.Q_aL[a], dF_Lv)

        # Force from smooth core charge:
        for a, dF_v in nct_av.items():
            F_av[a] += dF_v[0]

        # Force from zero potential:
        for a, dF_v in vbar_av.items():
            F_av[a] += dF_v[0]

        self.xc.add_forces(F_av)
        self.gd.comm.sum(F_av, 0)

    def apply_local_potential(self, psit_nG, Htpsit_nG, s):
        """Apply the Hamiltonian operator to a set of vectors.

        XXX Parameter description is deprecated!

        Parameters:

        a_nG: ndarray
            Set of vectors to which the overlap operator is applied.
        b_nG: ndarray, output
            Resulting H times a_nG vectors.
        kpt: KPoint object
            k-point object defined in kpoint.py.
        calculate_projections: bool
            When True, the integrals of projector times vectors
            P_ni = <p_i | a_nG> are calculated.
            When False, existing P_uni are used
        local_part_only: bool
            When True, the non-local atomic parts of the Hamiltonian
            are not applied and calculate_projections is ignored.

        """
        vt_G = self.vt_sG[s]
        if psit_nG.ndim == 3:
            Htpsit_nG += psit_nG * vt_G
        else:
            for psit_G, Htpsit_G in zip(psit_nG, Htpsit_nG):
                Htpsit_G += psit_G * vt_G

    def apply(self, a_xG, b_xG, wfs, kpt, calculate_P_ani=True):
        """Apply the Hamiltonian operator to a set of vectors.

        Parameters:

        a_nG: ndarray
            Set of vectors to which the overlap operator is applied.
        b_nG: ndarray, output
            Resulting S times a_nG vectors.
        wfs: WaveFunctions
            Wave-function object defined in wavefunctions.py
        kpt: KPoint object
            k-point object defined in kpoint.py.
        calculate_P_ani: bool
            When True, the integrals of projector times vectors
            P_ni = <p_i | a_nG> are calculated.
            When False, existing P_ani are used

        """
        wfs.kin.apply(a_xG, b_xG, kpt.phase_cd)
        self.apply_local_potential(a_xG, b_xG, kpt.s)
        shape = a_xG.shape[:-3]
        P_axi = wfs.pt.dict(shape)

        if calculate_P_ani: #TODO calculate_P_ani=False is experimental
            wfs.pt.integrate(a_xG, P_axi, kpt.q)
        else:
            for a, P_ni in kpt.P_ani.items():
                P_axi[a][:] = P_ni

        # Non-local PAW contribution: sum_a |p_a> dH_a <p_a|psi>.
        for a, P_xi in P_axi.items():
            dH_ii = unpack(self.dH_asp[a][kpt.s])
            P_axi[a] = np.dot(P_xi, dH_ii)
        wfs.pt.add(b_xG, P_axi, kpt.q)

    def get_xc_difference(self, xc, density):
        """Calculate non-selfconsistent XC-energy difference."""
        if density.nt_sg is None:
            density.interpolate()
        nt_sg = density.nt_sg
        if hasattr(xc, 'hybrid'):
            xc.calculate_exx()
        Exc = xc.calculate(density.finegd, nt_sg) / self.gd.comm.size
        for a, D_sp in density.D_asp.items():
            setup = self.setups[a]
            Exc += xc.calculate_paw_correction(setup, D_sp)
        Exc = self.gd.comm.sum(Exc)
        return Exc - self.Exc

    def estimate_memory(self, mem):
        # Coarse- and fine-grid array sizes in bytes.
        nbytes = self.gd.bytecount()
        nfinebytes = self.finegd.bytecount()
        arrays = mem.subnode('Arrays', 0)
        arrays.subnode('vHt_g', nfinebytes)
        arrays.subnode('vt_sG', self.nspins * nbytes)
        arrays.subnode('vt_sg', self.nspins * nfinebytes)
        self.xc.estimate_memory(mem.subnode('XC'))
        self.poisson.estimate_memory(mem.subnode('Poisson'))
        self.vbar.estimate_memory(mem.subnode('vbar'))
class RealSpaceHamiltonian(Hamiltonian):
    """Hamiltonian with potentials represented on real-space grids:
    Poisson solver on the fine grid, restriction to the coarse grid."""

    def __init__(self, gd, finegd, nspins, setups, timer, xc,
                 vext=None, collinear=True, psolver=None, stencil=3):
        Hamiltonian.__init__(self, gd, finegd, nspins, setups, timer, xc,
                             vext, collinear)

        # Solver for the Poisson equation:
        if psolver is None:
            psolver = PoissonSolver(nn=3, relax='J')
        self.poisson = psolver
        self.poisson.set_grid_descriptor(finegd)

        # Restrictor function for the potential:
        self.restrictor = Transformer(self.finegd, self.gd, stencil)
        self.restrict = self.restrictor.apply

        # Localized zero-potential contributions vbar on the fine grid.
        self.vbar = LFC(self.finegd, [[setup.vbar] for setup in setups],
                        forces=True)
        self.vbar_g = None

    def summary(self, fd):
        Hamiltonian.summary(self, fd)

        # Report restriction stencil order and the Poisson solver used.
        degree = self.restrictor.nn * 2 - 1
        name = ['linear', 'cubic', 'quintic', 'heptic'][degree // 2]
        fd.write('Interpolation: tri-%s ' % name +
                 '(%d. degree polynomial)\n' % degree)

        fd.write('Poisson solver: %s\n' % self.poisson.description)

    def set_positions(self, spos_ac, rank_a=None):
        Hamiltonian.set_positions(self, spos_ac, rank_a)
        if self.vbar_g is None:
            self.vbar_g = self.finegd.empty()
        # Rebuild the zero potential for the new atomic positions.
        self.vbar_g[:] = 0.0
        self.vbar.add(self.vbar_g)

    def update_pseudo_potential(self, density):
        """Evaluate the smooth-part potentials/energies on the fine grid
        and restrict the effective potential to the coarse grid.

        Returns (Ekin, Epot, Ebar, Eext, Exc, W_aL) where W_aL holds the
        Hartree-potential integrals with the compensation charges.
        """
        self.timer.start('vbar')
        Ebar = self.finegd.integrate(self.vbar_g, density.nt_g,
                                     global_integral=False)

        vt_g = self.vt_sg[0]
        vt_g[:] = self.vbar_g
        self.timer.stop('vbar')

        Eext = 0.0
        if self.vext is not None:
            assert self.collinear
            # RTXS modification
            vt_ext_g = self.vext.get_potential(self.finegd)
            vt_g += vt_ext_g
            Eext += self.finegd.integrate(vt_ext_g, density.nt_g,
                                          global_integral=False) #- Ebar

        self.vt_sg[1:self.nspins] = vt_g

        self.vt_sg[self.nspins:] = 0.0

        self.timer.start('XC 3D grid')
        Exc = self.xc.calculate(self.finegd, density.nt_sg, self.vt_sg)
        Exc /= self.gd.comm.size
        self.timer.stop('XC 3D grid')

        self.timer.start('Poisson')
        # npoisson is the number of iterations:
        self.npoisson = self.poisson.solve(self.vHt_g, density.rhot_g,
                                           charge=-density.charge)
        self.timer.stop('Poisson')

        self.timer.start('Hartree integrate/restrict')
        Epot = 0.5 * self.finegd.integrate(self.vHt_g, density.rhot_g,
                                           global_integral=False)

        Ekin = 0.0
        s = 0
        # Restrict each spin component of the effective potential and
        # accumulate the pseudo kinetic-energy double-counting term.
        for vt_g, vt_G, nt_G in zip(self.vt_sg, self.vt_sG, density.nt_sG):
            if s < self.nspins:
                vt_g += self.vHt_g
            self.restrict(vt_g, vt_G)
            if s < self.nspins:
                Ekin -= self.gd.integrate(vt_G, nt_G - density.nct_G,
                                          global_integral=False)
            else:
                Ekin -= self.gd.integrate(vt_G, nt_G, global_integral=False)
            s += 1
        self.timer.stop('Hartree integrate/restrict')

        # Calculate atomic hamiltonians:
        W_aL = {}
        for a in density.D_asp:
            W_aL[a] = np.empty((self.setups[a].lmax + 1)**2)
        density.ghat.integrate(self.vHt_g, W_aL)

        return Ekin, Epot, Ebar, Eext, Exc, W_aL

    def calculate_forces2(self, dens, ghat_aLv, nct_av, vbar_av):
        # Derivative integrals of the localized functions against the
        # appropriate grids (spin-averaged coarse potential for nct).
        if self.nspins == 2:
            vt_G = self.vt_sG.mean(0)
        else:
            vt_G = self.vt_sG[0]

        dens.ghat.derivative(self.vHt_g, ghat_aLv)
        dens.nct.derivative(vt_G, nct_av)
        self.vbar.derivative(dens.nt_g, vbar_av)
| ajylee/gpaw-rtxs | gpaw/hamiltonian.py | Python | gpl-3.0 | 19,793 | [
"GPAW"
] | 39041006141a777b05d2aea590abd092b921a303b366caba2928768e8cdae8ff |
""" Test cktapps """
import pytest
from StringIO import StringIO
from collections import OrderedDict
from textwrap import dedent
from cktapps import core
from cktapps import Ckt
from cktapps.formats import spice
class TestSpiceReadLine:
    """Tests for spice.Reader.read_line(): comment handling and '+'
    line-continuation (unwrapping), including invalid continuations."""

    def test_simple(self):
        # Each physical line yields (text, filename, lineno) unchanged.
        f = StringIO("a b\n"
                     "* c\n"
                     " d $ e")
        f.name = "<string>"

        lines = [line for line in spice.Reader.read_line(f)]

        assert lines[0] == ("a b", "<string>", 1)
        assert lines[1] == ("* c", "<string>", 2)
        assert lines[2] == (" d $ e", "<string>", 3)

    def test_unwrap_single(self):
        # A single '+' continuation is joined; lineno is the last line.
        f = StringIO("a b\n"
                     "+ c\n"
                     " d $ e")
        f.name = "<string>"

        lines = [line for line in spice.Reader.read_line(f)]

        assert lines[0] == ("a b c", "<string>", 2)
        assert lines[1] == (" d $ e", "<string>", 3)

    def test_unwrap_multi(self):
        # Several consecutive continuations join into one logical line.
        f = StringIO("a b \n"
                     "+ c1\n"
                     "+c2\n"
                     "+ c3\n"
                     " d $ e")
        f.name = "<string>"

        lines = [line for line in spice.Reader.read_line(f)]

        assert lines[0] == ("a b c1 c2 c3", "<string>", 4)
        assert lines[1] == (" d $ e", "<string>", 5)

    def test_unwrap_blank_line(self):
        # A continuation directly after a blank line is a syntax error.
        f = StringIO("a b\n"
                     "c1\n"
                     " \n"
                     "+ c2\n"
                     " d $ e")
        f.name = "<string>"

        with pytest.raises(spice.SyntaxError) as e:
            lines = [line for line in spice.Reader.read_line(f)]
        assert e.value.message == "invalid line continuation: <string>, 4\n-> + c2"

    def test_unwrap_leading_comment(self):
        # A continuation directly after a comment line is a syntax error.
        f = StringIO("a b\n"
                     "c1\n"
                     "* comment\n"
                     "+ c2\n"
                     " d $ e")
        f.name = "<string>"

        with pytest.raises(spice.SyntaxError) as e:
            lines = [line for line in spice.Reader.read_line(f)]
        assert e.value.message == "invalid line continuation: <string>, 4\n-> + c2"

    def test_unwrap_trailing_comment(self):
        # A continuation after a line ending in a '$' comment is rejected.
        f = StringIO("a b\n"
                     "c1 $comment\n"
                     "+ c2\n"
                     " d $ e")
        f.name = "<string>"

        with pytest.raises(spice.SyntaxError) as e:
            lines = [line for line in spice.Reader.read_line(f)]
        assert e.value.message == "invalid line continuation: <string>, 3\n-> + c2"
class TestSpiceSplitLine:
    """spice.Reader._tokenize(): splitting a logical line into tokens.
    Whitespace around '=' is normalized, quoted expressions have internal
    whitespace stripped, and '*'/'$' comment markers become separate tokens."""
    def test_args(self):
        tokens = spice.Reader._tokenize('a1 a2 a3')
        assert tokens == ['a1', 'a2', 'a3']
    def test_kwargs(self):
        """'k = v' in any spacing collapses to a single 'k=v' token."""
        line = 'a1 a2 k1=v1 k2= v2 k3 =v3 k4 = v4'
        tokens = spice.Reader._tokenize(line)
        assert tokens == ['a1', 'a2', 'k1=v1', 'k2=v2',
                          'k3=v3', 'k4=v4']
    def test_kwargs_exp1(self):
        """Double-quoted expressions lose quotes and internal spaces."""
        line = 'a1 a2 k1=" 1* 2" k2 = " (1 + v2) " k3 = 3.0p k4= v4 '
        tokens = spice.Reader._tokenize(line)
        assert tokens == ['a1', 'a2', 'k1=1*2', 'k2=(1+v2)',
                          'k3=3.0p', 'k4=v4']
    def test_kwargs_exp2(self):
        """Single-quoted expressions behave like double-quoted ones."""
        line = 'a1 a2 k1=\' 1* 2\' k2 = " (1 + v2) " k3 = \'3.0p \' k4= v4 '
        tokens = spice.Reader._tokenize(line)
        assert tokens == ['a1', 'a2', 'k1=1*2', 'k2=(1+v2)',
                          'k3=3.0p', 'k4=v4']
    def test_blank_line(self):
        tokens = spice.Reader._tokenize('   ')
        assert tokens == []
    def test_comment_line(self):
        """'*' and '$' markers are split out regardless of adjacent spacing."""
        line = '* ab c '
        tokens = spice.Reader._tokenize(line)
        assert tokens == ['*', 'ab', 'c']
        line = ' * ab c '
        tokens = spice.Reader._tokenize(line)
        assert tokens == ['*', 'ab', 'c']
        line = '*ab c '
        tokens = spice.Reader._tokenize(line)
        assert tokens == ['*', 'ab', 'c']
        line = '$ ab c '
        tokens = spice.Reader._tokenize(line)
        assert tokens == ['$', 'ab', 'c']
    def test_tailing_comment(self):
        line = 'ab $ c d'
        tokens = spice.Reader._tokenize(line)
        assert tokens == ['ab', '$', 'c', 'd']
        line = 'ab $c d'
        tokens = spice.Reader._tokenize(line)
        assert tokens == ['ab', '$', 'c', 'd']
        line = 'ab$ c d'
        tokens = spice.Reader._tokenize(line)
        assert tokens == ['ab', '$', 'c', 'd']
        line = 'ab$c d'
        tokens = spice.Reader._tokenize(line)
        assert tokens == ['ab', '$', 'c', 'd']
    def test_kwarg_tailing_comment(self):
        """A '$' directly after '=' still splits into its own token."""
        line = 'ab c=$d'
        tokens = spice.Reader._tokenize(line)
        assert tokens == ['ab', 'c=', '$', 'd']
class TestSpiceParseLine:
    """spice.Reader._parse(): classification of a token list into a dict with
    'type' ([kind, keyword]), positional 'args', keyword 'kwargs'
    (an OrderedDict) and trailing 'comment' text.  Also covers SPICE unit
    suffix expansion (p -> 1e-12, ff -> 1e-15, ...)."""
    def test_element_args(self):
        """Element type is taken from the first letter of the card name."""
        tokens = 'm a1 a2 a3'.split()
        parsed = spice.Reader._parse(tokens)
        assert parsed == {'type'   : ['element', 'm'],
                          'args'   : ['m', 'a1', 'a2', 'a3'],
                          'kwargs' : OrderedDict(),
                          'comment': ''
                         }
        tokens = 'mxy a1 a2 a3'.split()
        parsed = spice.Reader._parse(tokens)
        assert parsed == {'type'   : ['element', 'm'],
                          'args'   : ['mxy', 'a1', 'a2', 'a3'],
                          'kwargs' : OrderedDict(),
                          'comment': ''
                         }
    def test_control_args(self):
        """A leading '.' marks a control card; keyword loses the dot."""
        tokens = '.subckt a1 a2 a3'.split()
        parsed = spice.Reader._parse(tokens)
        assert parsed == {'type'   : ['control', 'subckt'],
                          'args'   : ['.subckt', 'a1', 'a2', 'a3'],
                          'kwargs' : OrderedDict(),
                          'comment': ''
                         }
    def test_element_kwargs1(self):
        tokens = 'mxy a1 a2 kw1=v1'.split()
        parsed = spice.Reader._parse(tokens)
        assert parsed == {'type'   : ['element', 'm'],
                          'args'   : ['mxy', 'a1', 'a2'],
                          'kwargs' : OrderedDict(kw1='v1'),
                          'comment': ''
                         }
    def test_element_kwargs2(self):
        tokens = 'mxy a1 a2 kw1=v1 kw2=v2'.split()
        parsed = spice.Reader._parse(tokens)
        assert parsed == {'type'   : ['element', 'm'],
                          'args'   : ['mxy', 'a1', 'a2'],
                          'kwargs' : OrderedDict(kw1='v1', kw2='v2'),
                          'comment': ''
                         }
    def test_control_kwargs2(self):
        tokens = '.subckt a1 a2 kw1=v1 kw2=v2'.split()
        parsed = spice.Reader._parse(tokens)
        assert parsed == {'type'   : ['control', 'subckt'],
                          'args'   : ['.subckt', 'a1', 'a2'],
                          'kwargs' : OrderedDict(kw1='v1', kw2='v2'),
                          'comment': ''
                         }
    def test_control_kwargs_only(self):
        tokens = '.subckt kw1=v1 kw2=v2'.split()
        parsed = spice.Reader._parse(tokens)
        assert parsed == {'type'   : ['control', 'subckt'],
                          'args'   : ['.subckt'],
                          'kwargs' : OrderedDict(kw1='v1', kw2='v2'),
                          'comment': ''
                         }
    def test_control_kwargs_bad1(self):
        """Positional args may not follow keyword args."""
        tokens = '.subckt kw1=v1 a1 kw2=v2'.split()
        with pytest.raises(spice.SyntaxError) as e:
            parsed = spice.Reader._parse(tokens)
        #assert e.value.message == "unexpected token 'kw2' at pos '6'"
        assert e.value.message == "unexpected token 'a1' at pos '2'"
    def test_control_kwargs_bad2(self):
        """A dangling 'k=' with no value is rejected."""
        tokens = '.subckt kw1=v1 kw2='.split()
        with pytest.raises(spice.SyntaxError) as e:
            parsed = spice.Reader._parse(tokens)
        #assert e.value.message == "unexpected token '=' at pos '6'"
        assert e.value.message == "missing parameter value: kw2=?"
    def test_control_kwargs_bad3(self):
        """A bare name after keyword args is rejected."""
        tokens = '.subckt kw1=v1 kw2'.split()
        with pytest.raises(spice.SyntaxError) as e:
            parsed = spice.Reader._parse(tokens)
        #assert e.value.message == "unexpected token 'kw2' at pos '5'"
        assert e.value.message == "unexpected token 'kw2' at pos '2'"
    def test_comment_line_skip_true(self):
        """By default, comment-only lines parse to None."""
        tokens = '* mxy a1 a2 kw1=v1'.split()
        parsed = spice.Reader._parse(tokens)
        assert parsed is None
        #assert parsed == {'type'   : ['comment', '*'],
        #                  'args'   : [],
        #                  'kwargs' : {},
        #                  'comment': '* mxy a1 a2 kw1=v1'
        #                 }
    def test_comment_line_skip_false(self):
        """With skipcomments=False the comment line is returned verbatim."""
        tokens = '* mxy a1 a2 kw1=v1'.split()
        parsed = spice.Reader._parse(tokens, skipcomments=False)
        assert parsed == {'type'   : ['comment', '*'],
                          'args'   : [],
                          'kwargs' : {},
                          'comment': '* mxy a1 a2 kw1=v1'
                         }
    def test_trailing_comment_skip_true(self):
        # Trailing '$' comments are kept regardless of skipcomments.
        tokens = 'mxy a1 a2 kw1=v1 $ c1 c2'.split()
        parsed = spice.Reader._parse(tokens)
        assert parsed == {'type'   : ['element', 'm'],
                          'args'   : ['mxy', 'a1', 'a2'],
                          'kwargs' : OrderedDict(kw1='v1'),
                          'comment': '$ c1 c2'
                         }
    def test_trailing_comment_skip_false(self):
        tokens = 'mxy a1 a2 kw1=v1 $ c1 c2'.split()
        parsed = spice.Reader._parse(tokens, skipcomments=False)
        assert parsed == {'type'   : ['element', 'm'],
                          'args'   : ['mxy', 'a1', 'a2'],
                          'kwargs' : OrderedDict(kw1='v1'),
                          'comment': '$ c1 c2'
                         }
    def test_spice_units1(self):
        """'ff' (femto) expands to 1e-15; plain scientific notation is kept."""
        tokens = 'mxy a1 a2 kw1=1.0e-15 kw2=1ff'.split()
        parsed = spice.Reader._parse(tokens)
        assert parsed == {'type'   : ['element', 'm'],
                          'args'   : ['mxy', 'a1', 'a2'],
                          'kwargs' : OrderedDict(kw1='1.0e-15', kw2='1e-15'),
                          'comment': ''
                         }
    def test_spice_units2(self):
        """Unit suffixes are expanded inside expressions too."""
        tokens = 'mxy a1 a2 kw1=1.0p kw2=(1m*1p)+1e-15'.split()
        parsed = spice.Reader._parse(tokens)
        assert parsed == {'type'   : ['element', 'm'],
                          'args'   : ['mxy', 'a1', 'a2'],
                          'kwargs' : OrderedDict(kw1='1e-12',
                                                 kw2='(0.001*1e-12)+1e-15'),
                          'comment': ''
                         }
class TestSpiceMacromodel:
    """Parsing of the .macromodel extension card."""
    def test_simple(self):
        """A single .macromodel card yields a primitive with the declared
        name, device type and port list."""
        netlist = StringIO(dedent(
            """\
            .macromodel nch_mac nmos d g s b w=1 l=1
            + cg="w * l * 0.05" $ gate cap (F)
            """))
        netlist.name = "<string>"
        ckt = Ckt()
        ckt.read_spice(netlist)
        prim = ckt.prims.get('nch_mac')
        assert prim.name == 'nch_mac'
        assert prim.type == 'nmos'
        assert prim.portnames == ['d', 'g', 's', 'b']
class TestCktObj:
    """Behaviour of a bare CktObj."""
    def test_name(self):
        """A freshly constructed object keeps its name and has no container."""
        cktobj = core.CktObj(name="myname")
        assert cktobj.name == "myname"
        assert cktobj.container is None
class TestCktObjContainer:
    """CktObjContainer: add/get/filter semantics, typed membership and
    back-references (each added object's container points at the owner)."""
    def test_add(self):
        objcont = core.CktObjContainer(objtype=core.CktObj, owner="myowner")
        obj = objcont.add(name="myname")
        assert obj.name == "myname"
        assert obj.container.owner is "myowner"
    def test_addobj(self):
        """addobj() returns the same object it was given."""
        objcont = core.CktObjContainer(objtype=core.CktObj, owner="myowner")
        obj = core.CktObj(name="myname")
        assert objcont.addobj(obj) is obj
        assert obj.container.owner is "myowner"
    def test_addobj_noname(self):
        """Objects without a name are rejected."""
        objcont = core.CktObjContainer(objtype=core.CktObj, owner="myowner")
        obj = core.CktObj(name=None)
        with pytest.raises(core.CktObjValueError) as e:
            objcont.addobj(obj)
    def test_addobj_badobj(self):
        """Objects of the wrong type are rejected."""
        objcont = core.CktObjContainer(objtype=core.CktObj, owner="myowner")
        class BadCktObj: pass
        badobj = BadCktObj()
        with pytest.raises(core.CktObjTypeError) as e:
            objcont.addobj(badobj)
    def test_get(self):
        objcont = core.CktObjContainer(objtype=core.CktObj, owner="myowner")
        obj1 = objcont.add(name="name1")
        obj2 = objcont.add(name="name2")
        assert objcont.get("name1") is obj1
        assert objcont.get("name2") is obj2
    def test_get_missing(self):
        objcont = core.CktObjContainer(objtype=core.CktObj, owner="myowner")
        obj1 = objcont.add(name="name1")
        with pytest.raises(core.CktObjDoesNotExist) as e:
            objcont.get("name2")
    def test_get_default(self):
        """get_default() returns None or the supplied fallback."""
        objcont = core.CktObjContainer(objtype=core.CktObj, owner="myowner")
        obj1 = objcont.add(name="name1")
        assert objcont.get_default("name2") is None
        assert objcont.get_default("name2", obj1) is obj1
    def test_all(self):
        """all() preserves insertion order."""
        objcont = core.CktObjContainer(objtype=core.CktObj, owner="myowner")
        obj1 = objcont.add(name="name1")
        obj2 = objcont.add(name="name2")
        assert list(objcont.all()) == [obj1, obj2]
    def test_filter(self):
        """filter() matches names by regex."""
        objcont = core.CktObjContainer(objtype=core.CktObj, owner="myowner")
        obj1 = objcont.add(name="name1")
        obj2 = objcont.add(name="name2")
        # NOTE(review): "obx1" (no 'j') looks like a typo for "objx1", but the
        # regexes below match either spelling — confirm it is intentional.
        objx1 = objcont.add(name="obx1")
        objx2 = objcont.add(name="objx2")
        assert list(objcont.filter(name="name.*")) == [obj1, obj2]
        assert list(objcont.filter(name=".*x.")) == [objx1, objx2]
        assert list(objcont.filter(name=".*1")) == [obj1, objx1]
class TestL0HierarchicalParams:
    """Parameter resolution within a single (non-nested) subckt: raw string
    values, evaluation against the cell's defaults, and evaluation through an
    instance into the referenced macromodel's expressions."""
    def make_ckt(self):
        """Build a one-level circuit: an inverter with w/l cell parameters."""
        f = StringIO(dedent(
            """\
            .macromodel pch_mac pmos d g s b m=1 cg="m*w*l*0.05"
            .macromodel nch_mac nmos d g s b m=1 cg="m*w*l*0.05"
            .subckt pinv a y vdd vss w=2 l=2.0
            xmp y a vdd vdd pch_mac w="2*W" l=1.0
            xmn y a vss vss nch_mac W=w l=1.0
            .ends
            """))
        f.name = "<string>"
        ckt = Ckt()
        ckt.read_spice(f)
        return ckt
    def test_param_value(self):
        """Raw parameter values are stored lower-cased and unevaluated."""
        ckt = self.make_ckt()
        pinv = ckt.get_cell('pinv')
        assert pinv.get_param('w').value == '2'
        assert pinv.get_param('l').value == '2.0'
        xmp = pinv.get_instance('mp')
        assert xmp.get_param('w').value == '2*w'
        assert xmp.get_param('l').value == '1.0'
        xmn = pinv.get_instance('mn')
        assert xmn.get_param('w').value == 'w'
        assert xmn.get_param('l').value == '1.0'
    def test_cell_param_eval(self):
        ckt = self.make_ckt()
        pinv = ckt.get_cell('pinv')
        assert pinv.eval_param('w') == 2.0
        assert pinv.eval_param('l') == 2.0
    def test_inst_param_eval(self):
        """Instance params evaluate against the enclosing cell's params."""
        ckt = self.make_ckt()
        pinv = ckt.get_cell('pinv')
        pinv.link()
        xmp = pinv.get_instance('mp')
        assert xmp.eval_param('w') == 4.0
        assert xmp.eval_param('l') == 1.0
        xmn = pinv.get_instance('mn')
        assert xmn.eval_param('w') == 2.0
        assert xmn.eval_param('l') == 1.0
    def test_ref_param_eval(self):
        """Macromodel expressions evaluate with instance overrides applied,
        e.g. cg = m*w*l*0.05 = 1*4*1*0.05 = 0.2 for the pmos."""
        ckt = self.make_ckt()
        pinv = ckt.get_cell('pinv')
        pinv.link()
        xmp = pinv.get_instance('mp')
        assert xmp.eval_ref_param('w') == 4.0
        assert xmp.eval_ref_param('l') == 1.0
        assert xmp.ref.get_param('cg').value == 'm*w*l*0.05'
        assert xmp.eval_ref_param('cg') == 0.2
        xmn = pinv.get_instance('mn')
        assert xmn.eval_ref_param('w') == 2.0
        assert xmn.eval_ref_param('l') == 1.0
        assert xmn.ref.get_param('cg').value == 'm*w*l*0.05'
        assert xmn.eval_ref_param('cg') == 0.1
class TestL1HierarchicalParams:
    """Parameter resolution through one level of hierarchy: a buffer built
    from two default-parameter inverters, both unflattened and after
    ungroup(flatten=True)."""
    def make_ckt(self):
        """Build a two-level circuit: buf -> 2 x pinv (default w/l)."""
        f = StringIO(dedent(
            """\
            .macromodel pch_mac pmos d g s b m=1 cg="m*w*l*0.05"
            .macromodel nch_mac nmos d g s b m=1 cg="m*w*l*0.05"
            .subckt pinv a y vdd vss w=2 l=2.0
            xmp y a vdd vdd pch_mac w="2*W" l=1.0
            xmn y a vss vss nch_mac W=w l=1.0
            .ends
            .subckt buf a y vdd vss
            xi1 a n vdd vss pinv
            xi2 n y vdd vss pinv
            .ends
            """))
        f.name = "<string>"
        ckt = Ckt()
        ckt.read_spice(f)
        return ckt
    def test_non_hier_ref_param_eval(self):
        """Sanity check: direct (non-hierarchical) evaluation still works."""
        ckt = self.make_ckt()
        pinv = ckt.get_cell('pinv')
        pinv.link()
        xmp = pinv.get_instance('mp')
        assert xmp.ref.get_param('cg').value == 'm*w*l*0.05'
        assert xmp.eval_ref_param('cg') == 0.2
        xmn = pinv.get_instance('mn')
        assert xmn.ref.get_param('cg').value == 'm*w*l*0.05'
        assert xmn.eval_ref_param('cg') == 0.1
    def test_hier_ref_param_eval(self):
        """Both inverter instances inherit pinv's default w=2, l=2.0."""
        ckt = self.make_ckt()
        buf = ckt.get_cell('buf')
        buf.link()
        xi1 = buf.get_instance('i1')
        assert xi1.ref.get_param('w').value == '2'
        assert xi1.eval_ref_param('w') == 2.0
        assert xi1.eval_ref_param('l') == 2.0
        xi2 = buf.get_instance('i2')
        assert xi2.ref.get_param('w').value == '2'
        assert xi2.eval_ref_param('w') == 2.0
        # Bug fix: this line previously re-checked xi1 (copy-paste error),
        # leaving xi2's 'l' untested.
        assert xi2.eval_ref_param('l') == 2.0
    def test_hier_flatten_param_eval(self):
        """After flattening, leaf devices evaluate against inherited params."""
        ckt = self.make_ckt()
        buf = ckt.get_cell('buf')
        buf.link()
        buf.ungroup(flatten=True)
        xi1_mp = buf.get_instance('i1/mp')
        assert xi1_mp.get_param('w').value == '2*w'
        assert xi1_mp.eval_ref_param('w') == 4.0
        assert xi1_mp.eval_ref_param('l') == 1.0
        assert xi1_mp.eval_ref_param('cg') == 0.2
        xi1_mn = buf.get_instance('i1/mn')
        assert xi1_mn.get_param('w').value == 'w'
        assert xi1_mn.eval_ref_param('w') == 2.0
        assert xi1_mn.eval_ref_param('l') == 1.0
        assert xi1_mn.eval_ref_param('cg') == 0.1
        xi2_mp = buf.get_instance('i2/mp')
        assert xi2_mp.get_param('w').value == '2*w'
        assert xi2_mp.eval_ref_param('w') == 4.0
        assert xi2_mp.eval_ref_param('l') == 1.0
        assert xi2_mp.eval_ref_param('cg') == 0.2
        xi2_mn = buf.get_instance('i2/mn')
        assert xi2_mn.get_param('w').value == 'w'
        assert xi2_mn.eval_ref_param('w') == 2.0
        assert xi2_mn.eval_ref_param('l') == 1.0
        assert xi2_mn.eval_ref_param('cg') == 0.1
class TestL2HierarchicalParams:
    """Flattening with per-instance parameter overrides: three inverter
    instances with different w/l, checked after ungroup(flatten=True)."""
    def make_ckt(self):
        """buf -> pinv (defaults), pinv(w=3,l=3), pinv(w=5,l=5)."""
        f = StringIO(dedent(
            """\
            .macromodel pch_mac pmos d g s b m=1 cg="m*w*l*0.05"
            .macromodel nch_mac nmos d g s b m=1 cg="m*w*l*0.05"
            .subckt pinv a y vdd vss w=2 l=2.0
            xmp y a vdd vdd pch_mac w="2*W" l=1.0
            xmn y a vss vss nch_mac W=w l=1.0
            .ends
            .subckt buf a y vdd vss
            xi1 a n vdd vss pinv
            xi2 n y vdd vss pinv w=3 l=3
            xi3 n y vdd vss pinv w=5 l=5
            .ends
            """))
        f.name = "<string>"
        ckt = Ckt()
        ckt.read_spice(f)
        return ckt
    def test_hier_flatten_param_eval(self):
        """Each flattened device sees its own instance's w override;
        cg = m*w*l*0.05 (float comparisons use a 1e-6 tolerance)."""
        ckt = self.make_ckt()
        buf = ckt.get_cell('buf')
        buf.link()
        buf.ungroup(flatten=True)
        xi1_mp = buf.get_instance('i1/mp')
        assert xi1_mp.get_param('w').value == '2*w'
        assert xi1_mp.eval_ref_param('w') == 4.0
        assert xi1_mp.eval_ref_param('l') == 1.0
        assert xi1_mp.eval_ref_param('cg') == 0.2
        xi1_mn = buf.get_instance('i1/mn')
        assert xi1_mn.get_param('w').value == 'w'
        assert xi1_mn.eval_ref_param('w') == 2.0
        assert xi1_mn.eval_ref_param('l') == 1.0
        assert xi1_mn.eval_ref_param('cg') == 0.1
        xi2_mp = buf.get_instance('i2/mp')
        assert xi2_mp.get_param('w').value == '2*w'
        assert xi2_mp.eval_ref_param('w') == 6.0
        assert xi2_mp.eval_ref_param('l') == 1.0
        assert abs(xi2_mp.eval_ref_param('cg') - 0.3) < 1e-6
        xi2_mn = buf.get_instance('i2/mn')
        assert xi2_mn.get_param('w').value == 'w'
        assert xi2_mn.eval_ref_param('w') == 3.0
        assert xi2_mn.eval_ref_param('l') == 1.0
        assert abs(xi2_mn.eval_ref_param('cg') - 0.15) < 1e-6
        xi3_mp = buf.get_instance('i3/mp')
        assert xi3_mp.get_param('w').value == '2*w'
        assert xi3_mp.eval_ref_param('w') == 10.0
        assert xi3_mp.eval_ref_param('l') == 1.0
        assert abs(xi3_mp.eval_ref_param('cg') - 0.5) < 1e-6
        xi3_mn = buf.get_instance('i3/mn')
        assert xi3_mn.get_param('w').value == 'w'
        assert xi3_mn.eval_ref_param('w') == 5.0
        assert xi3_mn.eval_ref_param('l') == 1.0
        assert abs(xi3_mn.eval_ref_param('cg') - 0.25) < 1e-6
class TestL3HierarchicalParams:
    """Like TestL2, but cg is defined through an intermediate unit-bearing
    parameter cga = 1fF/(1um * 20nm) = 1e-15/(1e-6 * 2e-8) = 0.05, and the
    instance overrides use expression forms ('(w2)', bare 'w3')."""
    def make_ckt(self):
        f = StringIO(dedent(
            """\
            .macromodel pch_mac pmos d g s b m=1
            +cga='1fF/(1um * 20nm)'
            +cg="m * w * l * cga"
            .macromodel nch_mac nmos d g s b m=1
            +cga='1fF/(1um * 20nm)'
            +cg="m * w * l * cga"
            .subckt pinv a y vdd vss w=2 l=2.0
            xmp y a vdd vdd pch_mac w="2*W" l=1.0
            xmn y a vss vss nch_mac W=w l=1.0
            .ends
            .subckt buf a y vdd vss w1=0 w2=3 w3=5
            xi1 a n vdd vss pinv
            xi2 n y vdd vss pinv w='(w2)'
            xi3 n y vdd vss pinv w=w3 l=5
            .ends
            """))
        f.name = "<string>"
        ckt = Ckt()
        ckt.read_spice(f)
        return ckt
    def test_hier_flatten_param_eval(self):
        """Expected values are identical to TestL2 since cga == 0.05."""
        ckt = self.make_ckt()
        buf = ckt.get_cell('buf')
        buf.link()
        buf.ungroup(flatten=True)
        xi1_mp = buf.get_instance('i1/mp')
        assert xi1_mp.get_param('w').value == '2*w'
        assert xi1_mp.eval_ref_param('w') == 4.0
        assert xi1_mp.eval_ref_param('l') == 1.0
        assert xi1_mp.eval_ref_param('cg') == 0.2
        xi1_mn = buf.get_instance('i1/mn')
        assert xi1_mn.get_param('w').value == 'w'
        assert xi1_mn.eval_ref_param('w') == 2.0
        assert xi1_mn.eval_ref_param('l') == 1.0
        assert xi1_mn.eval_ref_param('cg') == 0.1
        xi2_mp = buf.get_instance('i2/mp')
        assert xi2_mp.get_param('w').value == '2*w'
        assert xi2_mp.eval_ref_param('w') == 6.0
        assert xi2_mp.eval_ref_param('l') == 1.0
        assert abs(xi2_mp.eval_ref_param('cg') - 0.3) < 1e-6
        xi2_mn = buf.get_instance('i2/mn')
        assert xi2_mn.get_param('w').value == 'w'
        assert xi2_mn.eval_ref_param('w') == 3.0
        assert xi2_mn.eval_ref_param('l') == 1.0
        assert abs(xi2_mn.eval_ref_param('cg') - 0.15) < 1e-6
        xi3_mp = buf.get_instance('i3/mp')
        assert xi3_mp.get_param('w').value == '2*w'
        assert xi3_mp.eval_ref_param('w') == 10.0
        assert xi3_mp.eval_ref_param('l') == 1.0
        assert abs(xi3_mp.eval_ref_param('cg') - 0.5) < 1e-6
        xi3_mn = buf.get_instance('i3/mn')
        assert xi3_mn.get_param('w').value == 'w'
        assert xi3_mn.eval_ref_param('w') == 5.0
        assert xi3_mn.eval_ref_param('l') == 1.0
        assert abs(xi3_mn.eval_ref_param('cg') - 0.25) < 1e-6
class TestL4HierarchicalParams:
    """Parameter resolution through two levels of hierarchy
    (buf -> inv -> pinv), with multiplying overrides at the top level."""
    def make_ckt(self):
        f = StringIO(dedent(
            """\
            .macromodel pch_mac pmos d g s b m=1
            +cga='1fF/(1um * 20nm)'
            +cg="m * w * l * cga"
            .macromodel nch_mac nmos d g s b m=1
            +cga='1fF/(1um * 20nm)'
            +cg="m * w * l * cga"
            .subckt pinv a y vdd vss wp=1 wn=1
            xmp y a vdd vdd pch_mac w=wp l=1
            xmn y a vss vss nch_mac W=wn l=1
            .ends
            .subckt inv a y vdd vss wp=1 wn=1
            xi0 a y vdd vss pinv wp=wp wn=wn
            .ends
            .subckt buf a y vdd vss wp=2 wn=2
            xi0 a n vdd vss inv wp=wp wn=wn
            xi1 n y vdd vss inv wp="2*wp" wn="2*wn"
            .ends
            """))
        f.name = "<string>"
        ckt = Ckt()
        ckt.read_spice(f)
        return ckt
    def test_hier_flatten_param_eval(self):
        """wp propagates buf(2) -> inv -> pinv; the second stage doubles it."""
        ckt = self.make_ckt()
        ckt.link()
        buf = ckt.get_cell('buf')
        buf.ungroup(flatten=True)
        i0_i0_mp = buf.get_instance('i0/i0/mp')
        assert i0_i0_mp.eval_ref_param('w') == 2.0
        i1_i0_mp = buf.get_instance('i1/i0/mp')
        assert i1_i0_mp.eval_ref_param('w') == 4.0
| r-rathi/ckt-apps | cktapps/tests/test_cktapps.py | Python | mit | 24,777 | [
"MacroModel"
] | 545151c424ab8e40c147a29806c543d175dba98321b4cf521a0991df75aeca20 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import numpy as np
import warnings
from math import ceil
from math import cos
from math import sin
from math import tan
from math import pi
from warnings import warn
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
"""
Created on March 25, 2013
@author: geoffroy
"""
class HighSymmKpath(object):
"""
This class looks for path along high symmetry lines in
the Brillouin Zone.
It is based on Setyawan, W., & Curtarolo, S. (2010).
High-throughput electronic band structure calculations:
Challenges and tools. Computational Materials Science,
49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010
It should be used with primitive structures that
comply with the definition from the paper.
The symmetry is determined by spglib through the
SpacegroupAnalyzer class. The analyzer can be used to
produce the correct primitive structure (method
get_primitive_standard_structure(international_monoclinic=False)).
A warning will signal possible compatibility problems
with the given structure.
Args:
structure (Structure): Structure object
symprec (float): Tolerance for symmetry finding
angle_tolerance (float): Angle tolerance for symmetry finding.
atol (float): Absolute tolerance used to compare the input
structure with the one expected as primitive standard.
A warning will be issued if the lattices don't match.
"""
def __init__(self, structure, symprec=0.01, angle_tolerance=5, atol=1e-8):
self._structure = structure
self._sym = SpacegroupAnalyzer(structure, symprec=symprec,
angle_tolerance=angle_tolerance)
self._prim = self._sym\
.get_primitive_standard_structure(international_monoclinic=False)
self._conv = self._sym.get_conventional_standard_structure(international_monoclinic=False)
self._prim_rec = self._prim.lattice.reciprocal_lattice
self._kpath = None
#Note: this warning will be issued for space groups 38-41, since the primitive cell must be
#reformatted to match Setyawan/Curtarolo convention in order to work with the current k-path
#generation scheme.
if not np.allclose(self._structure.lattice.matrix, self._prim.lattice.matrix, atol=atol):
warnings.warn("The input structure does not match the expected standard primitive! "
"The path can be incorrect. Use at your own risk.")
lattice_type = self._sym.get_lattice_type()
<<<<<<< HEAD
spg_symbol = self._sym.get_spacegroup_symbol()
=======
spg_symbol = self._sym.get_space_group_symbol()
>>>>>>> a41cc069c865a5d0f35d0731f92c547467395b1b
if lattice_type == "cubic":
if "P" in spg_symbol:
self._kpath = self.cubic()
elif "F" in spg_symbol:
self._kpath = self.fcc()
elif "I" in spg_symbol:
self._kpath = self.bcc()
else:
warn("Unexpected value for spg_symbol: %s" % spg_symbol)
elif lattice_type == "tetragonal":
if "P" in spg_symbol:
self._kpath = self.tet()
elif "I" in spg_symbol:
a = self._conv.lattice.abc[0]
c = self._conv.lattice.abc[2]
if c < a:
self._kpath = self.bctet1(c, a)
else:
self._kpath = self.bctet2(c, a)
else:
warn("Unexpected value for spg_symbol: %s" % spg_symbol)
elif lattice_type == "orthorhombic":
a = self._conv.lattice.abc[0]
b = self._conv.lattice.abc[1]
c = self._conv.lattice.abc[2]
if "P" in spg_symbol:
self._kpath = self.orc()
elif "F" in spg_symbol:
if 1 / a ** 2 > 1 / b ** 2 + 1 / c ** 2:
self._kpath = self.orcf1(a, b, c)
elif 1 / a ** 2 < 1 / b ** 2 + 1 / c ** 2:
self._kpath = self.orcf2(a, b, c)
else:
self._kpath = self.orcf3(a, b, c)
elif "I" in spg_symbol:
self._kpath = self.orci(a, b, c)
elif "C" in spg_symbol or "A" in spg_symbol:
self._kpath = self.orcc(a, b, c)
else:
warn("Unexpected value for spg_symbol: %s" % spg_symbol)
elif lattice_type == "hexagonal":
self._kpath = self.hex()
elif lattice_type == "rhombohedral":
alpha = self._prim.lattice.lengths_and_angles[1][0]
if alpha < 90:
self._kpath = self.rhl1(alpha * pi / 180)
else:
self._kpath = self.rhl2(alpha * pi / 180)
elif lattice_type == "monoclinic":
a, b, c = self._conv.lattice.abc
alpha = self._conv.lattice.lengths_and_angles[1][0]
#beta = self._conv.lattice.lengths_and_angles[1][1]
if "P" in spg_symbol:
self._kpath = self.mcl(b, c, alpha * pi / 180)
elif "C" in spg_symbol:
kgamma = self._prim_rec.lengths_and_angles[1][2]
if kgamma > 90:
self._kpath = self.mclc1(a, b, c, alpha * pi / 180)
if kgamma == 90:
self._kpath = self.mclc2(a, b, c, alpha * pi / 180)
if kgamma < 90:
if b * cos(alpha * pi / 180) / c\
+ b ** 2 * sin(alpha) ** 2 / a ** 2 < 1:
self._kpath = self.mclc3(a, b, c, alpha * pi / 180)
if b * cos(alpha * pi / 180) / c \
+ b ** 2 * sin(alpha) ** 2 / a ** 2 == 1:
self._kpath = self.mclc4(a, b, c, alpha * pi / 180)
if b * cos(alpha * pi / 180) / c \
+ b ** 2 * sin(alpha) ** 2 / a ** 2 > 1:
self._kpath = self.mclc5(a, b, c, alpha * pi / 180)
else:
warn("Unexpected value for spg_symbol: %s" % spg_symbol)
elif lattice_type == "triclinic":
kalpha = self._prim_rec.lengths_and_angles[1][0]
kbeta = self._prim_rec.lengths_and_angles[1][1]
kgamma = self._prim_rec.lengths_and_angles[1][2]
if kalpha > 90 and kbeta > 90 and kgamma > 90:
self._kpath = self.tria()
if kalpha < 90 and kbeta < 90 and kgamma < 90:
self._kpath = self.trib()
if kalpha > 90 and kbeta > 90 and kgamma == 90:
self._kpath = self.tria()
if kalpha < 90 and kbeta < 90 and kgamma == 90:
self._kpath = self.trib()
else:
warn("Unknown lattice type %s" % lattice_type)
    @property
    def structure(self):
        """
        Returns:
            The standardized primitive structure (same object as :attr:`prim`).
        """
        return self._prim
    @property
    def conventional(self):
        """
        Returns:
            The conventional standard cell structure.
        """
        return self._conv
    @property
    def prim(self):
        """
        Returns:
            The primitive standard cell structure.
        """
        return self._prim
    @property
    def prim_rec(self):
        """
        Returns:
            The reciprocal lattice of the primitive standard cell.
        """
        return self._prim_rec
    @property
    def kpath(self):
        """
        Returns:
            The symmetry line path in reciprocal space, as a dict with
            'kpoints' (label -> fractional coords) and 'path' (list of
            label sequences); None if the lattice type was not recognized.
        """
        return self._kpath
def get_kpoints(self, line_density=20, coords_are_cartesian=True):
"""
Returns:
the kpoints along the paths in cartesian coordinates
together with the labels for symmetry points -Wei
"""
list_k_points = []
sym_point_labels = []
for b in self.kpath['path']:
for i in range(1, len(b)):
start = np.array(self.kpath['kpoints'][b[i - 1]])
end = np.array(self.kpath['kpoints'][b[i]])
distance = np.linalg.norm(
self._prim_rec.get_cartesian_coords(start) -
self._prim_rec.get_cartesian_coords(end))
nb = int(ceil(distance * line_density))
sym_point_labels.extend([b[i - 1]] + [''] * (nb - 1) + [b[i]])
list_k_points.extend(
[self._prim_rec.get_cartesian_coords(start)
+ float(i) / float(nb) *
(self._prim_rec.get_cartesian_coords(end)
- self._prim_rec.get_cartesian_coords(start))
for i in range(0, nb + 1)])
if coords_are_cartesian:
return list_k_points, sym_point_labels
else:
frac_k_points = [self._prim_rec.get_fractional_coords(k)
for k in list_k_points]
return frac_k_points, sym_point_labels
def cubic(self):
self.name = "CUB"
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'X': np.array([0.0, 0.5, 0.0]),
'R': np.array([0.5, 0.5, 0.5]),
'M': np.array([0.5, 0.5, 0.0])}
path = [["\\Gamma", "X", "M", "\\Gamma", "R", "X"], ["M", "R"]]
return {'kpoints': kpoints, 'path': path}
    def fcc(self):
        """Face-centered cubic (FCC) lattice: special k-points and path
        (Setyawan & Curtarolo conventions)."""
        self.name = "FCC"
        kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
                   'K': np.array([3.0 / 8.0, 3.0 / 8.0, 3.0 / 4.0]),
                   'L': np.array([0.5, 0.5, 0.5]),
                   'U': np.array([5.0 / 8.0, 1.0 / 4.0, 5.0 / 8.0]),
                   'W': np.array([0.5, 1.0 / 4.0, 3.0 / 4.0]),
                   'X': np.array([0.5, 0.0, 0.5])}
        path = [["\\Gamma", "X", "W", "K",
                 "\\Gamma", "L", "U", "W", "L", "K"], ["U", "X"]]
        return {'kpoints': kpoints, 'path': path}
def bcc(self):
self.name = "BCC"
kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
'H': np.array([0.5, -0.5, 0.5]),
'P': np.array([0.25, 0.25, 0.25]),
'N': np.array([0.0, 0.0, 0.5])}
path = [["\\Gamma", "H", "N", "\\Gamma", "P", "H"], ["P", "N"]]
return {'kpoints': kpoints, 'path': path}
    def tet(self):
        """Simple tetragonal (TET) lattice: special k-points and path."""
        self.name = "TET"
        kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
                   'A': np.array([0.5, 0.5, 0.5]),
                   'M': np.array([0.5, 0.5, 0.0]),
                   'R': np.array([0.0, 0.5, 0.5]),
                   'X': np.array([0.0, 0.5, 0.0]),
                   'Z': np.array([0.0, 0.0, 0.5])}
        path = [["\\Gamma", "X", "M", "\\Gamma", "Z", "R", "A", "Z"], ["X", "R"],
                ["M", "A"]]
        return {'kpoints': kpoints, 'path': path}
    def bctet1(self, c, a):
        """Body-centered tetragonal, c < a (BCT1): special k-points and path.

        eta parameterizes the Z points in terms of the c/a ratio.
        """
        self.name = "BCT1"
        eta = (1 + c ** 2 / a ** 2) / 4.0
        kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
                   'M': np.array([-0.5, 0.5, 0.5]),
                   'N': np.array([0.0, 0.5, 0.0]),
                   'P': np.array([0.25, 0.25, 0.25]),
                   'X': np.array([0.0, 0.0, 0.5]),
                   'Z': np.array([eta, eta, -eta]),
                   'Z_1': np.array([-eta, 1 - eta, eta])}
        path = [["\\Gamma", "X", "M", "\\Gamma", "Z", "P", "N", "Z_1", "M"],
                ["X", "P"]]
        return {'kpoints': kpoints, 'path': path}
    def bctet2(self, c, a):
        """Body-centered tetragonal, c > a (BCT2): special k-points and path.

        eta and zeta parameterize the Sigma and Y points via the a/c ratio.
        """
        self.name = "BCT2"
        eta = (1 + a ** 2 / c ** 2) / 4.0
        zeta = a ** 2 / (2 * c ** 2)
        kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
                   'N': np.array([0.0, 0.5, 0.0]),
                   'P': np.array([0.25, 0.25, 0.25]),
                   '\\Sigma': np.array([-eta, eta, eta]),
                   '\\Sigma_1': np.array([eta, 1 - eta, -eta]),
                   'X': np.array([0.0, 0.0, 0.5]),
                   'Y': np.array([-zeta, zeta, 0.5]),
                   'Y_1': np.array([0.5, 0.5, -zeta]),
                   'Z': np.array([0.5, 0.5, -0.5])}
        path = [["\\Gamma", "X", "Y", "\\Sigma", "\\Gamma", "Z",
                 "\\Sigma_1", "N", "P", "Y_1", "Z"], ["X", "P"]]
        return {'kpoints': kpoints, 'path': path}
    def orc(self):
        """Simple orthorhombic (ORC) lattice: special k-points and path."""
        self.name = "ORC"
        kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
                   'R': np.array([0.5, 0.5, 0.5]),
                   'S': np.array([0.5, 0.5, 0.0]),
                   'T': np.array([0.0, 0.5, 0.5]),
                   'U': np.array([0.5, 0.0, 0.5]),
                   'X': np.array([0.5, 0.0, 0.0]),
                   'Y': np.array([0.0, 0.5, 0.0]),
                   'Z': np.array([0.0, 0.0, 0.5])}
        path = [["\\Gamma", "X", "S", "Y", "\\Gamma",
                 "Z", "U", "R", "T", "Z"], ["Y", "T"], ["U", "X"], ["S", "R"]]
        return {'kpoints': kpoints, 'path': path}
    def orcf1(self, a, b, c):
        """Face-centered orthorhombic, 1/a^2 > 1/b^2 + 1/c^2 (ORCF1)."""
        self.name = "ORCF1"
        zeta = (1 + a ** 2 / b ** 2 - a ** 2 / c ** 2) / 4
        eta = (1 + a ** 2 / b ** 2 + a ** 2 / c ** 2) / 4
        kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
                   'A': np.array([0.5, 0.5 + zeta, zeta]),
                   'A_1': np.array([0.5, 0.5 - zeta, 1 - zeta]),
                   'L': np.array([0.5, 0.5, 0.5]),
                   'T': np.array([1, 0.5, 0.5]),
                   'X': np.array([0.0, eta, eta]),
                   'X_1': np.array([1, 1 - eta, 1 - eta]),
                   'Y': np.array([0.5, 0.0, 0.5]),
                   'Z': np.array([0.5, 0.5, 0.0])}
        path = [["\\Gamma", "Y", "T", "Z", "\\Gamma", "X", "A_1", "Y"],
                ["T", "X_1"], ["X", "A", "Z"], ["L", "\\Gamma"]]
        return {'kpoints': kpoints, 'path': path}
    def orcf2(self, a, b, c):
        """Face-centered orthorhombic, 1/a^2 < 1/b^2 + 1/c^2 (ORCF2)."""
        self.name = "ORCF2"
        phi = (1 + c ** 2 / b ** 2 - c ** 2 / a ** 2) / 4
        eta = (1 + a ** 2 / b ** 2 - a ** 2 / c ** 2) / 4
        delta = (1 + b ** 2 / a ** 2 - b ** 2 / c ** 2) / 4
        kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
                   'C': np.array([0.5, 0.5 - eta, 1 - eta]),
                   'C_1': np.array([0.5, 0.5 + eta, eta]),
                   'D': np.array([0.5 - delta, 0.5, 1 - delta]),
                   'D_1': np.array([0.5 + delta, 0.5, delta]),
                   'L': np.array([0.5, 0.5, 0.5]),
                   'H': np.array([1 - phi, 0.5 - phi, 0.5]),
                   'H_1': np.array([phi, 0.5 + phi, 0.5]),
                   'X': np.array([0.0, 0.5, 0.5]),
                   'Y': np.array([0.5, 0.0, 0.5]),
                   'Z': np.array([0.5, 0.5, 0.0])}
        path = [["\\Gamma", "Y", "C", "D", "X", "\\Gamma",
                 "Z", "D_1", "H", "C"], ["C_1", "Z"], ["X", "H_1"], ["H", "Y"],
                ["L", "\\Gamma"]]
        return {'kpoints': kpoints, 'path': path}
    def orcf3(self, a, b, c):
        """Face-centered orthorhombic, degenerate case
        1/a^2 == 1/b^2 + 1/c^2 (ORCF3); same points as ORCF1 with a
        shorter path."""
        self.name = "ORCF3"
        zeta = (1 + a ** 2 / b ** 2 - a ** 2 / c ** 2) / 4
        eta = (1 + a ** 2 / b ** 2 + a ** 2 / c ** 2) / 4
        kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
                   'A': np.array([0.5, 0.5 + zeta, zeta]),
                   'A_1': np.array([0.5, 0.5 - zeta, 1 - zeta]),
                   'L': np.array([0.5, 0.5, 0.5]),
                   'T': np.array([1, 0.5, 0.5]),
                   'X': np.array([0.0, eta, eta]),
                   'X_1': np.array([1, 1 - eta, 1 - eta]),
                   'Y': np.array([0.5, 0.0, 0.5]),
                   'Z': np.array([0.5, 0.5, 0.0])}
        path = [["\\Gamma", "Y", "T", "Z", "\\Gamma", "X", "A_1", "Y"],
                ["X", "A", "Z"], ["L", "\\Gamma"]]
        return {'kpoints': kpoints, 'path': path}
    def orci(self, a, b, c):
        """Body-centered orthorhombic (ORCI): special k-points and path."""
        self.name = "ORCI"
        zeta = (1 + a ** 2 / c ** 2) / 4
        eta = (1 + b ** 2 / c ** 2) / 4
        delta = (b ** 2 - a ** 2) / (4 * c ** 2)
        mu = (a ** 2 + b ** 2) / (4 * c ** 2)
        kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
                   'L': np.array([-mu, mu, 0.5 - delta]),
                   'L_1': np.array([mu, -mu, 0.5 + delta]),
                   'L_2': np.array([0.5 - delta, 0.5 + delta, -mu]),
                   'R': np.array([0.0, 0.5, 0.0]),
                   'S': np.array([0.5, 0.0, 0.0]),
                   'T': np.array([0.0, 0.0, 0.5]),
                   'W': np.array([0.25, 0.25, 0.25]),
                   'X': np.array([-zeta, zeta, zeta]),
                   'X_1': np.array([zeta, 1 - zeta, -zeta]),
                   'Y': np.array([eta, -eta, eta]),
                   'Y_1': np.array([1 - eta, eta, -eta]),
                   'Z': np.array([0.5, 0.5, -0.5])}
        path = [["\\Gamma", "X", "L", "T", "W", "R", "X_1", "Z",
                 "\\Gamma", "Y", "S", "W"], ["L_1", "Y"], ["Y_1", "Z"]]
        return {'kpoints': kpoints, 'path': path}
    def orcc(self, a, b, c):
        """C-centered orthorhombic (ORCC): special k-points and path."""
        self.name = "ORCC"
        zeta = (1 + a ** 2 / b ** 2) / 4
        kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
                   'A': np.array([zeta, zeta, 0.5]),
                   'A_1': np.array([-zeta, 1 - zeta, 0.5]),
                   'R': np.array([0.0, 0.5, 0.5]),
                   'S': np.array([0.0, 0.5, 0.0]),
                   'T': np.array([-0.5, 0.5, 0.5]),
                   'X': np.array([zeta, zeta, 0.0]),
                   'X_1': np.array([-zeta, 1 - zeta, 0.0]),
                   'Y': np.array([-0.5, 0.5, 0]),
                   'Z': np.array([0.0, 0.0, 0.5])}
        path = [["\\Gamma", "X", "S", "R", "A", "Z",
                 "\\Gamma", "Y", "X_1", "A_1", "T", "Y"], ["Z", "T"]]
        return {'kpoints': kpoints, 'path': path}
    def hex(self):
        """Hexagonal (HEX) lattice: special k-points and path.

        Note: intentionally named after the lattice type; shadows the
        ``hex`` builtin only inside this class namespace.
        """
        self.name = "HEX"
        kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
                   'A': np.array([0.0, 0.0, 0.5]),
                   'H': np.array([1.0 / 3.0, 1.0 / 3.0, 0.5]),
                   'K': np.array([1.0 / 3.0, 1.0 / 3.0, 0.0]),
                   'L': np.array([0.5, 0.0, 0.5]),
                   'M': np.array([0.5, 0.0, 0.0])}
        path = [["\\Gamma", "M", "K", "\\Gamma", "A", "L", "H", "A"], ["L", "M"],
                ["K", "H"]]
        return {'kpoints': kpoints, 'path': path}
def rhl1(self, alpha):
    """Special k-points and suggested band path for the rhombohedral
    (RHL1) Brillouin zone.

    Args:
        alpha: rhombohedral angle in radians (RHL1 branch presumably
            requires alpha < 90 degrees -- TODO confirm caller convention).

    Returns:
        dict with 'kpoints' (label -> fractional coordinates) and 'path'.
    """
    self.name = "RHL1"
    eta = (1 + 4 * cos(alpha)) / (2 + 4 * cos(alpha))
    nu = 3.0 / 4.0 - eta / 2.0
    kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
               'B': np.array([eta, 0.5, 1.0 - eta]),
               'B_1': np.array([1.0 / 2.0, 1.0 - eta, eta - 1.0]),
               'F': np.array([0.5, 0.5, 0.0]),
               'L': np.array([0.5, 0.0, 0.0]),
               'L_1': np.array([0.0, 0.0, -0.5]),
               'P': np.array([eta, nu, nu]),
               'P_1': np.array([1.0 - nu, 1.0 - nu, 1.0 - eta]),
               'P_2': np.array([nu, nu, eta - 1.0]),
               'Q': np.array([1.0 - nu, nu, 0.0]),
               'X': np.array([nu, 0.0, -nu]),
               'Z': np.array([0.5, 0.5, 0.5])}
    path = [["\\Gamma", "L", "B_1"], ["B", "Z", "\\Gamma", "X"],
            ["Q", "F", "P_1", "Z"], ["L", "P"]]
    return {'kpoints': kpoints, 'path': path}
def rhl2(self, alpha):
    """Special k-points and suggested band path for the rhombohedral
    (RHL2) Brillouin zone.

    Args:
        alpha: rhombohedral angle in radians (RHL2 branch presumably
            requires alpha > 90 degrees -- TODO confirm caller convention).

    Returns:
        dict with 'kpoints' (label -> fractional coordinates) and 'path'.
    """
    self.name = "RHL2"
    eta = 1 / (2 * tan(alpha / 2.0) ** 2)
    nu = 3.0 / 4.0 - eta / 2.0
    kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
               'F': np.array([0.5, -0.5, 0.0]),
               'L': np.array([0.5, 0.0, 0.0]),
               'P': np.array([1 - nu, -nu, 1 - nu]),
               'P_1': np.array([nu, nu - 1.0, nu - 1.0]),
               'Q': np.array([eta, eta, eta]),
               'Q_1': np.array([1.0 - eta, -eta, -eta]),
               'Z': np.array([0.5, -0.5, 0.5])}
    path = [["\\Gamma", "P", "Z", "Q", "\\Gamma",
             "F", "P_1", "Q_1", "L", "Z"]]
    return {'kpoints': kpoints, 'path': path}
def mcl(self, b, c, beta):
    """Special k-points and suggested band path for the monoclinic (MCL)
    Brillouin zone.

    Args:
        b, c: lattice parameters entering the BZ geometry.
        beta: monoclinic angle in radians.

    Returns:
        dict with 'kpoints' (label -> fractional coordinates as np.array)
        and 'path' (list of label sequences to walk in order).
    """
    self.name = "MCL"
    eta = (1 - b * cos(beta) / c) / (2 * sin(beta) ** 2)
    nu = 0.5 - eta * c * cos(beta) / b
    # Bug fix: 'M_2' was a duplicate of 'M_1' ([0.5, 1-eta, nu]); the
    # Setyawan-Curtarolo convention defines M_2 = (1/2, eta, -nu),
    # mirroring H_2 = (0, eta, -nu) at x = 1/2.
    kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
               'A': np.array([0.5, 0.5, 0.0]),
               'C': np.array([0.0, 0.5, 0.5]),
               'D': np.array([0.5, 0.0, 0.5]),
               'D_1': np.array([0.5, 0.5, -0.5]),
               'E': np.array([0.5, 0.5, 0.5]),
               'H': np.array([0.0, eta, 1.0 - nu]),
               'H_1': np.array([0.0, 1.0 - eta, nu]),
               'H_2': np.array([0.0, eta, -nu]),
               'M': np.array([0.5, eta, 1.0 - nu]),
               'M_1': np.array([0.5, 1 - eta, nu]),
               'M_2': np.array([0.5, eta, -nu]),
               'X': np.array([0.0, 0.5, 0.0]),
               'Y': np.array([0.0, 0.0, 0.5]),
               'Y_1': np.array([0.0, 0.0, -0.5]),
               'Z': np.array([0.5, 0.0, 0.0])}
    path = [["\\Gamma", "Y", "H", "C", "E", "M_1", "A", "X", "H_1"],
            ["M", "D", "Z"], ["Y", "D"]]
    return {'kpoints': kpoints, 'path': path}
def mclc1(self, a, b, c, alpha):
    """Special k-points and suggested band path for the C-centred
    monoclinic (MCLC1) Brillouin zone.

    Args:
        a, b, c: lattice parameters.
        alpha: monoclinic angle in radians.

    Returns:
        dict with 'kpoints' (label -> fractional coordinates) and 'path'.
    """
    self.name = "MCLC1"
    zeta = (2 - b * cos(alpha) / c) / (4 * sin(alpha) ** 2)
    eta = 0.5 + 2 * zeta * c * cos(alpha) / b
    psi = 0.75 - a ** 2 / (4 * b ** 2 * sin(alpha) ** 2)
    phi = psi + (0.75 - psi) * b * cos(alpha) / c
    kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
               'N': np.array([0.5, 0.0, 0.0]),
               'N_1': np.array([0.0, -0.5, 0.0]),
               'F': np.array([1 - zeta, 1 - zeta, 1 - eta]),
               'F_1': np.array([zeta, zeta, eta]),
               'F_2': np.array([-zeta, -zeta, 1 - eta]),
               # F_3 is not on the MCLC1 path, hence disabled here.
               #'F_3': np.array([1 - zeta, -zeta, 1 - eta]),
               'I': np.array([phi, 1 - phi, 0.5]),
               'I_1': np.array([1 - phi, phi - 1, 0.5]),
               'L': np.array([0.5, 0.5, 0.5]),
               'M': np.array([0.5, 0.0, 0.5]),
               'X': np.array([1 - psi, psi - 1, 0.0]),
               'X_1': np.array([psi, 1 - psi, 0.0]),
               'X_2': np.array([psi - 1, -psi, 0.0]),
               'Y': np.array([0.5, 0.5, 0.0]),
               'Y_1': np.array([-0.5, -0.5, 0.0]),
               'Z': np.array([0.0, 0.0, 0.5])}
    path = [["\\Gamma", "Y", "F", "L", "I"], ["I_1", "Z", "F_1"],
            ["Y", "X_1"], ["X", "\\Gamma", "N"], ["M", "\\Gamma"]]
    return {'kpoints': kpoints, 'path': path}
def mclc2(self, a, b, c, alpha):
    """Special k-points and suggested band path for the C-centred
    monoclinic (MCLC2) Brillouin zone. Same parameterisation as MCLC1;
    only the path (and the active F_3 point) differ.

    Args:
        a, b, c: lattice parameters.
        alpha: monoclinic angle in radians.

    Returns:
        dict with 'kpoints' (label -> fractional coordinates) and 'path'.
    """
    self.name = "MCLC2"
    zeta = (2 - b * cos(alpha) / c) / (4 * sin(alpha) ** 2)
    eta = 0.5 + 2 * zeta * c * cos(alpha) / b
    psi = 0.75 - a ** 2 / (4 * b ** 2 * sin(alpha) ** 2)
    phi = psi + (0.75 - psi) * b * cos(alpha) / c
    kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
               'N': np.array([0.5, 0.0, 0.0]),
               'N_1': np.array([0.0, -0.5, 0.0]),
               'F': np.array([1 - zeta, 1 - zeta, 1 - eta]),
               'F_1': np.array([zeta, zeta, eta]),
               'F_2': np.array([-zeta, -zeta, 1 - eta]),
               'F_3': np.array([1 - zeta, -zeta, 1 - eta]),
               'I': np.array([phi, 1 - phi, 0.5]),
               'I_1': np.array([1 - phi, phi - 1, 0.5]),
               'L': np.array([0.5, 0.5, 0.5]),
               'M': np.array([0.5, 0.0, 0.5]),
               'X': np.array([1 - psi, psi - 1, 0.0]),
               'X_1': np.array([psi, 1 - psi, 0.0]),
               'X_2': np.array([psi - 1, -psi, 0.0]),
               'Y': np.array([0.5, 0.5, 0.0]),
               'Y_1': np.array([-0.5, -0.5, 0.0]),
               'Z': np.array([0.0, 0.0, 0.5])}
    path = [["\\Gamma", "Y", "F", "L", "I"], ["I_1", "Z", "F_1"],
            ["N", "\\Gamma", "M"]]
    return {'kpoints': kpoints, 'path': path}
def mclc3(self, a, b, c, alpha):
    """Special k-points and suggested band path for the C-centred
    monoclinic (MCLC3) Brillouin zone.

    Args:
        a, b, c: lattice parameters.
        alpha: monoclinic angle in radians.

    Returns:
        dict with 'kpoints' (label -> fractional coordinates) and 'path'.
    """
    self.name = "MCLC3"
    mu = (1 + b ** 2 / a ** 2) / 4.0
    delta = b * c * cos(alpha) / (2 * a ** 2)
    zeta = mu - 0.25 + (1 - b * cos(alpha) / c)\
        / (4 * sin(alpha) ** 2)
    eta = 0.5 + 2 * zeta * c * cos(alpha) / b
    phi = 1 + zeta - 2 * mu
    psi = eta - 2 * delta
    kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
               'F': np.array([1 - phi, 1 - phi, 1 - psi]),
               'F_1': np.array([phi, phi - 1, psi]),
               'F_2': np.array([1 - phi, -phi, 1 - psi]),
               'H': np.array([zeta, zeta, eta]),
               'H_1': np.array([1 - zeta, -zeta, 1 - eta]),
               'H_2': np.array([-zeta, -zeta, 1 - eta]),
               'I': np.array([0.5, -0.5, 0.5]),
               'M': np.array([0.5, 0.0, 0.5]),
               'N': np.array([0.5, 0.0, 0.0]),
               'N_1': np.array([0.0, -0.5, 0.0]),
               'X': np.array([0.5, -0.5, 0.0]),
               'Y': np.array([mu, mu, delta]),
               'Y_1': np.array([1 - mu, -mu, -delta]),
               'Y_2': np.array([-mu, -mu, -delta]),
               'Y_3': np.array([mu, mu - 1, delta]),
               'Z': np.array([0.0, 0.0, 0.5])}
    path = [["\\Gamma", "Y", "F", "H", "Z", "I", "F_1"],
            ["H_1", "Y_1", "X", "\\Gamma", "N"], ["M", "\\Gamma"]]
    return {'kpoints': kpoints, 'path': path}
def mclc4(self, a, b, c, alpha):
    """Special k-points and suggested band path for the C-centred
    monoclinic (MCLC4) Brillouin zone. Identical constants to MCLC3;
    only the first leg of the path differs (ends at I, not F_1).

    Args:
        a, b, c: lattice parameters.
        alpha: monoclinic angle in radians.

    Returns:
        dict with 'kpoints' (label -> fractional coordinates) and 'path'.
    """
    self.name = "MCLC4"
    mu = (1 + b ** 2 / a ** 2) / 4.0
    delta = b * c * cos(alpha) / (2 * a ** 2)
    zeta = mu - 0.25 + (1 - b * cos(alpha) / c)\
        / (4 * sin(alpha) ** 2)
    eta = 0.5 + 2 * zeta * c * cos(alpha) / b
    phi = 1 + zeta - 2 * mu
    psi = eta - 2 * delta
    kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
               'F': np.array([1 - phi, 1 - phi, 1 - psi]),
               'F_1': np.array([phi, phi - 1, psi]),
               'F_2': np.array([1 - phi, -phi, 1 - psi]),
               'H': np.array([zeta, zeta, eta]),
               'H_1': np.array([1 - zeta, -zeta, 1 - eta]),
               'H_2': np.array([-zeta, -zeta, 1 - eta]),
               'I': np.array([0.5, -0.5, 0.5]),
               'M': np.array([0.5, 0.0, 0.5]),
               'N': np.array([0.5, 0.0, 0.0]),
               'N_1': np.array([0.0, -0.5, 0.0]),
               'X': np.array([0.5, -0.5, 0.0]),
               'Y': np.array([mu, mu, delta]),
               'Y_1': np.array([1 - mu, -mu, -delta]),
               'Y_2': np.array([-mu, -mu, -delta]),
               'Y_3': np.array([mu, mu - 1, delta]),
               'Z': np.array([0.0, 0.0, 0.5])}
    path = [["\\Gamma", "Y", "F", "H", "Z", "I"],
            ["H_1", "Y_1", "X", "\\Gamma", "N"], ["M", "\\Gamma"]]
    return {'kpoints': kpoints, 'path': path}
def mclc5(self, a, b, c, alpha):
    """Special k-points and suggested band path for the C-centred
    monoclinic (MCLC5) Brillouin zone.

    Args:
        a, b, c: lattice parameters.
        alpha: monoclinic angle in radians.

    Returns:
        dict with 'kpoints' (label -> fractional coordinates) and 'path'.
    """
    self.name = "MCLC5"
    zeta = (b ** 2 / a ** 2 + (1 - b * cos(alpha) / c)
            / sin(alpha) ** 2) / 4
    eta = 0.5 + 2 * zeta * c * cos(alpha) / b
    mu = eta / 2 + b ** 2 / (4 * a ** 2) \
        - b * c * cos(alpha) / (2 * a ** 2)
    nu = 2 * mu - zeta
    rho = 1 - zeta * a ** 2 / b ** 2
    omega = (4 * nu - 1 - b ** 2 * sin(alpha) ** 2 / a ** 2)\
        * c / (2 * b * cos(alpha))
    delta = zeta * c * cos(alpha) / b + omega / 2 - 0.25
    kpoints = {'\\Gamma': np.array([0.0, 0.0, 0.0]),
               'F': np.array([nu, nu, omega]),
               'F_1': np.array([1 - nu, 1 - nu, 1 - omega]),
               'F_2': np.array([nu, nu - 1, omega]),
               'H': np.array([zeta, zeta, eta]),
               'H_1': np.array([1 - zeta, -zeta, 1 - eta]),
               'H_2': np.array([-zeta, -zeta, 1 - eta]),
               'I': np.array([rho, 1 - rho, 0.5]),
               'I_1': np.array([1 - rho, rho - 1, 0.5]),
               'L': np.array([0.5, 0.5, 0.5]),
               'M': np.array([0.5, 0.0, 0.5]),
               'N': np.array([0.5, 0.0, 0.0]),
               'N_1': np.array([0.0, -0.5, 0.0]),
               'X': np.array([0.5, -0.5, 0.0]),
               'Y': np.array([mu, mu, delta]),
               'Y_1': np.array([1 - mu, -mu, -delta]),
               'Y_2': np.array([-mu, -mu, -delta]),
               'Y_3': np.array([mu, mu - 1, delta]),
               'Z': np.array([0.0, 0.0, 0.5])}
    path = [["\\Gamma", "Y", "F", "L", "I"], ["I_1", "Z", "H", "F_1"],
            ["H_1", "Y_1", "X", "\\Gamma", "N"], ["M", "\\Gamma"]]
    return {'kpoints': kpoints, 'path': path}
def tria(self):
    """Special k-points and suggested band path for the triclinic TRI1a
    Brillouin zone. All coordinates are fixed constants.

    Returns:
        dict with 'kpoints' (label -> fractional coordinates as np.array)
        and 'path' (list of label sequences to walk in order).
    """
    self.name = "TRI1a"
    labelled_coords = [
        ('\\Gamma', (0.0, 0.0, 0.0)),
        ('L', (0.5, 0.5, 0.0)),
        ('M', (0.0, 0.5, 0.5)),
        ('N', (0.5, 0.0, 0.5)),
        ('R', (0.5, 0.5, 0.5)),
        ('X', (0.5, 0.0, 0.0)),
        ('Y', (0.0, 0.5, 0.0)),
        ('Z', (0.0, 0.0, 0.5)),
    ]
    kpoints = dict((label, np.array(coords)) for label, coords in labelled_coords)
    path = [["X", "\\Gamma", "Y"], ["L", "\\Gamma", "Z"],
            ["N", "\\Gamma", "M"], ["R", "\\Gamma"]]
    return {'kpoints': kpoints, 'path': path}
def trib(self):
    """Special k-points and suggested band path for the triclinic TRI1b
    Brillouin zone. All coordinates are fixed constants.

    Returns:
        dict with 'kpoints' (label -> fractional coordinates as np.array)
        and 'path' (list of label sequences to walk in order).
    """
    self.name = "TRI1b"
    labelled_coords = [
        ('\\Gamma', (0.0, 0.0, 0.0)),
        ('L', (0.5, -0.5, 0.0)),
        ('M', (0.0, 0.0, 0.5)),
        ('N', (-0.5, -0.5, 0.5)),
        ('R', (0.0, -0.5, 0.5)),
        ('X', (0.0, -0.5, 0.0)),
        ('Y', (0.5, 0.0, 0.0)),
        ('Z', (-0.5, 0.0, 0.5)),
    ]
    kpoints = dict((label, np.array(coords)) for label, coords in labelled_coords)
    path = [["X", "\\Gamma", "Y"], ["L", "\\Gamma", "Z"],
            ["N", "\\Gamma", "M"], ["R", "\\Gamma"]]
    return {'kpoints': kpoints, 'path': path}
| Bismarrck/pymatgen | pymatgen/symmetry/bandstructure.py | Python | mit | 30,212 | [
"pymatgen"
] | ef5e514c9e4e04a80c1d8abe7733af113d233599535b2c684d8ba401c4f85df0 |
import ocl
import camvtk
import time
import vtk
import datetime
import math
def loop_waterline(zh, cutter,s):
range=2
Nmax = 200
yvals = [float(n-float(Nmax)/2)/Nmax*range for n in xrange(0,Nmax+1)]
xvals = [float(n-float(Nmax)/2)/Nmax*range for n in xrange(0,Nmax+1)]
bpc = ocl.BatchPushCutter()
bpc.setXDirection()
bpc.setSTL(s)
bpc.setCutter(cutter)
for y in yvals:
f1 = ocl.Point(-range,y,zh) # start point of fiber
f2 = ocl.Point(+range,y,zh) # end point of fiber
f = ocl.Fiber( f1, f2)
bpc.appendFiber(f)
for x in xvals:
f1 = ocl.Point(x,-range,zh) # start point of fiber
f2 = ocl.Point(x,+range,zh) # end point of fiber
f = ocl.Fiber( f1, f2)
bpc.appendFiber(f)
bpc.run()
clpoints = bpc.getCLPoints()
fibers = bpc.getFibers()
w = ocl.Weave()
print "Weave...",
for f in fibers:
w.addFiber(f)
print "build()...",
w.build()
print "face_traverse()...",
w.face_traverse()
print "done."
return w.getLoops()
def drawLoop(myscreen, w_loop):  # draw the loop as a yellow line
    """Render every loop in w_loop as a chain of yellow line segments.

    myscreen: camvtk.VTKScreen to draw on.
    w_loop:   sequence of loops, each a sequence of points with .x/.y/.z.
    """
    previous = None  # point drawn immediately before the current one
    for loop in w_loop:
        np = 0  # points handled so far within this loop
        for p in loop:
            # Bug fix: the original tested 'np is not 0', which relies on
            # CPython's small-int identity caching (and warns on 3.8+);
            # compare by value instead. Skip the segment for the first
            # point of each loop, since it has no predecessor in the loop.
            if np != 0:
                myscreen.addActor( camvtk.Line( p1=(previous.x,previous.y, previous.z),
                                                p2=(p.x,p.y,p.z), color=camvtk.yellow) )
            np = np + 1
            previous = p
if __name__ == "__main__":
    print ocl.revision()
    myscreen = camvtk.VTKScreen()
    # Build a single triangle a-b-c, drawing its vertices and edges.
    a = ocl.Point(0,1,0.3)
    myscreen.addActor(camvtk.Point(center=(a.x,a.y,a.z), color=(1,0,1)))
    b = ocl.Point(1,0.5,0.3)
    myscreen.addActor(camvtk.Point(center=(b.x,b.y,b.z), color=(1,0,1)))
    c = ocl.Point(0,0,0.1)
    myscreen.addActor(camvtk.Point(center=(c.x,c.y,c.z), color=(1,0,1)))
    myscreen.addActor( camvtk.Line(p1=(a.x,a.y,a.z),p2=(c.x,c.y,c.z)) )
    myscreen.addActor( camvtk.Line(p1=(c.x,c.y,c.z),p2=(b.x,b.y,b.z)) )
    myscreen.addActor( camvtk.Line(p1=(a.x,a.y,a.z),p2=(b.x,b.y,b.z)) )
    t = ocl.Triangle(b,c,a)
    s = ocl.STLSurf()
    s.addTriangle(t) # a one-triangle STLSurf
    # Cutter selection; alternatives are left commented for experimenting.
    # NOTE(review): diameter/angle/length feed only the ConeCutter variant.
    diameter = 0.4
    angle = math.pi/4
    length = 5
    cutter = ocl.CylCutter(0.3, 5)
    #cutter = ocl.BallCutter(0.4, 5)
    #cutter = ocl.BullCutter(0.4, 0.1, 5)
    #cutter = ocl.ConeCutter(diameter, angle, length)
    print cutter
    # Evenly spaced z-heights from zstart to zend (inclusive of endpoints).
    zstart = 0
    zend = 0.35
    zvals=[]
    Nz = 7
    for n in xrange(0,Nz):
        zvals.append( zstart + n*(zend-zstart)/float(Nz-1) )
    # Compute and draw a waterline loop at each height.
    for zh in zvals:
        print "zh=", zh
        w_loop = loop_waterline(zh, cutter,s)
        drawLoop( myscreen, w_loop )
    print "done."
    # Camera, decorations, and interactive render loop.
    myscreen.camera.SetPosition(0.5, 3, 2)
    myscreen.camera.SetFocalPoint(0.5, 0.5, 0)
    camvtk.drawArrows(myscreen,center=(-0.5,-0.5,-0.5))
    camvtk.drawOCLtext(myscreen)
    myscreen.render()
    myscreen.iren.Start()
    #raw_input("Press Enter to terminate")
| AlanZatarain/opencamlib | scripts/fiber_12_waterline_onetriangle.py | Python | gpl-3.0 | 2,999 | [
"VTK"
] | c201af6f2ed2c44981dce56e2b866356f46dae34e010e770fd9417c7f95c3ae0 |
from __future__ import print_function
import os,sys
import unittest
import math
from rdkit import RDConfig
from rdkit import DataStructs
from rdkit import Chem
from rdkit.Chem import rdMolAlign
from rdkit.Geometry import rdGeometry as geom
from rdkit.Chem import rdShapeHelpers as rdshp
from rdkit.Chem import rdMolTransforms as rdmt
def feq(v1, v2, tol=1.0e-4):
    """Approximate float equality: True when v1 and v2 differ by less than tol."""
    delta = v1 - v2
    return -tol < delta < tol
class TestCase(unittest.TestCase):
    """Regression tests for the RDKit ShapeHelpers Python wrappers.

    The numeric expectations (grid occupancy totals, Tanimoto distances,
    box corners) are pinned regression values for the 1oir test molecule.
    """

    def setUp(self) :
        pass

    def test1Shape(self):
        # Encode the canonicalized molecule into a uniform grid and check
        # the total occupancy against the pinned value.
        fileN = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','ShapeHelpers',
                             'test_data','1oir.mol')
        m = Chem.MolFromMolFile(fileN)
        rdmt.CanonicalizeMol(m)
        dims1, offset1 = rdshp.ComputeConfDimsAndOffset(m.GetConformer())
        grd = geom.UniformGrid3D(30.0, 16.0, 10.0)
        rdshp.EncodeShape(m, grd, 0);
        ovect = grd.GetOccupancyVect()
        self.assertTrue(ovect.GetTotalVal() == 9250)

        # Passing the canonical transform explicitly must give the same
        # dims/offset as canonicalizing the molecule first.
        m = Chem.MolFromMolFile(fileN)
        trans = rdmt.ComputeCanonicalTransform(m.GetConformer())
        dims, offset = rdshp.ComputeConfDimsAndOffset(m.GetConformer(), trans=trans)
        dims -= dims1
        offset -= offset1;
        self.assertTrue(feq(dims.Length(), 0.0))
        self.assertTrue(feq(offset.Length(), 0.0))

        # ... and the same grid occupancy.
        grd1 = geom.UniformGrid3D(30.0, 16.0, 10.0)
        rdshp.EncodeShape(m, grd1, 0, trans)
        ovect = grd1.GetOccupancyVect()
        self.assertTrue(ovect.GetTotalVal() == 9250)

        grd2 = geom.UniformGrid3D(30.0, 16.0, 10.0)
        rdshp.EncodeShape(m, grd2, 0)

        # Shape Tanimoto distance against a second conformation, before and
        # after alignment, at default and finer grid resolutions.
        # NOTE(review): rmsd is unused below; AlignMol is called for its
        # side effect of aligning m onto m2.
        fileN2 = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol','ShapeHelpers',
                              'test_data','1oir_conf.mol')
        m2 = Chem.MolFromMolFile(fileN2)
        rmsd = rdMolAlign.AlignMol(m, m2)
        self.assertTrue(feq(rdshp.ShapeTanimotoDist(m, m2),0.2813))
        dist = rdshp.ShapeTanimotoDist(mol1=m, mol2=m2, confId1=0, confId2=0,
                                       gridSpacing=0.25, stepSize=0.125)
        self.assertTrue(feq(dist,0.3021))

        # Encoding with an explicitly sized/offset grid (0.5 spacing,
        # two-bit values) and pinned dims/offset for the raw conformer.
        m = Chem.MolFromMolFile(fileN)
        cpt = rdmt.ComputeCentroid(m.GetConformer())
        dims, offset = rdshp.ComputeConfDimsAndOffset(m.GetConformer())
        grd = geom.UniformGrid3D(dims.x, dims.y, dims.z,
                                 0.5, DataStructs.DiscreteValueType.TWOBITVALUE,
                                 offset)
        dims -= geom.Point3D(13.927, 16.97, 9.775)
        offset -= geom.Point3D(-4.353, 16.829, 2.782)
        self.assertTrue(feq(dims.Length(), 0.0))
        self.assertTrue(feq(offset.Length(), 0.0))
        rdshp.EncodeShape(m, grd, 0)
        ovect = grd.GetOccupancyVect()
        self.assertTrue(ovect.GetTotalVal() == 9275)
        geom.WriteGridToFile(grd, '1oir_shape.grd')

        # Bounding boxes before/after canonicalization, and their union.
        m = Chem.MolFromMolFile(fileN)
        lc, uc = rdshp.ComputeConfBox(m.GetConformer())
        rdmt.CanonicalizeMol(m)
        lc1, uc1 = rdshp.ComputeConfBox(m.GetConformer())
        lc2, uc2 = rdshp.ComputeUnionBox((lc, uc), (lc1, uc1))
        lc -= geom.Point3D(-4.353, 16.829, 2.782)
        uc -= geom.Point3D(9.574, 33.799, 12.557)
        self.assertTrue(feq(lc.Length(), 0.0))
        self.assertTrue(feq(uc.Length(), 0.0))
        lc1 -= geom.Point3D(-10.7519, -6.0778, -3.0123)
        uc1 -= geom.Point3D(8.7163, 5.3279, 3.1621)
        self.assertTrue(feq(lc1.Length(), 0.0))
        self.assertTrue(feq(uc1.Length(), 0.0))
        lc2 -= geom.Point3D(-10.7519, -6.0778, -3.01226)
        uc2 -= geom.Point3D(9.574, 33.799, 12.557)
        self.assertTrue(feq(lc2.Length(), 0.0))
        self.assertTrue(feq(uc2.Length(), 0.0))
# Run the test suite when executed directly (e.g. python testShapeHelpers.py).
if __name__=='__main__':
    print("Testing Shape Helpers wrapper")
    unittest.main()
| soerendip42/rdkit | Code/GraphMol/ShapeHelpers/Wrap/testShapeHelpers.py | Python | bsd-3-clause | 3,827 | [
"RDKit"
] | e3008261c8907150d327040a9d70afaa3e4c8311a63aa522f400f206223b2820 |
#!/usr/bin/python
#
# Python script to read bluetooth serial data from USB Smart Chargers
# from http://briandorey.com/post/usb-smart-charger-with-bluetooth-le.aspx
#
# run every 5 mins with cron
# sudo crontab -e
# */5 * * * * python /home/pi/processchargers.py &
#
import urllib2
import serial
import time
# url for processing page; readings are uploaded as query-string parameters.
url = 'http://10.0.0.55/charger.aspx?'
# Open named port and set baud rate to 115200.
# NOTE(review): with pyserial >= 3.0 the port is already open after the
# constructor and open() raises SerialException; this appears to target
# the pyserial 2.x API -- confirm the installed version.
ser = serial.Serial ("/dev/ttyACM0", 115200, timeout=2)
ser.open()
def sendbasiccommand(theinput):
    """Flush both serial buffers, write theinput, and return up to 512
    bytes of raw response (may be shorter if the 2 s timeout elapses)."""
    ser.flushInput()
    ser.flushOutput()
    ser.write(theinput.encode())
    return ser.read(512)
# function to send commands to the bluetooth module and return full response from first line
def sendcommand(theinput):
    """Write theinput to the serial port and return the first
    newline-terminated line of the response.

    NOTE(review): this busy-waits with no timeout or sleep -- if the
    module never sends a newline the loop spins forever; confirm whether
    a bounded retry (as in sendreceivedata) is wanted here too.
    """
    ser.flushInput()
    ser.flushOutput()
    time.sleep(0.25)
    ser.write(theinput.encode())
    time.sleep(0.25)
    buffer_string = ''
    while True:
        buffer_string = buffer_string + ser.read(ser.inWaiting())
        if '\n' in buffer_string:
            lines = buffer_string.split('\n')
            return lines[0]
# function to send data requests to the bluetooth module and return trimmed response
def sendreceivedata(theinput):
    """Send a CR-terminated data request and return the trimmed reply.

    Returns the first CR-delimited chunk with ".", "R," and "CR-comma"
    artefacts stripped, or the string "Error with data" if the serial
    exchange raises, or None after ~100 empty polls.

    NOTE(review): callers such as getChargerData wrap the result in
    float() catching only ValueError, so a None (timeout) return raises
    TypeError there -- confirm whether timeouts should also return
    "Error with data".
    """
    counter = 0
    theinput = theinput + '\r'
    try:
        ser.flushInput()
        ser.flushOutput()
        ser.write(theinput.encode())
        time.sleep(0.1)
        buffer_string = ''
        while True:
            buffer_string = buffer_string + ser.read(ser.inWaiting())
            if '\r' in buffer_string:
                lines = buffer_string.split('\r')
                return lines[0].replace(".","").replace("R,","").replace("\r,","")
            counter = counter + 1
            if counter > 100:
                break
    # Bug fix: was a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt; catch only ordinary exceptions.
    except Exception:
        return "Error with data"
def getChargerData(chargerid):
    """Connect to the charger with the given bluetooth address, read its
    charging characteristics, and upload them to the processing URL.

    Only uploads when the reported charging power exceeds 10; always
    disconnects (sends "K") before returning. Python 2 (print statements).
    """
    datastr = ''
    chargepower = 0
    # "E,0,<addr>" asks the module to connect; it answers "AOK" on success.
    if 'AOK' in sendcommand("E,0," + chargerid + "\r"):
        print 'connected'
        time.sleep(0.25)
        try:
            chargepower = float(sendreceivedata("CHR,001A"))
        except ValueError:
            print "Not a float"
        print "Power: %s" % chargepower
        if chargepower > 10:
            # Build the query string from the charger's GATT handles.
            datastr += 'chargerid=%s' % chargerid
            datastr += '&001A=%s' % sendreceivedata("CHR,001A") # ServiceChargingPower 10
            datastr += '&0026=%s' % sendreceivedata("CHR,0026") # ServiceTotalCharge 10
            datastr += '&0024=%s' % sendreceivedata("CHR,0024") # ServiceStartTime 14
            datastr += '&0028=%s' % sendreceivedata("CHR,0028") # ServiceEndTime 14
            #datastr += '&0018=%s' % sendreceivedata("CHR,0018") # ServiceDate 14
            #datastr += '&001C=%s' % sendreceivedata("CHR,001C") # ServiceActive 4
            #datastr += '&001E=%s' % sendreceivedata("CHR,001E") # ServicePowerDownVoltage 10
            #datastr += '&0020=%s' % sendreceivedata("CHR,0020") # ServicePowerUpVoltage 10
            #datastr += '&0022=%s' % sendreceivedata("CHR,0022") # ServiceDeviceID 14
            print datastr
            time.sleep(1)
            sendcommand("K\r") #disconnect from device
            # NOTE(review): bare 'except' below hides the real upload
            # failure (URLError, HTTPError, ...); consider narrowing.
            try:
                response = urllib2.urlopen(url + datastr)
                html = response.read()
                print html
            except:
                print "Unable to upload data"
        else:
            sendcommand("K\r") #disconnect from device
    else:
        print "Error connecting to charger"
# Poll each known charger in turn (bluetooth MAC addresses).
getChargerData("001EC01A6644") # brian
print "Charger 1 saved"
#time.sleep(5)
getChargerData("001EC025F3A5") # Andrew
print "Charger 2 saved"
#time.sleep(5)
getChargerData("001EC01A663D") # living room
print "Charger 3 saved"
# test functions to get devices and services
#print sendbasiccommand("f\r") # scan for available devices
#time.sleep(1)
#print sendreceivedata("LC\r") # list available services
ser.close()
print "Finished"
"Brian"
] | a214ad61480b2518ac9b3e1bcb347156c1c51f979d61634692640890929407f6 |
'''
File: fibonacci_heap_mod.py
Author: Keith Schwarz (htiek@cs.stanford.edu)
Ported to Python by Dan Stromberg (strombrg@gmail.com)
An implementation of a priority queue backed by a Fibonacci heap, as described
by Fredman and Tarjan. Fibonacci heaps are interesting theoretically because
they have asymptotically good runtime guarantees for many operations. In
particular, insert, peek, and decrease-key all run in amortized O(1) time.
dequeue_min and delete each run in amortized O(lg n) time. This allows
algorithms that rely heavily on decrease-key to gain significant performance
boosts. For example, Dijkstra's algorithm for single-source shortest paths can
be shown to run in O(m + n lg n) using a Fibonacci heap, compared to O(m lg n)
using a standard binary or binomial heap.
Internally, a Fibonacci heap is represented as a circular, doubly-linked list
of trees obeying the min-heap property. Each node stores pointers to its
parent (if any) and some arbitrary child. Additionally, every node stores its
degree (the number of children it has) and whether it is a "marked" node.
Finally, each Fibonacci heap stores a pointer to the tree with the minimum
value.
To insert a node into a Fibonacci heap, a singleton tree is created and merged
into the rest of the trees. The merge operation works by simply splicing
together the doubly-linked lists of the two trees, then updating the min
pointer to be the smaller of the minima of the two heaps. Peeking at the
smallest element can therefore be accomplished by just looking at the min
element. All of these operations complete in O(1) time.
The tricky operations are dequeue_min and decrease_key. dequeue_min works by
removing the root of the tree containing the smallest element, then merging its
children with the topmost roots. Then, the roots are scanned and merged so
that there is only one tree of each degree in the root list. This works by
maintaining a dynamic array of trees, each initially null, pointing to the
roots of trees of each dimension. The list is then scanned and this array is
populated. Whenever a conflict is discovered, the appropriate trees are merged
together until no more conflicts exist. The resulting trees are then put into
the root list. A clever analysis using the potential method can be used to
show that the amortized cost of this operation is O(lg n), see "Introduction to
Algorithms, Second Edition" by Cormen, Rivest, Leiserson, and Stein for more
details.
The other hard operation is decrease_key, which works as follows. First, we
update the key of the node to be the new value. If this leaves the node
smaller than its parent, we're done. Otherwise, we cut the node from its
parent, add it as a root, and then mark its parent. If the parent was already
marked, we cut that node as well, recursively mark its parent, and continue
this process. This can be shown to run in O(1) amortized time using yet
another clever potential function. Finally, given this function, we can
implement delete by decreasing a key to -infinity, then calling dequeue_min to
extract it.
'''
import math
import collections
def merge_lists(one, two):
    '''
    Splice two circular, doubly-linked lists together in O(1) time and
    return a pointer into the merged list.

    If each argument is the minimum node of the list it belongs to, the
    returned node is the smaller of the two minima; otherwise the return
    value is simply some node of the merged list.

    @param one A pointer into one of the two linked lists (or None).
    @param two A pointer into the other of the two linked lists (or None).
    @return A pointer into the merged list, or None if both inputs were None.
    '''
    # Guard clauses: with at most one non-empty list there is nothing to
    # splice (this also covers the both-None case, returning None).
    if one is None:
        return two
    if two is None:
        return one

    # Both lists are non-empty. Swap the "next" pointers of the two
    # handles and repair the matching "prev" pointers; this welds the two
    # rings into a single ring containing every node of both.
    one_successor = one.m_next
    one.m_next = two.m_next
    one.m_next.m_prev = one
    two.m_next = one_successor
    two.m_next.m_prev = two

    # Report whichever handle carries the smaller priority.
    if one.m_priority < two.m_priority:
        return one
    return two
def merge(one, two):
    '''
    Given two Fibonacci heaps, returns a new Fibonacci heap that contains
    all of the elements of the two heaps. Each input heap is destructively
    emptied by this call; the heap objects themselves remain usable, but
    they will contain no elements afterwards.

    @param one The first Fibonacci heap to merge.
    @param two The second Fibonacci heap to merge.
    @return A new Fibonacci_heap containing all of the elements of both heaps.
    '''
    merged = Fibonacci_heap()
    # Splicing the two root lists is O(1) and also hands back the smaller
    # of the two minima, which becomes the new heap's min pointer.
    merged.m_min = merge_lists(one.m_min, two.m_min)
    merged.m_size = one.m_size + two.m_size
    # Drain the inputs so their state is consistent with being empty.
    one.m_min = two.m_min = None
    one.m_size = two.m_size = 0
    return merged
# In order for all of the Fibonacci heap operations to complete in O(1),
# clients need to have O(1) access to any element in the heap. We make
# this work by having each insertion operation produce a handle to the
# node in the tree. In actuality, this handle is the node itself.
class Entry(object):
    # pylint: disable=too-many-instance-attributes
    '''A single node of the Fibonacci heap.

    Handed back to clients by enqueue() so that decrease_key/delete have
    O(1) access to the node. Entries order lexicographically by
    (priority, element).

    NOTE: defining __eq__ without __hash__ makes Entry unhashable on
    Python 3 (unchanged from the original implementation).
    '''
    __slots__ = ['m_degree', 'm_is_marked', 'm_parent', 'm_child', 'm_next', 'm_prev', 'm_elem', 'm_priority']

    def __init__(self, elem, priority):
        '''Create a singleton entry holding elem with the given priority.'''
        # Number of children.
        self.m_degree = 0
        # Whether this node has lost a child since becoming a child itself.
        self.m_is_marked = False
        # Parent in the tree, if any.
        self.m_parent = None
        # Child node, if any.
        self.m_child = None
        # A new node is its own circular sibling list.
        self.m_next = self.m_prev = self
        self.m_elem = elem
        self.m_priority = priority

    # Bug fix: the original comparison methods were inconsistent --
    # __eq__ returned True when EITHER priorities or elements matched, and
    # __lt__/__gt__ fell through to comparing elements even when the
    # priorities already ordered the entries, so a < b and a == b could
    # hold at the same time. Compare lexicographically instead.
    def __lt__(self, other):
        return (self.m_priority, self.m_elem) < (other.m_priority, other.m_elem)

    def __eq__(self, other):
        return (self.m_priority, self.m_elem) == (other.m_priority, other.m_elem)

    def __gt__(self, other):
        return (self.m_priority, self.m_elem) > (other.m_priority, other.m_elem)

    def __cmp__(self, other):
        '''Python 2 three-way comparison, derived from __lt__/__gt__.'''
        if self.__lt__(other):
            return -1
        elif self.__gt__(other):
            return 1
        else:
            return 0

    def get_value(self):
        '''
        Returns the element represented by this heap entry.

        @return The element represented by this heap entry.
        '''
        return self.m_elem

    def set_value(self, value):
        '''
        Sets the element associated with this heap entry.

        @param value The element to associate with this heap entry.
        '''
        self.m_elem = value

    def get_priority(self):
        '''
        Returns the priority of this element.

        @return The priority of this element.
        '''
        return self.m_priority

    def _entry(self, elem, priority):
        '''
        Re-initializes this Entry to hold the given element with the
        indicated priority, resetting it to a singleton list.

        @param elem The element stored in this node.
        @param priority The priority of this element.
        '''
        self.m_next = self.m_prev = self
        self.m_elem = elem
        self.m_priority = priority
class Fibonacci_heap(object):
'''
A class representing a Fibonacci heap.
@param T The type of elements to store in the heap.
@author Keith Schwarz (htiek@cs.stanford.edu)
'''
def __init__(self):
# Pointer to the minimum element in the heap.
self.m_min = None
# Cached size of the heap, so we don't have to recompute this explicitly.
self.m_size = 0
def enqueue(self, value, priority):
    '''
    Inserts the specified element into the Fibonacci heap with the
    specified priority. The priority must be a valid double, so you
    cannot set the priority to NaN.

    @param value The value to insert.
    @param priority Its priority, which must be valid.
    @return An Entry representing that element in the tree.
    '''
    self._check_priority(priority)
    # A freshly built Entry is a singleton circular list, so insertion is
    # just an O(1) splice into the root list plus a min-pointer update.
    entry = Entry(value, priority)
    self.m_min = merge_lists(self.m_min, entry)
    self.m_size += 1
    # Hand the node back so the caller can decrease_key/delete it later.
    return entry
def min(self):
'''
Returns an Entry object corresponding to the minimum element of the
Fibonacci heap, raising an IndexError if the heap is
empty.
@return The smallest element of the heap.
@throws IndexError If the heap is empty.
'''
if not bool(self):
raise IndexError("Heap is empty.")
return self.m_min
def __bool__(self):
'''
Returns whether the heap is nonempty.
@return Whether the heap is nonempty.
'''
if self.m_min is None:
return False
else:
return True
__nonzero__ = __bool__
def __len__(self):
    '''
    Returns the number of elements in the heap.

    O(1): the size is maintained incrementally by enqueue/dequeue_min.

    @return The number of elements in the heap.
    '''
    return self.m_size
def dequeue_min(self):
    # pylint: disable=too-many-branches
    '''
    Dequeues and returns the minimum element of the Fibonacci heap. If the
    heap is empty, this throws an IndexError.

    Runs in amortized O(lg n): the root-coalescing pass below is what pays
    down the cost of the earlier O(1) inserts and decrease-keys.

    @return The smallest element of the Fibonacci heap.
    @throws IndexError if the heap is empty.
    '''
    # Check for whether we're empty.
    if not bool(self):
        raise IndexError("Heap is empty.")
    # Otherwise, we're about to lose an element, so decrement the number of
    # entries in this heap.
    self.m_size -= 1
    # Grab the minimum element so we know what to return.
    min_elem = self.m_min
    # Now, we need to get rid of this element from the list of roots. There
    # are two cases to consider. First, if this is the only element in the
    # list of roots, we set the list of roots to be None by clearing m_min.
    # Otherwise, if it's not None, then we write the elements next to the
    # min element around the min element to remove it, then arbitrarily
    # reassign the min.
    if self.m_min.m_next is self.m_min:
        # Case one
        self.m_min = None
    else:
        # Case two
        self.m_min.m_prev.m_next = self.m_min.m_next
        self.m_min.m_next.m_prev = self.m_min.m_prev
        # Arbitrary element of the root list.
        self.m_min = self.m_min.m_next
    # Next, clear the parent fields of all of the min element's children,
    # since they're about to become roots. Because the elements are
    # stored in a circular list, the traversal is a bit complex.
    if min_elem.m_child is not None:
        # Keep track of the first visited node.
        curr = min_elem.m_child
        while True:
            curr.m_parent = None
            # Walk to the next node, then stop if this is the node we
            # started at.
            curr = curr.m_next
            if curr is min_elem.m_child:
                # This was a do-while (curr != minElem.mChild);
                break
    # Next, splice the children of the root node into the topmost list,
    # then set self.m_min to point somewhere in that list.
    self.m_min = merge_lists(self.m_min, min_elem.m_child)
    # If there are no entries left, we're done.
    if self.m_min is None:
        return min_elem
    # Next, we need to coalesce all of the roots so that there is only one
    # tree of each degree. To track trees of each size, we allocate an
    # ArrayList where the entry at position i is either None or the
    # unique tree of degree i.
    tree_table = collections.deque()
    # We need to traverse the entire list, but since we're going to be
    # messing around with it we have to be careful not to break our
    # traversal order mid-stream. One major challenge is how to detect
    # whether we're visiting the same node twice. To do this, we'll
    # spent a bit of overhead adding all of the nodes to a list, and
    # then will visit each element of this list in order.
    to_visit = collections.deque()
    # To add everything, we'll iterate across the elements until we
    # find the first element twice. We check this by looping while the
    # list is empty or while the current element isn't the first element
    # of that list.
    #for (Entry<T> curr = self.m_min; toVisit.isEmpty() || toVisit.get(0) != curr; curr = curr.m_next)
    curr = self.m_min
    while not to_visit or to_visit[0] is not curr:
        to_visit.append(curr)
        curr = curr.m_next
    # Traverse this list and perform the appropriate unioning steps.
    for curr in to_visit:
        # Keep merging until a match arises.
        while True:
            # Ensure that the list is long enough to hold an element of this
            # degree.
            while curr.m_degree >= len(tree_table):
                tree_table.append(None)
            # If nothing's here, we can record that this tree has this size
            # and are done processing.
            if tree_table[curr.m_degree] is None:
                tree_table[curr.m_degree] = curr
                break
            # Otherwise, merge with what's there.
            other = tree_table[curr.m_degree]
            # Clear the slot
            tree_table[curr.m_degree] = None
            # Determine which of the two trees has the smaller root, storing
            # the two trees accordingly.
            #minimum = (other.m_priority < curr.m_priority)? other : curr
            if other.m_priority < curr.m_priority:
                minimum = other
            else:
                minimum = curr
            #maximum = (other.m_priority < curr.m_priority)? curr : other
            if other.m_priority < curr.m_priority:
                maximum = curr
            else:
                maximum = other
            # Break max out of the root list, then merge it into min's child
            # list.
            maximum.m_next.m_prev = maximum.m_prev
            maximum.m_prev.m_next = maximum.m_next
            # Make it a singleton so that we can merge it.
            maximum.m_next = maximum.m_prev = maximum
            minimum.m_child = merge_lists(minimum.m_child, maximum)
            # Reparent maximum appropriately.
            maximum.m_parent = minimum
            # Clear maximum's mark, since it can now lose another child.
            maximum.m_is_marked = False
            # Increase minimum's degree; it now has another child.
            minimum.m_degree += 1
            # Continue merging this tree.
            curr = minimum
        # Update the global min based on this node. Note that we compare
        # for <= instead of < here. That's because if we just did a
        # reparent operation that merged two different trees of equal
        # priority, we need to make sure that the min pointer points to
        # the root-level one.
        if curr.m_priority <= self.m_min.m_priority:
            self.m_min = curr
    return min_elem
def decrease_key(self, entry, new_priority):
'''
Decreases the key of the specified element to the new priority. If the
new priority is greater than the old priority, this function raises an
ValueError. The new priority must be a finite double,
so you cannot set the priority to be NaN, or +/- infinity. Doing
so also raises an ValueError.
It is assumed that the entry belongs in this heap. For efficiency
reasons, this is not checked at runtime.
@param entry The element whose priority should be decreased.
@param newPriority The new priority to associate with this entry.
@throws ValueError If the new priority exceeds the old
priority, or if the argument is not a finite double.
'''
self._check_priority(new_priority)
if new_priority > entry.m_priority:
raise ValueError("New priority exceeds old.")
# Forward this to a helper function.
self.decrease_key_unchecked(entry, new_priority)
def delete(self, entry):
'''
Deletes this Entry from the Fibonacci heap that contains it.
It is assumed that the entry belongs in this heap. For efficiency
reasons, this is not checked at runtime.
@param entry The entry to delete.
'''
#Use decreaseKey to drop the entry's key to -infinity. This will
#guarantee that the node is cut and set to the global minimum.
self.decrease_key_unchecked(entry, float("-inf"))
# Call dequeue_min to remove it.
self.dequeue_min()
@staticmethod
def _check_priority(priority):
'''
Utility function which, given a user-specified priority, checks whether
it's a valid double and throws an ValueError otherwise.
@param priority The user's specified priority.
@throws ValueError if it is not valid.
'''
if math.isnan(priority) or math.isinf(priority):
raise ValueError("Priority {} is invalid.".format(priority))
def decrease_key_unchecked(self, entry, priority):
'''
Decreases the key of a node in the tree without doing any checking to ensure
that the new priority is valid.
@param entry The node whose key should be decreased.
@param priority The node's new priority.
'''
# First, change the node's priority.
entry.m_priority = priority
# If the node no longer has a higher priority than its parent, cut it.
# Note that this also means that if we try to run a delete operation
# that decreases the key to -infinity, it's guaranteed to cut the node
# from its parent.
if entry.m_parent is not None and entry.m_priority <= entry.m_parent.m_priority:
self.cut_node(entry)
# If our new value is the new min, mark it as such. Note that if we
# ended up decreasing the key in a way that ties the current minimum
# priority, this will change the min accordingly.
if entry.m_priority <= self.m_min.m_priority:
self.m_min = entry
    def cut_node(self, entry):
        '''
        Cuts a node from its parent. If the parent was already marked,
        recursively cuts that node from its parent as well (a "cascading
        cut"). The cut node is cleared of its mark and spliced into the
        root list.
        @param entry The node to cut from its parent.
        '''
        # Begin by clearing the node's mark, since we just cut it.
        entry.m_is_marked = False
        # Base case: If the node has no parent, we're done.
        if entry.m_parent is None:
            return
        # Rewire the node's siblings around it, if it has any siblings.
        if entry.m_next is not entry:
            # Has siblings (the sibling list is circular and doubly linked).
            entry.m_next.m_prev = entry.m_prev
            entry.m_prev.m_next = entry.m_next
        # If the node is the one identified by its parent as its child,
        # we need to rewrite that pointer to point to some arbitrary other
        # child.
        if entry.m_parent.m_child is entry:
            if entry.m_next is not entry:
                # If there are any other children, pick one of them arbitrarily.
                entry.m_parent.m_child = entry.m_next
            else:
                # Otherwise, there aren't any children left and we should clear the
                # pointer and drop the node's degree.
                entry.m_parent.m_child = None
        # Decrease the degree of the parent, since it just lost a child.
        entry.m_parent.m_degree -= 1
        # Splice this tree into the root list by converting it to a singleton
        # and invoking the merge subroutine.
        entry.m_prev = entry.m_next = entry
        self.m_min = merge_lists(self.m_min, entry)
        # Mark the parent and recursively cut it if it's already been
        # marked. NOTE: entry.m_parent is still valid here; it is only
        # cleared below, after the (possibly recursive) cascade.
        if entry.m_parent.m_is_marked:
            self.cut_node(entry.m_parent)
        else:
            entry.m_parent.m_is_marked = True
        # Clear the relocated node's parent; it's now a root.
        entry.m_parent = None
| AminFadaee/8_Puzzle_Solver | fibonacci_heap_mod.py | Python | mit | 24,049 | [
"VisIt"
] | 296ca4d9f47dafbe032537fb9ee4cf340c3cc1e2ccd7d0f234f4e400488a19b1 |
"""
Contains a class for containing multiple replica Universes in a single object.
This module defines a class that will contain Universes for each replica in a
replica exchange simulation.
While each replica can be defined individually, this adds a convenient
container for logically similar trajectories and allows easy iteration over
and access to the individual replica Universes.
"""
########################################################################
# #
# This module was written by Thomas Heavey in 2018. #
# theavey@bu.edu thomasjheavey@gmail.com #
# #
# Copyright 2018 Thomas J. Heavey IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
from __future__ import absolute_import, print_function
from collections.abc import Sequence
import errno
import glob
import numpy as np
import os
import six
from typing import Iterable
from .tools import find_nearest_idx
from .coordinate_analysis import Universe
from . import get_temperatures, exceptions
__all__ = ["REUniverse"]
class REUniverse(Sequence):
    """
    A class for working with MDAnalysis Universes from replica exchange sims.

    This class creates Universe objects for different simulations that all
    have the same topology (atoms, bonds, etc.), but different temperatures
    (not strictly required, though a 'temperature' needs to be specified for
    each).
    This class itself does not add much on top of
    :class:`~paratemp.coordinate_analysis.Universe` other than iteration over
    the replicas and creating them starting all from the same topology with
    different trajectories.

    An instance of this class can be indexed (with '[]'s) with either ints
    (to get the Universe with that index) or with floats or strings that can
    be converted to floats (to get the replica with the temperature nearest
    to that value).

    >>> reu = REUniverse('test.gro', 'simulation_folder',
    ...                  trajs=['cold.xtc', 'warm.xtc'], temps=[100, 200])
    >>> reu
    <REUniverse with 2 replicas>
    >>> reu[0].temperature
    100.0
    >>> reu['75'].temperature
    100.0
    >>> print([u.temperature for u in reu])
    [100.0, 200.0]
    >>> len(reu)
    2
    >>> reu.keys()
    ('100.0', '200.0')
    """

    def __init__(
        self,
        topology,
        base_folder,
        trajs=None,
        traj_glob="*.xtc",
        temps="TOPO/temperatures.dat",
    ):
        """
        Instatiate a replica exchange universe for a set of replica trajectories

        :param str topology: Name of the topology file (such as a .gro or
            .pdb file).
        :param str base_folder: Name of the folder in which to look for the
            trajectories, topology, temperature file, and others.
        :param Iterable[str] trajs: List of the trajectory files. If this
            is None, traj_glob will be used instead.
            The files can be listed either relative to the current directory
            (checked first) or relative to `base_folder`.
            **Be aware**: the order of these does not matter because they will
            be sorted alphanumerically and then by length. This should be
            fine if all the trajectory names only differ by the value of
            some index, but in other cases, this could cause issues with
            unexpected ordering or incorrect matching of temperatures to
            trajectories.
        :param str traj_glob: If `trajs` is None, this string will be glob
            expanded to find the trajectories.
        :type temps: str or Iterable
        :param temps: Temperatures or path to file with temperatures of the
            replicas in the simulations.
            If a string is provided, it is assumed to be a path relative to
            the current directory or `base_folder`.
            Otherwise, it is assumed to be an iterable of values that can be
            cast to floats.
        """
        self.base_folder = os.path.abspath(base_folder)
        self._top = self._fn(topology)
        self._trajs = self._get_trajs(trajs, traj_glob)
        self._temps = self._get_temps(temps)
        if len(self._temps) != len(self._trajs):
            raise ValueError(
                "len of temps ({}) not same".format(len(self._temps))
                + " as len of trajs ({})".format(len(self._trajs))
            )
        # Sort alphanumerically, then by length, so names differing only by
        # an index end up in numeric order (e.g. t1, t2, ..., t10).
        self._trajs.sort()
        self._trajs.sort(key=len)
        # TODO find more sure way to match temp to trajectory
        self.universes = np.array(
            [
                Universe(self._top, t, temp=self._temps[i])
                for i, t in enumerate(self._trajs)
            ]
        )

    def _fn(self, path):
        """
        Return absolute path to file relative to either here or base_folder

        :param str path: (Relative path and) file to look for in current
            directory or relative to `base_folder`. Current directory is
            checked first and the absolute path to that is returned if it is
            found there. Otherwise, the file is searched for relative to
            `base_folder`. If it's not found there either, an OSError
            (ENOENT) is raised.
        :return: Absolute path to path
        :rtype: str
        :raises: OSError if the file is not found relative to current
            directory or `base_folder`.
        """
        if os.path.isfile(path):
            return os.path.abspath(path)
        elif os.path.isfile(os.path.join(self.base_folder, path)):
            return os.path.abspath(os.path.join(self.base_folder, path))
        else:
            raise OSError(
                errno.ENOENT, "{} not found here or under base_folder".format(path)
            )

    def _get_temps(self, temps):
        """
        Get the temperatures for the set of replicas

        :type temps: str or Iterable
        :param temps: Either the path to the file with the temperatures or a
            list-like of the temperatures.
            If a string is given, it will be processed as being absolute,
            relative to current dir., or relative to base_folder. This uses
            :func:`paratemp.tools.get_temperatures` to read the file.
            If the input is not string-like, it will be converted to a
            :func:`np.ndarray` of floats.
        :rtype: np.ndarray
        :return: The temperatures as floats
        """
        if isinstance(temps, six.string_types):
            return get_temperatures(self._fn(temps))
        else:
            return np.array([float(t) for t in temps])

    def _get_trajs(self, trajs, traj_glob):
        """
        Get paths to trajectory files

        This method will first see if trajs is not None.
        If it is None, it will try to glob expand in the current path then
        from base_folder if the first glob returns an empty list.
        If none of these works, an OSError will be raised.
        For the first one that works, the list of files will be expanded to
        absolute paths and returned as a list of strings of the paths.

        :param Iterable trajs: If this is not None, this will be taken as a
            list of paths to the trajectory files. The order here does
            not matter because they will be sorted in
            :meth:`~paratemp.re_universe.REUniverse.__init__` (as it is
            currently implemented).
        :param str traj_glob: A string which can be glob expanded to give
            the trajectory files (and only the relevant trajectory files).
        :rtype: list
        :return: A list of the absolute paths to the trajectories.
        :raises: OSError if trajs is None and glob expansion both here and
            in base_folder give empty lists.
        """
        if trajs is not None:
            return [self._fn(t) for t in trajs]
        elif isinstance(traj_glob, six.string_types):
            g1 = glob.glob(traj_glob)
            g2 = glob.glob(os.path.join(self.base_folder, traj_glob))
            if g1:
                return [self._fn(t) for t in g1]
            elif g2:
                return [self._fn(t) for t in g2]
            else:
                raise OSError(
                    errno.ENOENT,
                    "{} did not seem to lead".format(traj_glob)
                    + " to any files here or under base_folder",
                )
        else:
            raise exceptions.InputError(
                (trajs, traj_glob), "use trajs or traj_glob " "to find trajectory files"
            )

    def __getitem__(self, i):
        """
        Get one of the replica universes by index or temperature

        If `i` is an int, then this will return the Universe with `i` as its
        index.
        If `i` is a string or float, the replica Universe with the
        temperature absolutely nearest to `i` will be returned.

        :type i: int or str or float
        :param i: The index of the universe to be returned or a string/float
            of the temperature closest to the temp of the Universe to be
            returned.
        :rtype: Universe
        :return: The Universe indicated by `i`.
        :raises TypeError: if `i` is not an int, str, or float.
        """
        if isinstance(i, int):
            if i > len(self) - 1:
                raise IndexError(
                    "index {} is ".format(i)
                    + "larger than the number of replicas present"
                )
            return self.universes[i]
        if isinstance(i, six.string_types) or isinstance(i, float):
            return self.universes[find_nearest_idx(self._temps, float(i))]
        # FIX: previously fell through and silently returned None for
        # unsupported key types.
        raise TypeError(
            "indices must be int, str, or float, not {}".format(type(i).__name__)
        )

    def __len__(self):
        """
        Return the number of replicas in the object.

        :rtype: int
        :return: The number of replicas
        """
        return len(self.universes)

    def __repr__(self):
        return "<REUniverse with {} replicas>".format(len(self))

    def keys(self):
        """
        Return the temperatures of the replicas (as can be used for indexing)

        As currently implemented, these are rounded to the nearest tenths
        place, but this may change in the future.

        :rtype: tuple[str]
        :return: The temperatures of the replicas
        """
        # TODO possibly change precision based on spread of temperatures
        # (tenths might not always be precise enough for large systems)
        # FIX: return a tuple as documented (previously returned a
        # generator, contradicting the docstring and the class doctest).
        return tuple("{:.1f}".format(t) for t in self._temps)

    def values(self):
        """
        Return the universes.

        :rtype: np.ndarray[Universe]
        :return: The numpy array of the Universe objects
        """
        return self.universes

    def items(self):
        """
        Return a list of key-value pairs

        :rtype: list[tuple[str, Universe]]
        :return: A list of key-value pair tuples
        """
        # FIX: materialize as a list as documented (zip() is an iterator).
        return list(zip(self.keys(), self.values()))
| theavey/ParaTemp | paratemp/re_universe.py | Python | apache-2.0 | 11,993 | [
"MDAnalysis"
] | b28dc7323ee5b03d31d9424aa8c9588ecee5b93cffc3c1569c635460afa547b9 |
"""Read and write treebanks."""
from __future__ import generator_stop
import os
import re
import sys
import gzip
from glob import glob
from itertools import count, chain, islice
from collections import defaultdict
import xml.etree.ElementTree as ElementTree
from collections import OrderedDict
from .tree import (Tree, ParentedTree, brackettree, escape, unescape,
writebrackettree, writediscbrackettree, SUPERFLUOUSSPACERE, HEAD)
from .treetransforms import removeemptynodes
from .punctuation import applypunct
from .heads import applyheadrules, readheadrules, readmodifierrules
from .util import openread
FIELDS = tuple(range(6))
WORD, LEMMA, TAG, MORPH, FUNC, PARENT = FIELDS
EXPORTHEADER = '%% word\tlemma\ttag\tmorph\tedge\tparent\tsecedge\n'
EXPORTNONTERMINAL = re.compile(r'^#([5-9][0-9][0-9])$')
POSRE = re.compile(r'\(([^() ]+)\s+[^ ()]+\s*\)')
# leaf itself can be empty; leaf ends with closing paren or whitespace
# Assumes there is no whitespace between open paren and non-terminal: "( NP "
LEAVESRE = re.compile(r' (?=\))| ([^ ()]+)\s*(?=[\s)])')
ALPINOXML = re.compile(
rb'<\?xml version="1.0" encoding="UTF-?8"\?>.*?</alpino_ds>\r?\n',
flags=re.DOTALL | re.IGNORECASE)
ALPINOSENTID = re.compile(b'<sentence sentid="(.*?)">')
class Item(object):
    """Container for one treebank sentence.

    Bundles the parse tree, the tokenized sentence, an optional trailing
    comment, and the raw text of the sentence in its original treebank
    format."""
    __slots__ = ('tree', 'sent', 'comment', 'block')

    def __init__(self, tree, sent, comment, block):
        # Store each argument in the slot of the same name:
        # tree: a ParentedTree; sent: list of str tokens;
        # comment: a string or None; block: raw treebank string.
        for slot, value in zip(Item.__slots__, (tree, sent, comment, block)):
            setattr(self, slot, value)
class CorpusReader(object):
    """Abstract corpus reader.

    Subclasses implement ``_read_blocks`` and ``_parse`` for a particular
    treebank format; this base class provides caching, iteration, and the
    common tree transformations (punctuation handling, head assignment,
    root insertion, etc.)."""

    def __init__(self, path, encoding='utf8', ensureroot=None, punct=None,
            headrules=None, removeempty=False,
            functions=None, morphology=None, lemmas=None,
            modifierrules=None):
        """
        :param path: filename or pattern of corpus files; e.g., ``wsj*.mrg``.
        :param ensureroot: add root node with given label if necessary.
        :param removeempty: remove empty nodes and any empty ancestors; a
                terminal is empty if it is equal to None, '', or '-NONE-'.
        :param headrules: if given, read rules for assigning heads and apply
                them by ordering constituents according to their heads.
        :param punct: one of ...

                :None: leave punctuation as is [default].
                :'move': move punctuation to appropriate constituents
                        using heuristics.
                :'moveall': same as 'move', but moves all preterminals under root,
                        instead of only recognized punctuation.
                :'prune': prune away leading & ending quotes & periods, then move.
                :'remove': eliminate punctuation.
                :'removeall': eliminate all preterminals directly under root.
                :'root': attach punctuation directly to root
                        (as in original Negra/Tiger treebanks).
        :param functions: one of ...

                :None, 'leave': leave syntactic labels as is [default].
                :'add': concatenate grammatical function to syntactic label,
                        separated by a hyphen: e.g., ``NP => NP-SBJ``.
                :'remove': strip away hyphen-separated grammatical function,
                        e.g., ``NP-SBJ => NP``.
                :'replace': replace syntactic label with grammatical function,
                        e.g., ``NP => SBJ``.
        :param morphology: one of ...

                :None, 'no': use POS tags as preterminals [default].
                :'add': concatenate morphological information to POS tags,
                        e.g., ``DET/sg.def``.
                :'replace': use morphological information as preterminal label
                :'between': add node with morphological information between
                        POS tag and word, e.g., ``(DET (sg.def the))``.
        :param lemmas: one of ...

                :None: ignore lemmas [default].
                :'add': concatenate lemma to terminals, e.g., men/man.
                :'replace': use lemmas as terminals.
                :'between': insert lemma as node between POS tag and word."""
        self.removeempty = removeempty
        self.ensureroot = ensureroot
        self.functions = functions
        self.punct = punct
        self.morphology = morphology
        self.lemmas = lemmas
        self.headrules = readheadrules(headrules) if headrules else {}
        self.modifierrules = (readmodifierrules(modifierrules)
                if modifierrules else None)
        self._encoding = encoding
        try:
            # Sort filenames numerically where possible ('-' means stdin);
            # numbase is a module-level helper defined elsewhere in this
            # file -- presumably it extracts a numeric sort key.
            self._filenames = (sorted(glob(path), key=numbase)
                    if path != '-' else ['-'])
        except TypeError:
            # Mixed-type sort keys (number vs. string) make numbase's keys
            # incomparable; fall back to plain lexicographic ordering.
            # print('all sentence IDs must have the same type signature '
            #         '(number, string)')
            # raise
            self._filenames = sorted(glob(path))
        # Validate each option against its set of accepted values.
        for opt, opts in (
                (functions, (None, 'leave', 'add', 'replace', 'remove',
                    'between')),
                (morphology, (None, 'no', 'add', 'replace', 'between')),
                (punct, (None, 'no', 'move', 'moveall', 'remove', 'removeall',
                    'prune', 'root')),
                (lemmas, (None, 'no', 'add', 'replace', 'between'))):
            if opt not in opts:
                raise ValueError('Expected one of %r. Got: %r' % (opts, opt))
        if not self._filenames:
            raise ValueError("no files matched pattern '%s' in %s" % (
                    path, os.getcwd()))
        self._block_cache = None  # optionally, cache of blocks (e.g., etrees)
        self._trees_cache = None  # cache of parsed Items, filled lazily

    def itertrees(self, start=None, end=None):
        """
        :returns: an iterator returning tuples ``(key, item)``
                of sentences in corpus, where ``item`` is an :py:class:Item
                instance with ``tree``, ``sent``, and ``comment`` attributes.
                Useful when the dictionary of all trees in corpus would not fit in
                memory."""
        for n, a in islice(self._read_blocks(), start, end):
            yield n, self._parsetree(a)

    def trees(self):
        """
        :returns: an ordered dictionary of parse trees
                (``Tree`` objects with integer indices as leaves)."""
        if not self._trees_cache:
            self._trees_cache = OrderedDict((n, self._parsetree(a))
                    for n, a in self._read_blocks())
        return OrderedDict((n, a.tree) for n, a in self._trees_cache.items())

    def sents(self):
        """
        :returns: an ordered dictionary of sentences,
                each sentence being a list of words."""
        if not self._trees_cache:
            self._trees_cache = OrderedDict((n, self._parsetree(a))
                    for n, a in self._read_blocks())
        return OrderedDict((n, a.sent) for n, a in self._trees_cache.items())

    def tagged_sents(self):
        """
        :returns: an ordered dictionary of tagged sentences,
                each tagged sentence being a list of (word, tag) pairs."""
        if not self._trees_cache:
            self._trees_cache = OrderedDict((n, self._parsetree(a))
                    for n, a in self._read_blocks())
        # Pair each token with the POS tag of the leaf at the same index;
        # pos() entries are sorted by leaf index to align with the sentence.
        return OrderedDict(
                (n, [(w, t) for w, (_, t) in zip(a.sent, sorted(a.tree.pos()))])
                for n, a in self._trees_cache.items())

    def blocks(self):
        """
        :returns: a list of strings containing the raw representation of
                trees in the original treebank.

        NOTE: abstract; subclasses override this. The base implementation
        returns None."""

    def _read_blocks(self):
        """Iterate over blocks in corpus file corresponding to parse trees.

        NOTE: abstract; subclasses override this. The base implementation
        returns None."""

    def _parse(self, block):
        """:returns: a parse tree given a block from the treebank file."""
        raise NotImplementedError('this is an abstract base class.')

    def _parsetree(self, block):
        """:returns: a transformed parse tree and sentence.

        Applies the transformations selected at construction time
        (removeempty, ensureroot, punct, headrules) to the parsed Item."""
        item = self._parse(block)
        if not item.sent:  # empty sentence; nothing to transform
            return item
        if self.removeempty:
            removeemptynodes(item.tree, item.sent)
        if self.ensureroot:
            if item.tree.label == '':
                item.tree.label = self.ensureroot
            elif item.tree.label != self.ensureroot:
                item.tree = ParentedTree(self.ensureroot, [item.tree])
        if not isinstance(self, BracketCorpusReader):
            # roughly order constituents by order in sentence
            for a in reversed(list(item.tree.subtrees(lambda x: len(x) > 1))):
                a.children.sort(key=Tree.leaves)
        if self.punct:
            applypunct(self.punct, item.tree, item.sent)
        if self.headrules:
            applyheadrules(item.tree, self.headrules, self.modifierrules)
        return item

    def _word(self, block):
        """:returns: a list of words given a block from the treebank file."""
        # Punctuation removal changes the token sequence, so the full
        # transformation pipeline must run; otherwise a plain parse suffices.
        if self.punct in {'remove', 'prune'}:
            return self._parsetree(block).sent
        return self._parse(block).sent
class BracketCorpusReader(CorpusReader):
    """Corpus reader for phrase-structures in bracket notation.

    For example::

        (S (NP John) (VP (VB is) (JJ rich)) (. .))"""

    def blocks(self):
        """
        :returns: a list of strings containing the raw representation of
                trees in the original treebank."""
        return OrderedDict(self._read_blocks())

    def _read_blocks(self):
        """Yield (sentno, line) pairs; one tree per non-empty line,
        numbered consecutively from 1 across all files."""
        for filename in self._filenames:
            with openread(filename, encoding=self._encoding) as inp:
                yield from enumerate((line for line in inp if line), 1)

    def _parse(self, block):
        """:returns: an Item for one bracketed tree given as a string.

        Tokens in the tree string are replaced by consecutive integer
        indices (via the counter ``c``); the original tokens are collected
        separately into ``sent``."""
        c = count()
        block = SUPERFLUOUSSPACERE.sub(')', block)
        try:
            tree = ParentedTree(LEAVESRE.sub(lambda _: ' %d' % next(c), block))
        except ValueError:
            print(block)
            raise
        for node in tree.subtrees():
            if node.source is None:
                node.source = ['--'] * len(FIELDS)
            for char in '-=':  # map NP-SUBJ and NP=2 to NP; don't touch -NONE-
                x = node.label.find(char)
                if x > 0:
                    # A '-' suffix that is not all digits is a grammatical
                    # function; record it (minus any trailing index digits).
                    if char == '-' and not node.label[x + 1:].isdigit():
                        node.source[FUNC] = node.label[x + 1:].rstrip(
                                '=0123456789')
                    if self.functions == 'remove':
                        node.label = node.label[:x]
        sent = [escape(token) for token in LEAVESRE.findall(block)]
        return Item(tree, sent, None, block)
class DiscBracketCorpusReader(BracketCorpusReader):
    """A corpus reader for discontinuous trees in bracket notation.

    Leaves consist of an index and a word, with the indices indicating
    the word order of the sentence. For example::

        (S (NP 1=John) (VP (VB 0=is) (JJ 2=rich)) (? 3=?))

    There is one tree per line. Optionally, the tree may be followed by a
    comment, separated by a TAB. Compared to Negra's export format, this format
    lacks morphology, lemmas and functional edges. On the other hand, it is
    close to the internal representation employed here, so it can be read
    efficiently."""

    def _parse(self, block):
        """:returns: an Item for one discontinuous bracketed tree.

        ``block`` is a single line, optionally ``tree<TAB>comment``."""
        treestr, comment = block, None
        if '\t' in block:
            treestr, comment = block.rstrip('\n\r').split('\t', 1)
        sent = {}

        def substleaf(x):
            """Collect token and return index."""
            idx, token = x.split('=', 1)
            idx = int(idx)
            sent[idx] = unescape(token)
            return int(idx)

        tree = ParentedTree.parse(treestr, parse_leaf=substleaf)
        # Turn the index->token mapping into a dense list; indices not
        # covered by any leaf become None.
        sent = [sent.get(n, None) for n in range(max(sent) + 1)]
        if not all(0 <= n < len(sent) for n in tree.leaves()):
            raise ValueError('All leaves must be in the interval 0..n with '
                    'n=len(sent)\ntokens: %d indices: %r\nsent: %s' % (
                    len(sent), tree.leaves(), sent))
        for node in tree.subtrees():
            if node.source is None:
                node.source = ['--'] * len(FIELDS)
            for char in '-=':  # map NP-SUBJ and NP=2 to NP; don't touch -NONE-
                x = node.label.find(char)
                if x > 0:
                    # A '-' suffix that is not all digits is a grammatical
                    # function; record it (minus any trailing index digits).
                    if char == '-' and not node.label[x + 1:].isdigit():
                        node.source[FUNC] = node.label[x + 1:].rstrip(
                                '=0123456789')
                    if self.functions == 'remove':
                        node.label = node.label[:x]
        return Item(tree, sent, comment, block)
class NegraCorpusReader(CorpusReader):
    """Read a corpus in the Negra export format."""

    def blocks(self):
        """
        :returns: a list of strings containing the raw representation of
                trees in the original treebank."""
        if self._block_cache is None:
            self._block_cache = OrderedDict(self._read_blocks())
        return OrderedDict((a, '\n'.join(b) + '\n')
                for a, b in self._block_cache.items())

    def _read_blocks(self):
        """Read corpus and yield blocks corresponding to each sentence.

        Yields ``(sentid, lines)`` pairs, where ``lines`` spans from the
        ``#BOS`` marker up to and including the matching ``#EOS`` marker.
        Raises ValueError on unbalanced markers, mismatched sentence ids,
        or duplicate sentence ids."""
        # NB: A Negra "block" is a list of lines without line endings.
        results = set()
        started = False
        lines = []
        for filename in self._filenames:
            with openread(filename, encoding=self._encoding) as inp:
                for line in inp:
                    line = line.rstrip()
                    if line.startswith('#BOS '):
                        if started:
                            raise ValueError('beginning of sentence marker '
                                    'while previous one still open: %s' % line)
                        started = True
                        sentid = line.split(None, 2)[1]
                        lines = [line]
                    elif line.startswith('#EOS '):
                        if not started:
                            raise ValueError('end of sentence marker while '
                                    'none started')
                        thissentid = line.split(None, 2)[1]
                        if sentid != thissentid:
                            raise ValueError('unexpected sentence id: '
                                    'start=%s, end=%s' % (sentid, thissentid))
                        started = False
                        if sentid in results:
                            raise ValueError(
                                    'duplicate sentence ID: %s' % sentid)
                        results.add(sentid)
                        lines.append(line)
                        yield sentid, lines
                    elif started:
                        lines.append(line)
                    # other lines are ignored: #FORMAT x, %% comments, ...

    def _parse(self, block):
        # Delegate to the module-level export-format parser with the
        # transformation options chosen at construction time.
        return exporttree(block, self.functions, self.morphology, self.lemmas)
class TigerXMLCorpusReader(CorpusReader):
    """Corpus reader for the Tiger XML format."""

    def blocks(self):
        """
        :returns: a list of strings containing the raw representation of
                trees in the treebank."""
        if self._block_cache is None:
            self._block_cache = OrderedDict(self._read_blocks())
        return OrderedDict((sentid, rawblock)
                for sentid, (rawblock, _xmlblock) in self._block_cache.items())

    def _read_blocks(self):
        """Yield ``(sentid, (rawblock, elem))`` for each ``<s>`` element,
        where ``rawblock`` is the serialized XML and ``elem`` the parsed
        element."""
        if self._encoding not in (None, 'utf8', 'utf-8'):
            raise ValueError('Encoding specified in XML files, '
                    'cannot be overriden.')
        for filename in self._filenames:
            with openread(filename, encoding=None) as inp:
                # iterator over elements in XML file
                context = ElementTree.iterparse(
                        inp, events=('start', 'end'))
                _, root = next(context)  # event == 'start' of root element
                for event, elem in context:
                    if event == 'end' and elem.tag == 's':
                        sentid = elem.get('id')
                        rawblock = ElementTree.tostring(elem)
                        yield sentid, (rawblock, elem)
                        # free memory: drop processed elements from the tree
                        root.clear()

    def _parse(self, block):
        """Translate Tiger XML structure to the fields of export format."""
        rawblock, xmlblock = block
        nodes = OrderedDict()
        root = xmlblock.find('graph').get('root')
        # Collect the six export-format fields for each terminal.
        for term in xmlblock.find('graph').find('terminals'):
            fields = nodes.setdefault(term.get('id'), 6 * [None])
            fields[WORD] = term.get('word')
            fields[LEMMA] = term.get('lemma')
            fields[TAG] = term.get('pos')
            fields[MORPH] = term.get('morph')
            fields[PARENT] = '0' if term.get('id') == root else None
            fields[FUNC] = '--'
            nodes[term.get('id')] = fields
        # Collect nonterminals and record parent/function via their edges.
        for nt in xmlblock.find('graph').find('nonterminals'):
            if nt.get('id') == root:
                ntid = '0'
            else:
                fields = nodes.setdefault(nt.get('id'), 6 * [None])
                ntid = nt.get('id').split('_')[-1]
                fields[WORD] = '#' + ntid
                fields[TAG] = nt.get('cat')
                fields[LEMMA] = fields[MORPH] = fields[FUNC] = '--'
            for edge in nt:
                idref = edge.get('idref')
                nodes.setdefault(idref, 6 * [None])
                if edge.tag == 'edge':
                    if nodes[idref][FUNC] not in (None, '--'):
                        raise ValueError('%s already has a parent: %r'
                                % (idref, nodes[idref]))
                    nodes[idref][FUNC] = edge.get('label')
                    nodes[idref][PARENT] = ntid
                elif edge.tag == 'secedge':
                    # secondary edges are appended as extra (label, parent)
                    # field pairs after the regular six fields
                    nodes[idref].extend((edge.get('label'), ntid))
                else:
                    raise ValueError("expected 'edge' or 'secedge' tag.")
        for idref in nodes:
            if nodes[idref][PARENT] is None:
                raise ValueError('%s does not have a parent: %r' % (
                        idref, nodes[idref]))
        # Re-use the export-format parser on the synthesized block.
        item = exporttree(
                ['#BOS ' + xmlblock.get('id')]
                + ['\t'.join(a) for a in nodes.values()]
                + ['#EOS ' + xmlblock.get('id')],
                self.functions, self.morphology, self.lemmas)
        item.tree.label = root.split('_', 1)[1]
        item.block = rawblock
        return item
class AlpinoCorpusReader(CorpusReader):
    """Corpus reader for the Dutch Alpino treebank in XML format.

    Expects a corpus in directory format, where every sentence is in a single
    ``.xml`` file."""

    def blocks(self):
        """
        :returns: a list of strings containing the raw representation of
                trees in the treebank."""
        if self._block_cache is None:
            self._block_cache = OrderedDict(self._read_blocks())
        return OrderedDict((n, rawblock)
                for n, (rawblock, _xmlblock) in self._block_cache.items())

    def _read_blocks(self):
        """Read corpus and yield ``(sentid, (rawblock, xmlblock))`` pairs.

        ``rawblock`` is the raw XML as bytes; ``xmlblock`` is the parsed
        ElementTree element. The sentence id is ``dir/file`` (without the
        ``.xml`` extension). Files that fail to parse are reported on
        stderr and skipped."""
        if self._encoding not in (None, 'utf8', 'utf-8'):
            raise ValueError('Encoding specified in XML files, '
                    'cannot be overriden.')
        for filename in self._filenames:
            with open(filename, 'rb') as inp:
                rawblock = inp.read()  # NB: store XML data as bytes
            # ../path/dir/file.xml => dir/file
            path, filename = os.path.split(filename)
            _, lastdir = os.path.split(path)
            n = os.path.join(lastdir, filename)[:-len('.xml')]
            try:
                xmlblock = ElementTree.fromstring(rawblock)
            except ElementTree.ParseError:
                print('Problem with %r:\n%s' % (
                        filename,
                        rawblock.decode('utf8', errors='replace')),
                        file=sys.stderr)
                # FIX: skip unparsable files. Previously execution fell
                # through to the yield below, yielding a stale xmlblock
                # from a previous iteration (or raising NameError on the
                # first file).
                continue
            yield n, (rawblock, xmlblock)

    def _parse(self, block):
        """:returns: a parse tree given a ``(rawblock, xmlblock)`` pair."""
        return alpinotree(
                block, self.functions, self.morphology, self.lemmas)
class AlpinoCompactCorpusReader(AlpinoCorpusReader):
    """Corpus reader for the Alpino compact treebank format (Indexed Corpus).

    Pass one or more .index or .data.dz files as filenames."""

    def _read_blocks(self):
        """Read corpus and yield ``(sentid, (rawblock, xmlblock))`` pairs.

        Decompresses the ``.data.dz`` member corresponding to each given
        filename and splits it into per-sentence XML documents. Blocks
        that fail to parse are reported on stderr and skipped."""
        if self._encoding not in (None, 'utf8', 'utf-8'):
            raise ValueError('Encoding specified in XML files, '
                    'cannot be overriden.')
        # NB: could implement proper streaming, random access using .index file
        # which lists offsets and sizes in base64 encoding.
        for filename in self._filenames:
            dzfile = re.sub(r'\.index$', '.data.dz', filename)
            dirname = os.path.basename(re.sub(r'\.index$', '', filename))
            with gzip.open(dzfile, 'rb') as inp:
                for block in ALPINOXML.findall(inp.read()):
                    sentid = ALPINOSENTID.search(block).group(1).decode('utf8')
                    n = '%s/%s' % (dirname, sentid)
                    try:
                        xmlblock = ElementTree.fromstring(block)
                    except ElementTree.ParseError:
                        print('Problem with %r:\n%s' % (
                                n, block.decode('utf8', errors='replace')),
                                file=sys.stderr)
                        # FIX: skip unparsable blocks. Previously execution
                        # fell through to the yield below, yielding a stale
                        # xmlblock from the previous iteration (or raising
                        # NameError on the first block).
                        continue
                    yield n, (block, xmlblock)
class FTBXMLCorpusReader(CorpusReader):
    """Corpus reader for the French treebank (FTB) in XML format."""

    def __init__(self, *args, **kwargs):
        """Initialize like CorpusReader, then pin the canonical file order
        so the conventional test/dev files come first."""
        super().__init__(*args, **kwargs)
        # hack to ensure test/dev set as the first 1235 + 1235 sentences
        order = {'flmf7aa1ep.cat.xml': 0,
                'flmf7aa2ep.cat.xml': 1,
                'flmf7ab2ep.xml': 2,
                'flmf7ae1ep.cat.xml': 3,
                'flmf7af2ep.cat.xml': 4,
                'flmf7ag1exp.cat.xml': 5}
        # Unknown files sort after the known ones (key 99), keeping their
        # existing relative order (sort is stable).
        self._filenames.sort(key=lambda x: order.get(os.path.basename(x), 99))

    def blocks(self):
        """
        :returns: a list of strings containing the raw representation of
                trees in the treebank."""
        if self._block_cache is None:
            self._block_cache = OrderedDict(self._read_blocks())
        return OrderedDict((sentid, rawblock)
                for sentid, (rawblock, _xmlblock) in self._block_cache.items())

    def _read_blocks(self):
        """Yield ``(sentid, (rawblock, elem))`` for each ``<SENT>`` element;
        ``sentid`` is ``<basename>-<nb attribute>``."""
        if self._encoding not in (None, 'utf8', 'utf-8'):
            raise ValueError('Encoding specified in XML files, '
                    'cannot be overriden.')
        for filename in self._filenames:
            with openread(filename, encoding=None) as inp:
                # iterator over elements in XML file
                context = ElementTree.iterparse(inp,
                        events=('start', 'end'))
                _event, root = next(context)  # event: 'start' of root element
                filename1 = os.path.splitext(os.path.basename(filename))[0]
                for event, elem in context:
                    if event == 'end' and elem.tag == 'SENT':
                        sentid = '%s-%s' % (filename1, elem.get('nb'))
                        rawblock = ElementTree.tostring(elem)
                        yield sentid, (rawblock, elem)
                        # free memory: drop processed elements from the tree
                        root.clear()

    def _parse(self, block):
        """:returns: a parse tree given a ``(rawblock, xmlblock)`` pair."""
        return ftbtree(
                block, self.functions, self.morphology, self.lemmas)
def exporttree(block, functions=None, morphology=None, lemmas=None):
	"""Get tree, sentence from tree in export format given as list of lines.

	:param block: a list of lines without line endings.
	:returns: Item object, with tree, sent, comment, block fields."""
	def getchildren(parent):
		"""Traverse tree in export format and create Tree object."""
		results = []
		for n, source in children.get(parent, []):
			# n is the index in the block to record word indices
			m = EXPORTNONTERMINAL.match(source[WORD])
			# escape parentheses so bracket notation stays unambiguous
			label = source[TAG].replace('(', '[').replace(')', ']')
			if m:
				child = ParentedTree(label, getchildren(m.group(1)))
			else:  # POS + terminal
				child = ParentedTree(label, [n])
				handlemorphology(morphology, lemmas, child, source, sent)
			child.source = tuple(source)
			results.append(child)
		return results

	# first line is the #BOS line; an optional %% comment may follow the key
	comment = block[0].split('%%')[1].strip() if '%%' in block[0] else None
	table = [exportsplit(x) for x in block[1:-1]]
	sent = []
	children = {'0': []}
	# first pass: collect terminals in order and create slots for nonterminals
	for source in table:
		m = EXPORTNONTERMINAL.match(source[WORD])
		if m:
			children[m.group(1)] = []
		else:
			sent.append(source[WORD])
	# second pass: attach each row to its parent id
	for n, source in enumerate(table):
		children[source[PARENT]].append((n, source))
	tree = ParentedTree('ROOT', getchildren('0'))
	handlefunctions(functions, tree, morphology=morphology)
	return Item(tree, sent, comment, '\n'.join(block) + '\n')
def exportsplit(line):
	"""Split a line in export format into its fields.

	Comments are stripped; a dummy lemma field is inserted when absent.

	:returns: a list with >= 6 elements; if > 6, length is even since
		secondary edges are defined by pairs of (label, parentid) fields.
	"""
	commentstart = line.find('%%')  # strip trailing comment, if any
	if commentstart != -1:
		line = line[:commentstart]
	fields = line.split()
	if len(fields) < 5:
		raise ValueError('expected at least 5 columns: %r' % fields)
	if len(fields) % 2:  # odd number of fields: the lemma column is missing
		fields.insert(LEMMA, '--')  # add empty lemma field
	return fields
def alpinotree(block, functions=None, morphology=None, lemmas=None):
	"""Get tree, sent from tree in Alpino format given as string, etree object.

	:param block: a pair ``(rawblock, xmlblock)`` of raw XML bytes and the
		parsed etree Element.
	:returns: Item object with tree, sent, comment, block fields."""
	def getsubtree(node, parentid, morphology, lemmas):
		"""Parse a subtree of an Alpino tree."""
		# source holds the export-style fields for this node
		source = [''] * len(FIELDS)
		# node ids are offset by 500, as in export format conventions
		nodeid = int(node.get('id')) + 500
		source[WORD] = node.get('word') or ("#%s" % nodeid)
		source[LEMMA] = node.get('lemma') or node.get('root')
		source[MORPH] = node.get('postag') or node.get('frame')
		source[FUNC] = node.get('rel')
		if 'cat' in node.keys():  # phrasal node
			source[TAG] = node.get('cat')
			if node.get('index'):
				coindexed[int(node.get('index')) + 500] = source
			label = node.get('cat')
			result = ParentedTree(label.upper(), [])
			for child in node:
				subtree = getsubtree(child, nodeid, morphology, lemmas)
				if subtree and (
						'word' in child.keys() or 'cat' in child.keys()):
					subtree.source[PARENT] = nodeid
					result.append(subtree)
			if not result:
				return None
		elif 'word' in node.keys():  # terminal node
			source[TAG] = node.get('pt') or node.get('pos')
			if node.get('index'):
				coindexed[int(node.get('index')) + 500] = source
			result = ParentedTree(source[TAG], list(
					range(int(node.get('begin')), int(node.get('end')))))
			handlemorphology(morphology, lemmas, result, source, sent)
		elif 'index' in node.keys():
			# pure index node: record a secondary edge, produce no subtree
			coindexation[int(node.get('index')) + 500].extend(
					[node.get('rel'), parentid])
			return None
		# spaces would break the whitespace-separated export fields
		source[:] = [a.replace(' ', '_') if a else a for a in source]
		result.source = source
		return result

	coindexed = {}
	coindexation = defaultdict(list)
	rawblock, xmlblock = block
	sent = xmlblock.find('sentence').text.split(' ')
	tree = getsubtree(xmlblock.find('node'), 0, morphology, lemmas)
	# append collected secondary edges to the source of each coindexed node
	for i in coindexation:
		coindexed[i].extend(coindexation[i])
	comment = xmlblock.find('comments/comment')  # NB: only use first comment
	if comment is not None:
		comment = comment.text
	handlefunctions(functions, tree, morphology=morphology)
	return Item(tree, sent, comment, rawblock)
def ftbtree(block, functions=None, morphology=None, lemmas=None):
	"""Get tree, sent from tree in FTB format given as etree XML object.

	:param block: a pair ``(rawblock, xmlblock)`` of raw XML and the parsed
		etree Element of one sentence.
	:returns: Item object with tree, sent, comment, block fields."""
	def getsubtree(node):
		"""Parse a subtree of an FTB tree."""
		source = [''] * len(FIELDS)
		nodeid = next(nodeids)
		source[WORD] = node.text or ("#%s" % nodeid)
		source[LEMMA] = node.get('lemma') or ''
		source[MORPH] = node.get('ee') or ''
		source[FUNC] = node.get('fct') or ''
		if node.tag == 'w':
			# Could rely on compound="yes" attribute here, but there are a few
			# cases (annotation errors) where it is inconsistent with the
			# actual structure.
			if len(node) == 0:  # regular word token
				source[TAG] = node.get('cat') or node.get('catint')
				result = ParentedTree(source[TAG], [len(sent)])
				# strip internal whitespace from the token
				sent.append(re.sub(r'\s+', '', (node.text or '')))
				handlemorphology(morphology, lemmas, result, source, sent)
			else:  # compound node, has <w> child nodes with actual terminals.
				source[TAG] = label = 'MW' + node.get('cat')
				result = ParentedTree(label, [])
				for child in node:
					subtree = getsubtree(child)
					subtree.source[PARENT] = nodeid
					result.append(subtree)
		else:  # non-terminal node
			source[TAG] = label = node.tag
			result = ParentedTree(label, [])
			for child in node:
				subtree = getsubtree(child)
				subtree.source[PARENT] = nodeid
				result.append(subtree)
			if not result:
				return None
		# spaces would break the whitespace-separated export fields
		source[:] = [a.replace(' ', '_') if a else a for a in source]
		result.source = source
		return result

	rawblock, xmlblock = block
	sent = []
	nodeids = count(500)  # node ids start at 500, as in export format
	tree = getsubtree(xmlblock)
	comment = ' '.join('%s=%r' % (a, xmlblock.get(a))
			for a in ('nb', 'textid', 'argument', 'author', 'date'))
	handlefunctions(functions, tree, morphology=morphology)
	return Item(tree, sent, comment, rawblock)
def writetree(tree, sent, key, fmt, comment=None, morphology=None,
		sentid=False):
	"""Serialize a tree and its terminals in the requested treebank format.

	:param tree: should have indices as terminals
	:param sent: contains the words corresponding to the indices in ``tree``
	:param key: an identifier for this tree; part of the output with some
		formats or when ``sentid`` is True.
	:param fmt: one of ``bracket``, ``discbracket``, Negra's ``export``
		format, ``alpino`` XML, the unlabeled dependency formats ``mst`` and
		``conll`` (require head rules), or ``tokens`` / ``wordpos``, which
		strip the tree structure and leave lines of space-separated tokens
		or ``token/POS``. When using ``bracket``, make sure tree is
		canonicalized.
	:param comment: optionally, a single-line string stored in the format's
		comment field (supported by ``export`` and ``alpino``), or appended
		after a tab (``discbracket``); ignored by other formats.
	:param sentid: for line-based formats, prefix output by ``key|``.

	Lemmas, functions, and morphology information will be empty unless nodes
	contain a 'source' attribute with such information."""
	if fmt == 'bracket':
		output = writebrackettree(tree, sent)
	elif fmt == 'discbracket':
		output = writediscbrackettree(tree, sent)
		if comment:
			output = '%s\t%s\n' % (output.rstrip('\n'), comment)
	elif fmt == 'tokens':
		output = ' '.join(sent) + '\n'
	elif fmt == 'wordpos':
		pairs = zip(sent, sorted(tree.pos()))
		output = ' '.join(
				'%s/%s' % (word, tag) for word, (_, tag) in pairs) + '\n'
	elif fmt == 'export':
		output = writeexporttree(tree, sent, key, comment, morphology)
	elif fmt == 'alpino':
		output = writealpinotree(tree, sent, key, comment)
	elif fmt in ('conll', 'mst'):
		output = writedependencies(tree, sent, fmt)
	else:
		raise ValueError('unrecognized format: %r' % fmt)
	if sentid and fmt in ('tokens', 'wordpos', 'bracket', 'discbracket'):
		return '%s|%s' % (key, output)
	return output
def writeexporttree(tree, sent, key, comment, morphology):
	"""Return string with given tree in Negra's export format.

	:param key: sentence identifier for the #BOS/#EOS lines (omitted if None).
	:param comment: optional comment stored on the #BOS line.
	:param morphology: 'replace' or 'add' to derive the morphology column
		from the POS tag when no morphology is available."""
	def collectsecedges(node):
		# yield label/parent-id pairs for secondary edges, which are stored
		# in node.source beyond the six regular fields
		if node.source:
			for rel, pid in zip(node.source[6::2], node.source[7::2]):
				try:
					idx = nodeidindex.index(int(pid))
				except ValueError:
					print('skipping secondary edge; %s' % key, file=sys.stderr)
					continue
				yield rel
				yield str(500 + idx)

	result = []
	if key is not None:
		cmt = (' %% ' + comment) if comment else ''
		result.append('#BOS %s%s' % (key, cmt))
	# visit nodes in post-order traversal
	preterms, phrasalnodes = {}, []
	agenda = list(tree)
	while agenda:
		node = agenda.pop()
		if not node or isinstance(node[0], Tree):
			# NB: to get a proper post-order traversal, children need to be
			# reversed, but for the assignment of IDs this does not matter.
			agenda.extend(node)
			phrasalnodes.append(node)
		else:
			preterms[node[0]] = node
	phrasalnodes.reverse()
	if len(sent) != len(preterms):
		raise ValueError('sentence and terminals length mismatch: '
				'sentno: %s\ntree: %s\nsent (len=%d): %r\nleaves (len=%d): %r'
				% (key, tree, len(sent), sent, len(preterms), preterms))
	# map object identity of a phrasal node to its position (=> export id)
	idindex = [id(node) for node in phrasalnodes]
	# original node ids ('#NNN' in the WORD field), for secondary edges
	nodeidindex = [int(node.source[WORD][1:])
			if node.source and node.source[WORD].startswith('#') else 0
			for node in phrasalnodes]
	# emit one line per terminal, in sentence order
	for n, word in enumerate(sent):
		if not word:
			# raise ValueError('empty word in sentence: %r' % sent)
			word = '...'
		node = preterms[n]
		lemma = '--'
		postag = node.label.replace('$[', '$(') or '--'
		func = morphtag = '--'
		if node.source:
			lemma = node.source[LEMMA] or '--'
			morphtag = node.source[MORPH] or '--'
			func = node.source[FUNC] or '--'
		secedges = list(collectsecedges(node))
		if morphtag == '--' and morphology == 'replace':
			morphtag = postag
		elif morphtag == '--' and morphology == 'add' and '/' in postag:
			postag, morphtag = postag.split('/', 1)
		parentid = '%d' % (0 if node.parent is tree
				else 500 + idindex.index(id(node.parent)))
		result.append("\t".join((word, lemma, postag, morphtag, func,
				parentid) + tuple(secedges)))
	# emit one line per phrasal node, ids numbered from 500
	for n, node in enumerate(phrasalnodes):
		nodeid = '#%d' % (500 + n)
		lemma = '--'
		label = node.label or '--'
		func = morphtag = '--'
		if node.source:
			morphtag = node.source[MORPH] or '--'
			func = node.source[FUNC] or '--'
		secedges = collectsecedges(node)
		parentid = '%d' % (0 if node.parent is tree
				else 500 + idindex.index(id(node.parent)))
		result.append('\t'.join((nodeid, lemma, label, morphtag, func,
				parentid) + tuple(secedges)))
	if key is not None:
		result.append("#EOS %s" % key)
	return "%s\n" % "\n".join(result)
def writealpinotree(tree, sent, key, commentstr):
	"""Return XML string with tree in AlpinoXML format.

	:param key: sentence identifier stored in the comment element.
	:param commentstr: optional comment; stored as ``key|commentstr``."""
	def addchildren(tree, sent, parent, cnt, depth=1, last=False):
		"""Recursively add children of ``tree`` to XML object ``node``"""
		node = ElementTree.SubElement(parent, 'node')
		node.set('id', str(next(cnt)))
		node.set('begin', str(min(tree.leaves())))
		node.set('end', str(max(tree.leaves()) + 1))
		if tree.source:
			node.set('rel', tree.source[FUNC] or '--')
		if isinstance(tree[0], Tree):  # phrasal node
			node.set('cat', tree.label.lower())
			# text/tail set pretty-printing indentation
			node.text = '\n ' + ' ' * depth
		else:  # terminal node
			assert isinstance(tree[0], int)
			node.set('pos', tree.label.lower())
			node.set('word', sent[tree[0]])
			if tree.source:
				node.set('lemma', tree.source[LEMMA] or '--')
				node.set('postag', tree.source[MORPH] or '--')
				# FIXME: split features in multiple attributes
			else:
				node.set('lemma', '--')
				node.set('postag', '--')
		node.tail = '\n' + ' ' * (depth - last)
		for x, child in enumerate(tree, 1):
			if isinstance(child, Tree):
				addchildren(child, sent, node, cnt, depth + 1, x == len(tree))

	result = ElementTree.Element('alpino_ds')
	result.set('version', '1.3')
	# FIXME: add coindexed nodes
	addchildren(tree, sent, result, count())
	sentence = ElementTree.SubElement(result, 'sentence')
	sentence.text = ' '.join(sent)
	comment = ElementTree.SubElement(result, 'comment')
	comment.text = ('%s|%s' % (key, commentstr)) if commentstr else str(key)
	result.text = sentence.tail = '\n '
	result.tail = comment.tail = '\n'
	return ElementTree.tostring(result).decode('utf8')  # hack
def writedependencies(tree, sent, fmt):
	"""Serialize the unlabeled dependencies of a head-marked tree.

	:param fmt: ``mst`` or ``conll``."""
	deps = dependencies(tree)
	tags = [postag for _, postag in sorted(tree.pos())]
	if fmt == 'mst':  # MST parser can read this format
		# https://github.com/travisbrown/mstparser#3a-input-data-format
		lines = ('\t'.join(sent),
				'\t'.join(tags),
				'\t'.join(str(rel) for _n, rel, _head in deps),
				'\t'.join(str(head) for _n, _rel, head in deps))
		return '\n'.join(lines) + '\n\n'
	elif fmt == 'conll':
		# Cf. https://depparse.uvt.nl/DataFormat.html
		rows = ['%d\t%s\t_\t%s\t%s\t_\t%d\t%s\t_\t_' % (
				n, word, tag, tag, head, rel)
				for word, tag, (n, rel, head) in zip(sent, tags, deps)]
		return '\n'.join(rows) + '\n\n'
def dependencies(root):
	"""Extract dependencies from a head-marked tree.

	Lin (1995): A Dependency-based Method for Evaluating [...] Parsers.
	http://ijcai.org/Proceedings/95-2/Papers/052.pdf

	:returns: list of tuples of the form ``(headidx, label, depidx)``."""
	result = []
	for subtree in root:
		headidx = _makedep(subtree, result)
		# attach the lexical head of each top-level subtree to index 0
		result.append((headidx, 'root', 0))
	result.sort()
	return result
def _makedep(node, deps):
	"""Traverse a head-marked tree and extract dependencies.

	Appends ``(dependentidx, label, headidx)`` tuples to ``deps`` in place;
	indices are 1-based terminal positions.

	:returns: the 1-based index of the lexical head of ``node``."""
	if isinstance(node[0], int):
		# terminal: its own (1-based) position is the lexical head
		return node[0] + 1
	headchild = next(iter(a for a in node if a.type == HEAD))
	lexhead = _makedep(headchild, deps)
	for child in node:
		if child is headchild:
			continue
		lexheadofchild = _makedep(child, deps)
		# use function tag as dependency label when available ('--' is empty)
		func = '-'
		if (child.source
				and child.source[FUNC] and child.source[FUNC] != '--'):
			func = child.source[FUNC]
		deps.append((lexheadofchild, func, lexhead))
	return lexhead
def deplen(deps):
	"""Compute dependency length from result of ``dependencies()``.

	The artificial root attachment is excluded from the total and discounted
	from the number of dependencies.

	:param deps: list of ``(headidx, label, depidx)`` tuples.
	:returns: tuple ``(totaldeplen, numdeps)``."""
	# NB: dependencies() labels the root attachment 'root' (lowercase);
	# the previous comparison against 'ROOT' never matched, so the root
	# arc was wrongly included in the total. Compare case-insensitively.
	total = sum(abs(a - b) for a, label, b in deps
			if label.lower() != 'root')
	return (total, float(len(deps) - 1))  # discount root arc
def handlefunctions(action, tree, pos=True, root=False, morphology=None):
	"""Add function tags to phrasal labels e.g., 'VP' => 'VP-HD'.

	:param action: one of {None, 'leave', 'add', 'replace', 'remove',
		'between'}; 'between' inserts a separate node for the function tag.
	:param pos: whether to add function tags to POS tags.
	:param root: whether to add function tags to the root node.
	:param morphology: if morphology='between', skip those nodes."""
	if action in (None, 'leave'):
		return
	for node in tree.subtrees():
		if action == 'remove':
			for char in '-=':  # map NP-SUBJ and NP=2 to NP; don't touch -NONE-
				x = node.label.find(char)
				if x > 0:
					node.label = node.label[:x]
		elif morphology == 'between' and not isinstance(node[0], Tree):
			continue
		elif (not root or action == 'between') and node is tree:  # skip root
			continue
		elif pos or isinstance(node[0], Tree):
			# test for non-empty function tag ('--' is considered empty)
			func = None
			if node.source and node.source[FUNC] and node.source[FUNC] != '--':
				func = node.source[FUNC]
			if func and action == 'add':
				node.label += '-%s' % func
			elif action == 'replace':
				node.label = func or '--'
			elif action == 'between':
				# wrap the node in a new parent labeled with the function tag
				parent, idx = node.parent, node.parent_index
				newnode = ParentedTree('-' + (func or '--'), [parent.pop(idx)])
				parent.insert(idx, newnode)
def handlemorphology(action, lemmaaction, preterminal, source, sent=None):
	"""Augment/replace preterminal label with morphological information.

	:param action: what to do with the morphology tag: None/'no' (nothing),
		'add' (append to label), 'replace' (replace label), or 'between'
		(insert a separate node).
	:param lemmaaction: same options, applied to the lemma and the token
		in ``sent`` ('add'/'replace' require ``sent``).
	:param preterminal: the POS node to modify; modified in place.
	:param source: the export-format fields of this node.
	:returns: the (possibly restructured) preterminal."""
	if not source:
		return
	# escape any parentheses to avoid hassles w/bracket notation of trees
	# tag = source[TAG].replace('(', '[').replace(')', ']')
	morph = source[MORPH].replace('(', '[').replace(')', ']').replace(' ', '_')
	lemma = (source[LEMMA].replace('(', '[').replace(')', ']').replace(
			' ', '_') or '--')
	if lemmaaction == 'add':
		if sent is None:
			raise ValueError('adding lemmas requires passing sent argument.')
		sent[preterminal[0]] += '/' + lemma
	elif lemmaaction == 'replace':
		if sent is None:
			raise ValueError('adding lemmas requires passing sent argument.')
		sent[preterminal[0]] = lemma
	elif lemmaaction == 'between':
		preterminal[:] = [preterminal.__class__(lemma, preterminal)]
	elif lemmaaction not in (None, 'no'):
		raise ValueError('unrecognized action: %r' % lemmaaction)

	if action in (None, 'no'):
		pass  # preterminal.label = tag
	elif action == 'add':
		preterminal.label = '%s/%s' % (preterminal.label, morph)
	elif action == 'replace':
		preterminal.label = morph
	elif action == 'between':
		preterminal[:] = [preterminal.__class__(morph, [preterminal.pop()])]
		# preterminal.label = tag
	elif action not in (None, 'no'):
		raise ValueError('unrecognized action: %r' % action)
	return preterminal
# Status flag used by the segment* co-routines: whether a line was consumed.
CONSUMED = True
# Matches text whose tail (after the last newline, if any) is only whitespace;
# used to check that a left bracket starts at the beginning of a line.
NEWLB = re.compile(r'(?:.*[\n\r])?\s*')
def incrementaltreereader(treeinput, morphology=None, functions=None,
		strict=False, robust=True, othertext=False):
	"""Incremental corpus reader.

	Supports brackets, discbrackets, export and alpino-xml format.
	The format is autodetected.

	:param treeinput: an iterable of lines (line endings optional).
	:param strict: if True, raise ValueError on malformed data.
	:param robust: if True, only return trees with more than 2 brackets;
		e.g., (DT the) is not recognized as a tree.
	:param othertext: if True, yield non-tree data as ``(None, None, line)``.
		By default, text in lines without trees is ignored.
	:yields: tuples ``(tree, sent, comment)`` with a Tree object, a separate
		lists of terminals, and a string with any other data following the
		tree."""
	# sentinel values flush the last tree and signal end of input
	treeinput = chain(iter(treeinput), ('(', None, None))  # hack
	line = next(treeinput)
	# try the following readers on each line in this order
	readers = [segmentexport(morphology, functions, strict),
			segmentalpino(morphology, functions),
			segmentbrackets(strict, robust)]
	for reader in readers:
		reader.send(None)  # prime the co-routines
	while True:
		# status 0: line not consumed, not part of tree;
		# status 1: line consumed, waiting for end of tree.
		res, status = None, CONSUMED
		for reader in readers:
			while res is None:
				try:
					res, status = reader.send(line)
				except StopIteration:
					return
				if status != CONSUMED:
					break  # there was no tree, or a complete tree was read
				try:
					line = next(treeinput)
				except StopIteration:
					line = None
					break
			if res is not None:
				for tree, sent, rest in res:
					# split off any text after a newline in the trailing data
					x = -1 if rest is None else rest.find('\n')
					if othertext and x != -1:
						yield tree, sent, rest[:x]
						yield None, None, rest[x:]
					else:
						yield tree, sent, rest
				break
		if res is None:  # none of the readers accepted this line
			if othertext and line is not None:
				yield None, None, line.rstrip()
			try:
				line = next(treeinput)
			except StopIteration:
				return
def segmentbrackets(strict=False, robust=True):
	"""Co-routine that accepts one line at a time.

	Yields tuples ``(result, status)`` where ...

	- result is None or one or more S-expressions as a list of
		tuples (tree, sent, rest), where rest is the string outside of brackets
		between this S-expression and the next.
	- status is 1 if the line was consumed, else 0.

	:param strict: if True, raise ValueError for improperly nested brackets.
	:param robust: if True, only return trees with at least 2 brackets;
		e.g., (DT the) is not recognized as a tree.
	"""
	def tryparse(result, rest):
		"""Add a tree to the results list."""
		try:
			tree, sent = brackettree(result, detectdisc=True)
		except Exception as err:
			raise ValueError('%r\nwhile parsing:\n%r' % (
					err, dict(result=result, rest=rest, parens=parens,
					depth=depth, prev=prev)))
		else:
			results.append((tree, sent, rest.rstrip()))

	lb, rb = '()'
	parens = 0  # number of open parens
	depth = 0  # max. number of open parens
	prev = ''  # incomplete tree currently being read
	result = ''  # string of complete tree
	results = []  # trees found in current line
	rest = ''  # any non-tree data after a tree
	line = (yield None, CONSUMED)
	while True:
		start = 0  # index where current tree starts
		# scan only text not seen in a previous (incomplete) line
		a, b = line.find(lb, len(prev)), line.find(rb, len(prev))
		# ignore first left bracket when not preceded by whitespace
		if parens == 0 and a > 0 and NEWLB.match(prev) is None:
			a = -1
		prev = line
		while a != -1 or b != -1:
			if a != -1 and (a < b or b == -1):  # left bracket
				# look ahead to see whether this will be a tree with depth > 1
				if parens == 0 and (b == -1
						or (not robust or 0 <= line.find(lb, a + 1) < b)):
					rest, prev = line[start:a], line[a:]
					if result:
						tryparse(result, rest)
					result, start = '', a
				parens += 1
				depth = max(depth, parens)
				a = line.find(lb, a + 1)
			elif b != -1 and (b < a or a == -1):  # right bracket
				parens -= 1
				if parens == 0 and (not robust or depth > 1):
					# a complete tree has been read
					result, prev = line[start:b + 1], line[b + 1:]
					start = b + 1
					depth = 0
				elif parens < 0:
					if strict:
						raise ValueError('unbalanced parentheses')
					parens = 0
				b = line.find(rb, b + 1)
		status = CONSUMED if results or result or parens else not CONSUMED
		line = (yield results or None, status)
		if results:
			results = []
		if line is None:
			# end of input: flush any tree still in the buffer
			if result:
				tryparse(result, rest)
			status = CONSUMED if results or result or parens else not CONSUMED
			yield results or None, status
			line = ''
			if results:
				results = []
		if parens or result:
			# carry over incomplete data to be combined with the next line
			line = prev + line
		else:
			prev = ''
def segmentalpino(morphology, functions):
	"""Co-routine that accepts one line at a time.

	Yields tuples ``(result, status)`` where ...

	- result is ``None`` or a segment delimited by
		``<alpino_ds>`` and ``</alpino_ds>`` as a list of lines;
	- status is 1 if the line was consumed, else 0."""
	cur = []  # lines of the block being collected
	inblock = 0  # whether we are inside an <alpino_ds> element
	line = (yield None, CONSUMED)
	while line is not None:
		if line.startswith('<alpino_ds'):
			# start a new block; prepend an XML declaration
			cur = ['<?xml version="1.0" encoding="UTF-8"?>', line]
			inblock = 1
			line = (yield None, CONSUMED)
		elif line.startswith('</alpino_ds>'):
			# end of block: parse it and yield the resulting tree
			cur.append(line)
			rawblock = '\n'.join(cur).encode('utf8')
			xmlblock = ElementTree.fromstring(rawblock)
			block = rawblock, xmlblock
			item = alpinotree(block, functions, morphology)
			line = (yield ((item.tree, item.sent, item.comment), ), CONSUMED)
			inblock = 0
			cur = []
		elif line.strip():
			if inblock == 1:
				cur.append(line)
			line = line.lstrip()
			line = (yield None, (CONSUMED if inblock
					or line.startswith('<?xml')
					else not CONSUMED))
		else:
			line = (yield None, not CONSUMED)
def segmentexport(morphology, functions, strict=False):
	"""Co-routine that accepts one line at a time.

	Yields tuples ``(result, status)`` where ...

	- result is ``None`` or a segment delimited by
		``#BOS`` and ``#EOS`` as a list of lines;
	- status is 1 if the line was consumed, else 0."""
	cur = []  # lines of the block being collected
	inblock = 0  # 1 inside #BOS..#EOS, 2 inside #BOT..#EOT (table)
	line = (yield None, CONSUMED)
	while line is not None:
		line = line.rstrip()
		if line.startswith('#BOS ') or line.startswith('#BOT '):
			if strict and inblock != 0:
				raise ValueError('nested #BOS or #BOT')
			cur[:] = [line]
			inblock = 1 if line.startswith('#BOS ') else 2
			line = (yield None, CONSUMED)
		elif line.startswith('#EOS ') or line.startswith('#EOT '):
			if strict and inblock == 0:
				raise ValueError('#EOS or #EOT without start tag')
			# end of sentence: parse the collected block
			cur.append(line)
			item = exporttree(cur, functions, morphology)
			line = (yield ((item.tree, item.sent, item.comment), ), CONSUMED)
			inblock = 0
			cur = []
		elif line:
			if inblock == 1:
				cur.append(line)
			else:
				line = line.lstrip()
			line = (yield None, (CONSUMED if inblock
					or line.startswith('%%')
					or line.startswith('#FORMAT ')
					else not CONSUMED))
		else:
			line = (yield None, not CONSUMED)
def numbase(key):
	"""Return a sort key for a file name with numeric/string components.

	The base name (without extension) is split on ``- . , _`` and spaces;
	all-digit components are converted to int so they sort numerically."""
	path, base = os.path.split(key)
	stem = os.path.splitext(base)[0]
	parts = [int(piece) if re.match(r'[0-9]+$', piece) else piece
			for piece in re.split(r'[-.,_ ]', stem)]
	return [path] + parts
# Mapping of input format name => corpus reader class.
READERS = OrderedDict((
		('export', NegraCorpusReader),
		('bracket', BracketCorpusReader),
		('discbracket', DiscBracketCorpusReader),
		('tiger', TigerXMLCorpusReader),
		('alpino', AlpinoCorpusReader),
		('alpinocompact', AlpinoCompactCorpusReader),
		('ftb', FTBXMLCorpusReader)))
# Output formats accepted by writetree().
WRITERS = ('export', 'bracket', 'discbracket',
		'conll', 'mst', 'tokens', 'wordpos')

__all__ = ['Item', 'CorpusReader', 'BracketCorpusReader',
		'DiscBracketCorpusReader', 'NegraCorpusReader', 'TigerXMLCorpusReader',
		'AlpinoCorpusReader', 'AlpinoCompactCorpusReader',
		'FTBXMLCorpusReader',
		'exporttree', 'exportsplit', 'alpinotree', 'ftbtree',
		'writetree', 'writeexporttree', 'writealpinotree', 'writedependencies',
		'dependencies', 'deplen', 'handlefunctions', 'handlemorphology',
		'incrementaltreereader', 'segmentbrackets', 'segmentexport',
		'segmentalpino', 'numbase']
| andreasvc/disco-dop | discodop/treebank.py | Python | gpl-2.0 | 44,624 | [
"VisIt"
] | ad8200ba096784139d9fcc1345f82985595f4d4c737218388e234f378952838a |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2007-2009 Brian G. Matherly
# Copyright (C) 2009 Gary Burton
# Copyright (C) 2010 Peter Landgren
# Copyright (C) 2011 Adam Stein <adam@csh.rit.edu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
from gramps.gen.ggettext import gettext as _
import logging
LOG = logging.getLogger(".rtfdoc")
#------------------------------------------------------------------------
#
# Load the base BaseDoc class
#
#------------------------------------------------------------------------
from gramps.gen.plug.docgen import (BaseDoc, TextDoc, FONT_SERIF, PARA_ALIGN_RIGHT,
PARA_ALIGN_CENTER, PARA_ALIGN_JUSTIFY,
URL_PATTERN)
from gramps.gen.utils.image import image_size, image_actual_size, resize_to_jpeg_buffer
from gramps.gen.errors import ReportError
#------------------------------------------------------------------------
#
# Set up to make links clickable
#
#------------------------------------------------------------------------
# RTF field template substituted for URL_PATTERN matches (\1 is the URL).
_CLICKABLE = r'''{\\field{\\*\\fldinst HYPERLINK "\1"}{\\fldrslt \1}}'''
#------------------------------------------------------------------------
#
# RTF uses a unit called "twips" for its measurements. According to the
# RTF specification, 1 point is 20 twips. This routines converts
# centimeters to twips
#
# 2.54 cm/inch 72pts/inch, 20twips/pt
#
#------------------------------------------------------------------------
def twips(cm):
    """Convert centimeters to twips (1 inch = 72 points; 1 point = 20 twips).

    The value is rounded to the nearest whole point before scaling.
    """
    points = int((cm / 2.54) * 72 + 0.5)
    return points * 20
#------------------------------------------------------------------------
#
# Rich Text Format Document interface. The current interface does not
# use style sheets. Instead it writes raw formatting.
#
#------------------------------------------------------------------------
class RTFDoc(BaseDoc,TextDoc):
#--------------------------------------------------------------------
#
# Opens the file, and writes the header. Builds the color and font
# tables. Fonts are chosen using the MS TrueType fonts, since it
# is assumed that if you are generating RTF, you are probably
# targeting Word. This generator assumes a Western Europe character
# set.
#
#--------------------------------------------------------------------
    def open(self,filename):
        """Open the output file and write the RTF header, font table and
        color table derived from the current style sheet."""
        # ensure the output file name carries an .rtf extension
        if filename[-4:] != ".rtf":
            self.filename = filename + ".rtf"
        else:
            self.filename = filename
        try:
            self.f = open(self.filename,"w")
        except IOError,msg:
            errmsg = "%s\n%s" % (_("Could not create %s") % self.filename, msg)
            raise ReportError(errmsg)
        except:
            raise ReportError(_("Could not create %s") % self.filename)
        style_sheet = self.get_style_sheet()
        # RTF prolog: ANSI charset, font table with a serif and a sans font
        self.f.write(
            '{\\rtf1\\ansi\\ansicpg1252\\deff0\n'
            '{\\fonttbl\n'
            '{\\f0\\froman\\fcharset0\\fprq0 Times New Roman;}\n'
            '{\\f1\\fswiss\\fcharset0\\fprq0 Arial;}}\n'
            '{\colortbl\n'
            )
        # build the color table; index 0 is always black
        self.color_map = {}
        index = 1
        self.color_map[(0,0,0)] = 0
        self.f.write('\\red0\\green0\\blue0;')
        for style_name in style_sheet.get_paragraph_style_names():
            style = style_sheet.get_paragraph_style(style_name)
            fgcolor = style.get_font().get_color()
            bgcolor = style.get_background_color()
            if fgcolor not in self.color_map:
                self.color_map[fgcolor] = index
                self.f.write('\\red%d\\green%d\\blue%d;' % fgcolor)
                index += 1
            if bgcolor not in self.color_map:
                self.f.write('\\red%d\\green%d\\blue%d;' % bgcolor)
                self.color_map[bgcolor] = index
                index += 1
        self.f.write('}\n')
        # page geometry in twips, taken from the configured paper style
        self.f.write(
            '\\kerning0\\cf0\\viewkind1' +
            '\\paperw%d' % twips(self.paper.get_size().get_width()) +
            '\\paperh%d' % twips(self.paper.get_size().get_height()) +
            '\\margl%d' % twips(self.paper.get_left_margin()) +
            '\\margr%d' % twips(self.paper.get_right_margin()) +
            '\\margt%d' % twips(self.paper.get_top_margin()) +
            '\\margb%d' % twips(self.paper.get_bottom_margin()) +
            '\\widowctl\n'
            )
        self.in_table = 0
        self.text = ""
#--------------------------------------------------------------------
#
# Write the closing brace, and close the file.
#
#--------------------------------------------------------------------
    def close(self):
        """Write the closing brace of the RTF document and close the file."""
        self.f.write('}\n')
        self.f.close()
#--------------------------------------------------------------------
#
# Force a section page break
#
#--------------------------------------------------------------------
    def end_page(self):
        """Force a section page break."""
        self.f.write('\\sbkpage\n')
#--------------------------------------------------------------------
#
# Starts a paragraph. Instead of using a style sheet, generate the
# the style for each paragraph on the fly. Not the ideal, but it
# does work.
#
#--------------------------------------------------------------------
    def start_paragraph(self,style_name,leader=None):
        """Start a new paragraph, emitting raw RTF formatting derived from
        the named paragraph style; ``leader`` is optional text written
        before a tab stop (e.g. a list bullet or number)."""
        self.opened = 0
        style_sheet = self.get_style_sheet()
        p = style_sheet.get_paragraph_style(style_name)

        # build font information; stored for later brace groups as well
        f = p.get_font()
        size = f.get_size()*2          # RTF font size unit is half-points
        bgindex = self.color_map[p.get_background_color()]
        fgindex = self.color_map[f.get_color()]
        if f.get_type_face() == FONT_SERIF:
            self.font_type = '\\f0'
        else:
            self.font_type = '\\f1'
        self.font_type += '\\fs%d\\cf%d\\cb%d' % (size,fgindex,bgindex)
        if f.get_bold():
            self.font_type += "\\b"
        if f.get_underline():
            self.font_type += "\\ul"
        if f.get_italic():
            self.font_type += "\\i"

        # build paragraph information: alignment, margins, borders
        if not self.in_table:
            self.f.write('\\pard')
        if p.get_alignment() == PARA_ALIGN_RIGHT:
            self.f.write('\\qr')
        elif p.get_alignment() == PARA_ALIGN_CENTER:
            self.f.write('\\qc')
        self.f.write(
            '\\ri%d' % twips(p.get_right_margin()) +
            '\\li%d' % twips(p.get_left_margin()) +
            '\\fi%d' % twips(p.get_first_indent())
            )
        if p.get_alignment() == PARA_ALIGN_JUSTIFY:
            self.f.write('\\qj')
        if p.get_padding():
            self.f.write('\\sa%d' % twips(p.get_padding()/2.0))
        if p.get_top_border():
            self.f.write('\\brdrt\\brdrs')
        if p.get_bottom_border():
            self.f.write('\\brdrb\\brdrs')
        if p.get_left_border():
            self.f.write('\\brdrl\\brdrs')
        if p.get_right_border():
            self.f.write('\\brdrr\\brdrs')
        if p.get_first_indent():
            self.f.write('\\fi%d' % twips(p.get_first_indent()))
        if p.get_left_margin():
            self.f.write('\\li%d' % twips(p.get_left_margin()))
        if p.get_right_margin():
            self.f.write('\\ri%d' % twips(p.get_right_margin()))

        if leader:
            # write the leader in its own brace group followed by a tab
            self.opened = 1
            self.f.write('\\tx%d' % twips(p.get_left_margin()))
            self.f.write('{%s ' % self.font_type)
            self.write_text(leader)
            self.f.write(self.text)
            self.text = ""
            self.f.write('\\tab}')
            self.opened = 0
#--------------------------------------------------------------------
#
# Ends a paragraph. Care has to be taken to make sure that the
# braces are closed properly. The self.opened flag is used to indicate
# if braces are currently open. If the last write was the end of
# a bold-faced phrase, braces may already be closed.
#
#--------------------------------------------------------------------
    def end_paragraph(self):
        """Close the current paragraph.

        Outside a table: flush the buffered text, close any open brace
        group (tracked by self.opened) and emit a \\par marker.
        Inside a table: no \\par is written (see FIXME below); an empty
        cell gets a single space so the group is not empty, and the
        buffered group is closed with a brace.
        """
        # FIXME: I don't understand why no end paragraph marker is output when
        # we are inside a table. Since at least version 3.2.2, this seems to mean that
        # there is no new paragraph after the first line of a table entry.
        # For example in the birth cell, the first paragraph should be the
        # description (21 Jan 1900 in London); if there is a note following this,
        # there is no newline between the description and the note.
        if not self.in_table:
            self.f.write(self.text)
            # NOTE(review): the conditional expression below binds as
            # (self.text + '}') if self.opened else ("" + "newline"),
            # so the leading "" is dead; debug output only.
            LOG.debug("end_paragraph: opened: %d write: %s" %
                      (self.opened,
                       self.text + '}' if self.opened else "" + "newline"))
            if self.opened:
                self.f.write('}')
                self.opened = 0
            self.f.write('\n\\par')
            self.text = ""
        else:
            if self.text == "":
                self.write_text(" ")
            self.text += '}'
#--------------------------------------------------------------------
#
# Inserts a manual page break
#
#--------------------------------------------------------------------
    def page_break(self):
        """Insert a manual page break via the RTF \\page control word."""
        self.f.write('\\page\n')
#--------------------------------------------------------------------
#
# Starts boldfaced text, enclosed the braces
#
#--------------------------------------------------------------------
def start_bold(self):
LOG.debug("start_bold: opened: %d saved text: %s" %
(self.opened,
'}' if self.opened else "" + '{%s\\b ' % self.font_type))
if self.opened:
self.text += '}'
self.text += '{%s\\b ' % self.font_type
self.opened = 1
#--------------------------------------------------------------------
#
# Ends boldfaced text, closing the braces
#
#--------------------------------------------------------------------
def end_bold(self):
LOG.debug("end_bold: opened: %d saved text: %s" %
(self.opened,
self.text + '}'))
if not self.opened == 1:
print self.opened
raise RuntimeError
self.opened = 0
self.text += '}'
    def start_superscript(self):
        # NOTE: the single backslashes in \* and \u are intentional: under
        # Python 2 these are not recognized escape sequences, so the literal
        # already contains the raw RTF control words {\*\updnprop5801}\up10.
        self.text += '{{\*\updnprop5801}\up10 '
    def end_superscript(self):
        # Close the group opened by start_superscript().
        self.text += '}'
#--------------------------------------------------------------------
#
# Start a table. Grab the table style, and store it. Keep a flag to
# indicate that we are in a table. This helps us deal with paragraphs
# internal to a table. RTF does not require anything to start a
# table, since a table is treated as a bunch of rows.
#
#--------------------------------------------------------------------
def start_table(self, name,style_name):
self.in_table = 1
styles = self.get_style_sheet()
self.tbl_style = styles.get_table_style(style_name)
#--------------------------------------------------------------------
#
# End a table. Turn off the table flag
#
#--------------------------------------------------------------------
    def end_table(self):
        # Leave table mode; paragraph handling reverts to normal.
        self.in_table = 0
#--------------------------------------------------------------------
#
# Start a row. RTF uses the \trowd to start a row. RTF also specifies
# all the cell data after it has specified the cell definitions for
# the row. Therefore it is necessary to keep a list of cell contents
# that is to be written after all the cells are defined.
#
#--------------------------------------------------------------------
def start_row(self):
self.contents = []
self.cell = 0
self.prev = 0
self.cell_percent = 0.0
self.f.write('\\trowd\n')
#--------------------------------------------------------------------
#
# End a row. Write the cell contents, separated by the \cell marker,
# then terminate the row
#
#--------------------------------------------------------------------
def end_row(self):
self.f.write('{')
for line in self.contents:
self.f.write(line)
self.f.write('\\cell ')
self.f.write('}\\pard\\intbl\\row\n')
#--------------------------------------------------------------------
#
# Start a cell. Dump out the cell specifics, such as borders. Cell
# widths are kind of interesting. RTF doesn't specify how wide a cell
# is, but rather where it's right edge is in relationship to the
# left margin. This means that each cell is the cumlative of the
# previous cells plus its own width.
#
#--------------------------------------------------------------------
    def start_cell(self,style_name,span=1):
        """Begin a table cell.

        RTF does not state a cell's width directly: \\cellx gives the
        position of the cell's *right edge* relative to the left margin,
        so each cell position is the cumulative width percentage of all
        columns so far (accumulated in self.cell_percent).
        """
        styles = self.get_style_sheet()
        s = styles.get_cell_style(style_name)
        self.remain = span -1
        if s.get_top_border():
            self.f.write('\\clbrdrt\\brdrs\\brdrw10\n')
        if s.get_bottom_border():
            self.f.write('\\clbrdrb\\brdrs\\brdrw10\n')
        if s.get_left_border():
            self.f.write('\\clbrdrl\\brdrs\\brdrw10\n')
        if s.get_right_border():
            self.f.write('\\clbrdrr\\brdrs\\brdrw10\n')
        table_width = float(self.paper.get_usable_width())
        # Accumulate the width percentage of every column this cell spans.
        for cell in range(self.cell,self.cell+span):
            self.cell_percent += float(self.tbl_style.get_column_width(cell))
        cell_width = twips((table_width * self.cell_percent)/100.0)
        # NOTE: '\intbl' keeps a single backslash on purpose -- '\i' is not
        # a recognized escape, so the literal already reads "\intbl".
        self.f.write('\\cellx%d\\pard\intbl\n' % cell_width)
        self.cell += 1
#--------------------------------------------------------------------
#
# End a cell. Save the current text in the content lists, since data
# must be saved until all cells are defined.
#
#--------------------------------------------------------------------
    def end_cell(self):
        # Buffer the accumulated cell text; it is written out by end_row(),
        # after all cell definitions of the row have been emitted.
        self.contents.append(self.text)
        self.text = ""
#--------------------------------------------------------------------
#
# Add a photo. Embed the photo in the document. Use the Python
# imaging library to load and scale the photo. The image is converted
# to JPEG, since it is smaller, and supported by RTF. The data is
# dumped as a string of HEX numbers.
#
#--------------------------------------------------------------------
    def add_media_object(self, name, pos, x_cm, y_cm, alt='', style_name=None, crop=None):
        """Embed an image in the document.

        The image is loaded, optionally cropped, scaled to fit
        (x_cm, y_cm) and converted to JPEG (compact and supported by
        RTF), then dumped into the stream as hex digits, 32 bytes per
        line.  `pos` and `style_name` are accepted for interface
        compatibility but not used here.
        """
        nx, ny = image_size(name)
        if (nx, ny) == (0,0):
            return
        (act_width, act_height) = image_actual_size(x_cm, y_cm, nx, ny)
        act_width = twips(act_width)
        act_height = twips(act_height)
        size = [act_width, act_height]
        buf = resize_to_jpeg_buffer(name, size, crop=crop)
        act_width = size[0] # In case it changed because of cropping or keeping the ratio
        act_height = size[1]
        self.f.write('{\*\shppict{\\pict\\jpegblip')
        self.f.write('\\picwgoal%d\\pichgoal%d\n' % (act_width,act_height))
        index = 1
        for i in buf:
            self.f.write('%02x' % ord(i))
            if index%32==0:
                self.f.write('\n')
            index = index+1
        self.f.write('}}\\par\n')
        # NOTE(review): '\\par'.join(alt) assumes alt is a *list* of lines;
        # if a plain string is passed (the default is ''), this would join
        # individual characters -- confirm against callers.
        if len(alt):
            self.f.write('%s\n\\par\n' % '\\par'.join(alt))
    def write_styled_note(self, styledtext, format, style_name,
                          contains_html=False, links=False):
        """
        Convenience function to write a styledtext to the RTF doc.
        styledtext : assumed a StyledText object to write
        format : = 0 : Flowed, = 1 : Preformatted
        style_name : name of the style to use for default presentation
        contains_html: bool, the backend should not check if html is present.
            If contains_html=True, then the textdoc is free to handle that in
            some way. Eg, a textdoc could remove all tags, or could make sure
            a link is clickable. RTFDoc prints the html without handling it
        links: bool, make URLs clickable if True
        """
        text = str(styledtext)
        self.start_paragraph(style_name)
        linenb = 1
        for line in text.split('\n'):
            [line, sigcount] = process_spaces(line, format)
            if sigcount == 0:
                # A whitespace-only line means a paragraph break.
                if self.in_table:
                    # # Add LF when in table as in indiv_complete report
                    self.write_text('\n')
                self.end_paragraph()
                self.start_paragraph(style_name)
                linenb = 1
            else:
                # Subsequent lines inside a paragraph become \line breaks.
                if ( linenb > 1 ):
                    self.write_text('\\line ')
                self.write_text(line, links=links)
                linenb += 1
        # FIXME: I don't understand why these newlines are necessary.
        # It may be related to the behaviour of end_paragraph inside tables, and
        # write_text converting \n to end paragraph.
        # This code prevents the whole document going wrong, but seems to produce an extra
        # paragraph mark at the end of each table cell.
        if self.in_table:
            # # Add LF when in table as in indiv_complete report
            self.write_text('\n')
        self.end_paragraph()
#--------------------------------------------------------------------
#
# Writes text. If braces are not currently open, open them. Loop
# character by character (terribly inefficient, but it works). If a
# character is 8 bit (>127), convert it to a hex representation in
# the form of \`XX. Make sure to escape braces.
#
#--------------------------------------------------------------------
    def write_text(self,text,mark=None,links=False):
        """Append text to the buffer, escaped for RTF.

        Opens a brace group with the current font if none is open.
        Characters above 127 are emitted as \\'xx (8-bit) or \\uN
        (wider Unicode, decimal as RTF requires); braces are escaped.
        `mark` is accepted for interface compatibility but unused here.
        If `links` is true, URLs in the buffered text are made clickable.
        """
        # Convert to unicode, just in case it's not. Fix of bug 2449.
        text = unicode(text)
        text = text.replace('\n','\n\\par ')
        LOG.debug("write_text: opened: %d input text: %s" %
                  (self.opened,
                   text))
        if self.opened == 0:
            self.opened = 1
            self.text += '{%s ' % self.font_type
        for i in text:
            if ord(i) > 127:
                if ord(i) < 256:
                    # %2x is safe here: 128-255 always yields two hex digits.
                    self.text += '\\\'%2x' % ord(i)
                else:
                    # If (uni)code with more than 8 bits:
                    # RTF req valus in decimal, not hex.
                    self.text += '\\uc1\\u%d\\uc0' % ord(i)
            elif i == '{' or i == '}' :
                self.text += '\\%s' % i
            else:
                self.text += i
        if links == True:
            import re
            self.text = re.sub(URL_PATTERN, _CLICKABLE, self.text)
        LOG.debug("write_text, exit: opened: %d saved text: %s" %
                  (self.opened,
                   self.text))
def process_spaces(line, format):
    """Normalize whitespace in one line of note text.

    line   : text to process
    format : 0 = flowed, 1 = pre-formatted

    Flowed text drops leading whitespace and collapses runs of spaces or
    tabs to a single character; pre-formatted text is kept verbatim.
    XML markup receives no special treatment -- it is printed as-is.

    Returns a two-element list: the processed text and the count of
    significant (non-whitespace) characters.
    """
    out = []
    significant = 0
    pending_gap = False  # True once a gap has been emitted in flowed mode
    for ch in line:
        if ch != " " and ch != "\t":
            # Ordinary character: always kept, resets the gap state.
            significant += 1
            pending_gap = False
            out.append(ch)
        elif format == 1:
            # Pre-formatted: whitespace preserved exactly.
            out.append(ch)
        elif format == 0:
            # Flowed: keep one separator, but only after some real text.
            if significant and not pending_gap:
                pending_gap = True
                out.append(ch)
        else:
            # Any other format value: original behavior treated the
            # whitespace like a significant character.
            significant += 1
            pending_gap = False
            out.append(ch)
    return ["".join(out), significant]
| arunkgupta/gramps | gramps/plugins/docgen/rtfdoc.py | Python | gpl-2.0 | 21,154 | [
"Brian"
] | 6d400c46a2eaedfe2929953c455b74e5e9a6fb3cf0422dd8da5e995c4397c25e |
import re
import tensorflow as tf
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'GPU0'
def conv_layer(name, input, shape, weight_decay=0.0, stride=None, visualize=None):
    """Build a conv + bias + ReLU layer inside a variable scope.

    Args:
        name: variable-scope name for the layer.
        input: input tensor (NHWC).
        shape: kernel shape [kh, kw, in_channels, out_channels].
        weight_decay: L2 weight-decay factor applied to the kernel.
        stride: conv2d strides; defaults to [1, 1, 1, 1].
        visualize: optional (grid_x, grid_y) tuple -- renders the kernels
            as an image summary (mainly useful for the first layer).

    Returns:
        The ReLU-activated output tensor.
    """
    if stride is None:  # `is None`, not `==`: avoids elementwise comparison
        stride = [1, 1, 1, 1]
    with tf.variable_scope(name) as scope:
        kernel = _variable_with_weight_decay('weights',
                                             shape=shape,
                                             stddev=5e-2,
                                             weight_decay=weight_decay)
        # Visualize kernel of conv1
        if visualize is not None:
            grid = put_kernels_on_grid(kernel, visualize[0], visualize[1])
            tf.summary.image("{0}/features".format(name), grid, max_outputs=1)
        conv = tf.nn.conv2d(input, kernel, stride, padding='SAME')
        biases = _get_variable('biases', [shape[3]], tf.constant_initializer(0.0))
        bias = tf.nn.bias_add(conv, biases)
        conv_n = tf.nn.relu(bias, name=scope.name)
        _activation_summary(conv_n)
        return conv_n
def max_pool_2x2(name, input):
    """2x2 max-pooling with stride 2 and SAME padding (halves H and W)."""
    window = [1, 2, 2, 1]
    with tf.variable_scope(name) as scope:
        return tf.nn.max_pool(input, ksize=window, strides=window,
                              padding='SAME', name=scope.name)
def fully_connected(name, input, shape, weight_decay=0.0):
    """ReLU fully-connected layer: relu(input @ W + b)."""
    with tf.variable_scope(name) as scope:
        weights = _variable_with_weight_decay('weights', shape=shape,
                                              stddev=0.04, weight_decay=weight_decay)
        biases = _get_variable('biases', shape[1], tf.constant_initializer(0.1))
        pre_activation = tf.matmul(input, weights) + biases
        fc = tf.nn.relu(pre_activation, name=scope.name)
        _activation_summary(fc)
        return fc
def output(name, input, shape):
    """Final layer: sigmoid(input @ W + b).

    NOTE: despite the local name `logits`, the returned tensor holds
    *sigmoid activations*, not raw logits -- tf.sigmoid is applied here.
    """
    with tf.variable_scope(name) as scope:
        weights = _variable_with_weight_decay('weights', shape,
                                              stddev=1/shape[0], weight_decay=0.0)
        biases = _get_variable('biases', [shape[1]],
                               tf.constant_initializer(0.0))
        logits = tf.sigmoid(tf.matmul(input, weights) + biases)
        _activation_summary(logits)
        return logits
def inference(images, num_classes, depth, dropout):
    #pylint: disable=maybe-no-member
    """Build the CIFAR-10 model.
    Args:
      images: Images returned from distorted_inputs() or inputs().
      num_classes: The number of classes to predict
      depth: 1 if grayscale, 3 for rgb
      dropout: Probability for dropout. Ex.: 0.75 during training and 1. for evaluation
    Returns:
      Logits.
    """
    weight_decay = 0.0001
    print("Building model.")
    #
    # Conv -> max_pool Layer
    #
    # Input = W1×H1×D1
    # W2=(W1−F+2P)/S+1
    # H2=(H1−F+2P)/S+1
    # D2=K
    # Input is 224x224x3
    # Note: Compromise at first layer to reduce memory usage
    conv1_1 = conv_layer(name='conv1_1', input=images, shape=[7, 7, depth, 32],
                         weight_decay=weight_decay*2, stride=[1, 2, 2, 1], visualize=(8, 4))
    conv1_2 = conv_layer(name='conv1_2', input=conv1_1, shape=[5, 5, 32, 32], weight_decay=weight_decay)
    # Input is 112x112x32
    max_pool1 = max_pool_2x2('max_pool1', conv1_2)
    # Input is 56x56x32
    conv2_1 = conv_layer(name='conv2_1', input=max_pool1, shape=[5, 5, 32, 64], weight_decay=weight_decay)
    conv2_2 = conv_layer(name='conv2_2', input=conv2_1, shape=[5, 5, 64, 80], weight_decay=weight_decay)
    max_pool2 = max_pool_2x2('max_pool2', conv2_2)
    # Input is 28x28x80
    conv3_1 = conv_layer(name='conv3_1', input=max_pool2, shape=[5, 5, 80, 80], weight_decay=weight_decay)
    conv3_2 = conv_layer(name='conv3_2', input=conv3_1, shape=[5, 5, 80, 192], weight_decay=weight_decay)
    max_pool3 = max_pool_2x2('max_pool3', conv3_2)
    # Input is 14x14x192
    conv4_1 = conv_layer(name='conv4_1', input=max_pool3, shape=[5, 5, 192, 192], weight_decay=weight_decay)
    conv4_2 = conv_layer(name='conv4_2', input=conv4_1, shape=[5, 5, 192, 192], weight_decay=weight_decay)
    max_pool4 = max_pool_2x2('max_pool4', conv4_2)
    # Input is 7x7x192; flatten for the fully-connected stack.
    reshape = tf.reshape(max_pool4, [-1, 7*7*192])
    dim = reshape.get_shape()[1].value
    #
    # Fully connected layers
    #
    # NOTE(review): the (7*7*192) / N expressions assume Python 2 integer
    # division; under Python 3 `/` yields floats, which would break the
    # layer shapes -- confirm before porting (use // there).
    fc1 = fully_connected('fc1', reshape, [dim, (7*7*192) / 2], weight_decay=weight_decay)
    fc1 = tf.nn.dropout(fc1, dropout)
    fc2 = fully_connected('fc2', fc1, [(7*7*192) / 2, (7*7*192) / 8], weight_decay=weight_decay)
    fc2 = tf.nn.dropout(fc2, dropout)
    fc3 = fully_connected('fc3', fc2, [(7*7*192) / 8, (7*7*192) / 32], weight_decay=weight_decay)
    fc3 = tf.nn.dropout(fc3, dropout)
    #
    # Create a linear output
    #
    logits = output('output', fc3, [(7*7*192) / 32, num_classes])
    return logits
def _activation_summary(x):
    """Attach histogram and sparsity summaries to activation tensor *x*.

    Any 'TOWER_NAME_<n>/' prefix is stripped from the op name so that
    multi-GPU training sessions share one set of summary names.
    """
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.summary.histogram(tensor_name + '/activations', x)
    tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _get_variable(name, shape, initializer):
    """Fetch or create a float32 variable via tf.get_variable.

    Args:
        name: name of the variable
        shape: list of ints
        initializer: initializer for the variable

    Returns:
        Variable tensor.
    """
    var = tf.get_variable(name, shape, initializer=initializer,
                          dtype=tf.float32)
    return var
def _variable_with_weight_decay(name, shape, stddev, weight_decay):
    """Create a truncated-normal-initialized variable, optionally with L2 decay.

    Args:
        name: name of the variable
        shape: list of ints
        stddev: standard deviation of the truncated Gaussian initializer
        weight_decay: L2 loss factor; if None or 0, no decay term is added.

    Returns:
        Variable tensor.
    """
    dtype = tf.float32  # tf.float16 if FLAGS.use_fp16 else tf.float32
    var = _get_variable(
        name,
        shape,
        tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
    # Only add a decay term for a truthy factor: callers pass 0.0 to mean
    # "no decay", and the previous `is not None` check still added a
    # useless zero-valued multiply to the 'losses' collection.
    if weight_decay:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), weight_decay, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
# https://gist.github.com/kukuruza/03731dc494603ceab0c5
# https://github.com/tensorflow/tensorflow/issues/908
def put_kernels_on_grid (kernel, grid_X, grid_Y, pad=1):
    '''Visualize conv. features as an image (mostly for the 1st layer).
    Place kernel into a grid, with some paddings between adjacent filters.
    Args:
      kernel: tensor of shape [Y, X, NumChannels, NumKernels]
      (grid_Y, grid_X): shape of the grid. Require: NumKernels == grid_Y * grid_X
        User is responsible of how to break into two multiples.
      pad: number of black pixels around each filter (between them)
    Return:
      Tensor of shape [1, (Y+pad)*grid_Y, (X+pad)*grid_X, NumChannels],
      i.e. tf.summary.image order with batch_size == 1.
    '''
    # pad X and Y (padding is added on one side only, so each filter tile
    # grows to (Y+pad) x (X+pad))
    x1 = tf.pad(kernel, tf.constant( [[pad,0],[pad,0],[0,0],[0,0]] ))
    # X and Y dimensions, w.r.t. padding
    Y = kernel.get_shape()[0] + pad
    X = kernel.get_shape()[1] + pad
    Z = kernel.get_shape()[2]
    # put NumKernels to the 1st dimension
    x2 = tf.transpose(x1, (3, 0, 1, 2))
    # organize grid on Y axis
    x3 = tf.reshape(x2, tf.stack([grid_X, Y * grid_Y, X, Z]))
    # switch X and Y axes
    x4 = tf.transpose(x3, (0, 2, 1, 3))
    # organize grid on X axis
    x5 = tf.reshape(x4, tf.stack([1, X * grid_X, Y * grid_Y, Z]))
    # back to normal order (not combining with the next step for clarity)
    x6 = tf.transpose(x5, (2, 1, 3, 0))
    # to tf.summary.image order [batch_size, height, width, channels],
    # where in this case batch_size == 1
    x7 = tf.transpose(x6, (3, 0, 1, 2))
    # scale to [0, 1]
    x_min = tf.reduce_min(x7)
    x_max = tf.reduce_max(x7)
    x8 = (x7 - x_min) / (x_max - x_min)
    return x8
| peerdavid/social-neural-network | neural_network/model.py | Python | gpl-3.0 | 8,655 | [
"Gaussian"
] | dc7fe27853d31683d9b39367e0ff437ed50d0d39561ddd679eb4dd18eff45a64 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from abc import abstractmethod
from math import acos, cos, exp, floor, inf, log, pi, sin, sqrt
from numbers import Number
import numpy as np
import astropy.constants as const
import astropy.units as u
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.decorators import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
from . import scalar_inv_efuncs
from . import units as cu
from .core import Cosmology, FlatCosmologyMixin, Parameter
from .parameter import _validate_non_negative, _validate_with_unit
from .utils import aszarr, vectorize_redshift_method
# isort: split
if HAS_SCIPY:
    from scipy.integrate import quad
    from scipy.special import ellipkinc, hyp2f1
else:
    # Deferred-failure stubs: importing this module without scipy works,
    # but any method that needs these routines raises at call time.
    def quad(*args, **kwargs):
        raise ModuleNotFoundError("No module named 'scipy.integrate'")
    def ellipkinc(*args, **kwargs):
        raise ModuleNotFoundError("No module named 'scipy.special'")
    def hyp2f1(*args, **kwargs):
        raise ModuleNotFoundError("No module named 'scipy.special'")
__all__ = ["FLRW", "LambdaCDM", "FlatLambdaCDM", "wCDM", "FlatwCDM",
           "w0waCDM", "Flatw0waCDM", "wpwaCDM", "w0wzCDM", "FlatFLRWMixin"]
__doctest_requires__ = {'*': ['scipy']}
# Some conversion constants -- useful to compute them once here and reuse in
# the initialization rather than have every object do them.
# Conversion factor: H0 in km/s/Mpc -> H0 in 1/s.
H0units_to_invs = (u.km / (u.s * u.Mpc)).to(1.0 / u.s)
sec_to_Gyr = u.s.to(u.Gyr)
# const in critical density in cgs units (g cm^-3)
critdens_const = (3 / (8 * pi * const.G)).cgs.value
# angle conversions
radian_in_arcsec = (1 * u.rad).to(u.arcsec)
radian_in_arcmin = (1 * u.rad).to(u.arcmin)
# Radiation parameter over c^2 in cgs (g cm^-3 K^-4)
a_B_c2 = (4 * const.sigma_sb / const.c ** 3).cgs.value
# Boltzmann constant in eV / K
kB_evK = const.k_B.to(u.eV / u.K)
class FLRW(Cosmology):
"""
A class describing an isotropic and homogeneous
(Friedmann-Lemaitre-Robertson-Walker) cosmology.
This is an abstract base class -- you cannot instantiate examples of this
class, but must work with one of its subclasses, such as
:class:`~astropy.cosmology.LambdaCDM` or :class:`~astropy.cosmology.wCDM`.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0. Note that this does not include massive
neutrinos.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Notes
-----
Class instances are immutable -- you cannot change the parameters' values.
That is, all of the above attributes (except meta) are read only.
For details on how to create performant custom subclasses, see the
documentation on :ref:`astropy-cosmology-fast-integrals`.
"""
H0 = Parameter(doc="Hubble constant as an `~astropy.units.Quantity` at z=0.",
unit="km/(s Mpc)", fvalidate="scalar")
Om0 = Parameter(doc="Omega matter; matter density/critical density at z=0.",
fvalidate="non-negative")
Ode0 = Parameter(doc="Omega dark energy; dark energy density/critical density at z=0.",
fvalidate="float")
Tcmb0 = Parameter(doc="Temperature of the CMB as `~astropy.units.Quantity` at z=0.",
unit="Kelvin", fvalidate="scalar")
Neff = Parameter(doc="Number of effective neutrino species.", fvalidate="non-negative")
m_nu = Parameter(doc="Mass of neutrino species.",
unit="eV", equivalencies=u.mass_energy())
Ob0 = Parameter(doc="Omega baryon; baryonic matter density/critical density at z=0.")
    def __init__(self, H0, Om0, Ode0, Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV,
                 Ob0=None, *, name=None, meta=None):
        """Initialize the cosmology and precompute derived quantities.

        See the class docstring for the meaning of each parameter.
        Besides validating and storing the Parameters, this precomputes
        the Hubble distance/time, the critical density, the photon and
        neutrino densities, and the curvature density
        Ok0 = 1 - (matter + dark energy + radiation + neutrinos).
        """
        super().__init__(name=name, meta=meta)
        # Assign (and validate) Parameters
        self.H0 = H0
        self.Om0 = Om0
        self.Ode0 = Ode0
        self.Tcmb0 = Tcmb0
        self.Neff = Neff
        self.m_nu = m_nu  # (reset later, this is just for unit validation)
        self.Ob0 = Ob0  # (must be after Om0)
        # Derived quantities:
        # Dark matter density; matter - baryons, if latter is not None.
        self._Odm0 = None if Ob0 is None else (self._Om0 - self._Ob0)
        # 100 km/s/Mpc * h = H0 (so h is dimensionless)
        self._h = self._H0.value / 100.0
        # Hubble distance
        self._hubble_distance = (const.c / self._H0).to(u.Mpc)
        # H0 in s^-1
        H0_s = self._H0.value * H0units_to_invs
        # Hubble time
        self._hubble_time = (sec_to_Gyr / H0_s) << u.Gyr
        # Critical density at z=0 (grams per cubic cm)
        cd0value = critdens_const * H0_s ** 2
        self._critical_density0 = cd0value << u.g / u.cm ** 3
        # Compute photon density from Tcmb
        self._Ogamma0 = a_B_c2 * self._Tcmb0.value ** 4 / self._critical_density0.value
        # Compute Neutrino temperature:
        # The constant in front is (4/11)^1/3 -- see any cosmology book for an
        # explanation -- for example, Weinberg 'Cosmology' p 154 eq (3.1.21).
        self._Tnu0 = 0.7137658555036082 * self._Tcmb0
        # Compute neutrino parameters:
        if self._m_nu is None:
            # No neutrinos at all (m_nu validator returned None).
            self._nneutrinos = 0
            self._neff_per_nu = None
            self._massivenu = False
            self._massivenu_mass = None
            self._nmassivenu = self._nmasslessnu = None
        else:
            self._nneutrinos = floor(self._Neff)
            # We are going to share Neff between the neutrinos equally. In
            # detail this is not correct, but it is a standard assumption
            # because properly calculating it is a) complicated b) depends on
            # the details of the massive neutrinos (e.g., their weak
            # interactions, which could be unusual if one is considering
            # sterile neutrinos).
            self._neff_per_nu = self._Neff / self._nneutrinos
            # Now figure out if we have massive neutrinos to deal with, and if
            # so, get the right number of masses. It is worth keeping track of
            # massless ones separately (since they are easy to deal with, and a
            # common use case is to have only one massive neutrino).
            massive = np.nonzero(self._m_nu.value > 0)[0]
            self._massivenu = massive.size > 0
            self._nmassivenu = len(massive)
            self._massivenu_mass = self._m_nu[massive].value if self._massivenu else None
            self._nmasslessnu = self._nneutrinos - self._nmassivenu
        # Compute Neutrino Omega and total relativistic component for massive
        # neutrinos. We also store a list version, since that is more efficient
        # to do integrals with (perhaps surprisingly! But small python lists
        # are more efficient than small NumPy arrays).
        if self._massivenu:  # (`_massivenu` set in `m_nu`)
            nu_y = self._massivenu_mass / (kB_evK * self._Tnu0)
            self._nu_y = nu_y.value
            self._nu_y_list = self._nu_y.tolist()
            self._Onu0 = self._Ogamma0 * self.nu_relative_density(0)
        else:
            # This case is particularly simple, so do it directly The 0.2271...
            # is 7/8 (4/11)^(4/3) -- the temperature bit ^4 (blackbody energy
            # density) times 7/8 for FD vs. BE statistics.
            self._Onu0 = 0.22710731766 * self._Neff * self._Ogamma0
            self._nu_y = self._nu_y_list = None
        # Compute curvature density
        self._Ok0 = 1.0 - self._Om0 - self._Ode0 - self._Ogamma0 - self._Onu0
        # Subclasses should override this reference if they provide
        # more efficient scalar versions of inv_efunc.
        self._inv_efunc_scalar = self.inv_efunc
        self._inv_efunc_scalar_args = ()
# ---------------------------------------------------------------
# Parameter details
    @Ob0.validator
    def Ob0(self, param, value):
        """Validate baryon density: None, or a non-negative float that
        does not exceed the total matter density Om0."""
        if value is None:
            return value  # the baryon density is optional
        value = _validate_non_negative(self, param, value)
        if value > self.Om0:
            raise ValueError("baryonic density can not be larger than total matter density.")
        return value
    @m_nu.validator
    def m_nu(self, param, value):
        """Validate neutrino masses to right value, units, and shape.
        There are no neutrinos if floor(Neff) or Tcmb0 are 0.
        The number of neutrinos must match floor(Neff).
        Neutrino masses cannot be negative.
        """
        # Check if there are any neutrinos
        if (nneutrinos := floor(self._Neff)) == 0 or self._Tcmb0.value == 0:
            return None  # None, regardless of input
        # Validate / set units
        value = _validate_with_unit(self, param, value)
        # Check values and data shapes
        if value.shape not in ((), (nneutrinos,)):
            raise ValueError("unexpected number of neutrino masses — "
                             f"expected {nneutrinos}, got {len(value)}.")
        elif np.any(value.value < 0):
            raise ValueError("invalid (negative) neutrino mass encountered.")
        # scalar -> array: broadcast one mass to all species
        # (np.full_like with shape= keeps the unit of `value`).
        if value.isscalar:
            value = np.full_like(value, value, shape=nneutrinos)
        return value
# ---------------------------------------------------------------
# properties
    # Read-only views of the z=0 Parameters and derived quantities
    # computed once in __init__.
    @property
    def is_flat(self):
        """Return bool; `True` if the cosmology is flat."""
        return bool((self._Ok0 == 0.0) and (self.Otot0 == 1.0))
    @property
    def Otot0(self):
        """Omega total; the total density/critical density at z=0."""
        return self._Om0 + self._Ogamma0 + self._Onu0 + self._Ode0 + self._Ok0
    @property
    def Odm0(self):
        """Omega dark matter; dark matter density/critical density at z=0."""
        return self._Odm0
    @property
    def Ok0(self):
        """Omega curvature; the effective curvature density/critical density at z=0."""
        return self._Ok0
    @property
    def Tnu0(self):
        """Temperature of the neutrino background as `~astropy.units.Quantity` at z=0."""
        return self._Tnu0
    @property
    def has_massive_nu(self):
        """Does this cosmology have at least one massive neutrino species?"""
        # No neutrino background at all implies no massive neutrinos.
        if self._Tnu0.value == 0:
            return False
        return self._massivenu
    @property
    def h(self):
        """Dimensionless Hubble constant: h = H_0 / 100 [km/sec/Mpc]."""
        return self._h
    @property
    def hubble_time(self):
        """Hubble time as `~astropy.units.Quantity` [Gyr]."""
        return self._hubble_time
    @property
    def hubble_distance(self):
        """Hubble distance as `~astropy.units.Quantity` [Mpc]."""
        return self._hubble_distance
    @property
    def critical_density0(self):
        """Critical density as `~astropy.units.Quantity` [g cm^-3] at z=0."""
        return self._critical_density0
    @property
    def Ogamma0(self):
        """Omega gamma; the density/critical density of photons at z=0."""
        return self._Ogamma0
    @property
    def Onu0(self):
        """Omega nu; the density/critical density of neutrinos at z=0."""
        return self._Onu0
# ---------------------------------------------------------------
    @abstractmethod
    def w(self, z):
        r"""The dark energy equation of state.
        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.
        Returns
        -------
        w : ndarray or float
            The dark energy equation of state.
            `float` if scalar input.
        Notes
        -----
        The dark energy equation of state is defined as
        :math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
        redshift z and :math:`\rho(z)` is the density at redshift z, both in
        units where c=1.
        This must be overridden by subclasses.
        """
        # Concrete subclasses (e.g. LambdaCDM, wCDM) provide the
        # implementation; calling the base class version always raises.
        raise NotImplementedError("w(z) is not implemented")
def Otot(self, z):
"""The total density parameter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
Otot : ndarray or float
The total density relative to the critical density at each redshift.
Returns float if input scalar.
"""
return self.Om(z) + self.Ogamma(z) + self.Onu(z) + self.Ode(z) + self.Ok(z)
def Om(self, z):
"""
Return the density parameter for non-relativistic matter
at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Om : ndarray or float
The density of non-relativistic matter relative to the critical
density at each redshift.
Returns `float` if the input is scalar.
Notes
-----
This does not include neutrinos, even if non-relativistic at the
redshift of interest; see `Onu`.
"""
z = aszarr(z)
return self._Om0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
def Ob(self, z):
"""Return the density parameter for baryonic matter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ob : ndarray or float
The density of baryonic matter relative to the critical density at
each redshift.
Returns `float` if the input is scalar.
Raises
------
ValueError
If ``Ob0`` is `None`.
"""
if self._Ob0 is None:
raise ValueError("Baryon density not set for this cosmology")
z = aszarr(z)
return self._Ob0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
def Odm(self, z):
"""Return the density parameter for dark matter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Odm : ndarray or float
The density of non-relativistic dark matter relative to the
critical density at each redshift.
Returns `float` if the input is scalar.
Raises
------
ValueError
If ``Ob0`` is `None`.
Notes
-----
This does not include neutrinos, even if non-relativistic at the
redshift of interest.
"""
if self._Odm0 is None:
raise ValueError("Baryonic density not set for this cosmology, "
"unclear meaning of dark matter density")
z = aszarr(z)
return self._Odm0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
def Ok(self, z):
"""
Return the equivalent density parameter for curvature at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ok : ndarray or float
The equivalent density parameter for curvature at each redshift.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
if self._Ok0 == 0: # Common enough to be worth checking explicitly
return np.zeros(z.shape) if hasattr(z, "shape") else 0.0
return self._Ok0 * (z + 1.0) ** 2 * self.inv_efunc(z) ** 2
def Ode(self, z):
"""Return the density parameter for dark energy at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ode : ndarray or float
The density of non-relativistic matter relative to the critical
density at each redshift.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
if self._Ode0 == 0: # Common enough to be worth checking explicitly
return np.zeros(z.shape) if hasattr(z, "shape") else 0.0
return self._Ode0 * self.de_density_scale(z) * self.inv_efunc(z) ** 2
def Ogamma(self, z):
"""Return the density parameter for photons at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ogamma : ndarray or float
The energy density of photons relative to the critical density at
each redshift.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
return self._Ogamma0 * (z + 1.0) ** 4 * self.inv_efunc(z) ** 2
def Onu(self, z):
r"""Return the density parameter for neutrinos at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Onu : ndarray or float
The energy density of neutrinos relative to the critical density at
each redshift. Note that this includes their kinetic energy (if
they have mass), so it is not equal to the commonly used
:math:`\sum \frac{m_{\nu}}{94 eV}`, which does not include
kinetic energy.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
if self._Onu0 == 0: # Common enough to be worth checking explicitly
return np.zeros(z.shape) if hasattr(z, "shape") else 0.0
return self.Ogamma(z) * self.nu_relative_density(z)
def Tcmb(self, z):
"""Return the CMB temperature at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Tcmb : `~astropy.units.Quantity` ['temperature']
The temperature of the CMB in K.
"""
return self._Tcmb0 * (aszarr(z) + 1.0)
def Tnu(self, z):
"""Return the neutrino temperature at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Tnu : `~astropy.units.Quantity` ['temperature']
The temperature of the cosmic neutrino background in K.
"""
return self._Tnu0 * (aszarr(z) + 1.0)
    def nu_relative_density(self, z):
        r"""Neutrino density function relative to the energy density in photons.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        f : ndarray or float
            The neutrino density scaling factor relative to the density in
            photons at each redshift.
            Only returns `float` if z is scalar.

        Notes
        -----
        The density in neutrinos is given by

        .. math::

           \rho_{\nu} \left(a\right) = 0.2271 \, N_{eff} \,
           f\left(m_{\nu} a / T_{\nu 0} \right) \,
           \rho_{\gamma} \left( a \right)

        where

        .. math::

           f \left(y\right) = \frac{120}{7 \pi^4}
           \int_0^{\infty} \, dx \frac{x^2 \sqrt{x^2 + y^2}}
           {e^x + 1}

        assuming that all neutrino species have the same mass.
        If they have different masses, a similar term is calculated for each
        one. Note that ``f`` has the asymptotic behavior :math:`f(0) = 1`. This
        method returns :math:`0.2271 f` using an analytical fitting formula
        given in Komatsu et al. 2011, ApJS 192, 18.
        """
        # Note that there is also a scalar-z-only cython implementation of
        # this in scalar_inv_efuncs.pyx, so if you find a problem in this
        # you need to update there too.
        # See Komatsu et al. 2011, eq 26 and the surrounding discussion
        # for an explanation of what we are doing here.
        # However, this is modified to handle multiple neutrino masses
        # by computing the above for each mass, then summing
        prefac = 0.22710731766  # 7/8 (4/11)^4/3 -- see any cosmo book
        # The massive and massless contribution must be handled separately
        # But check for common cases first
        z = aszarr(z)
        if not self._massivenu:
            # All species relativistic: the ratio is redshift-independent,
            # so just broadcast the constant to the input shape.
            return prefac * self._Neff * (np.ones(z.shape) if hasattr(z, "shape") else 1.0)
        # These are purely fitting constants -- see the Komatsu paper
        p = 1.83
        invp = 0.54644808743  # 1.0 / p
        k = 0.3173
        # y = m_nu a / T_nu0 for each massive species; expand_dims lets a
        # vector of masses broadcast against an array of redshifts.
        curr_nu_y = self._nu_y / (1. + np.expand_dims(z, axis=-1))
        # Komatsu et al. eq. 26 fitting formula, evaluated per species.
        rel_mass_per = (1.0 + (k * curr_nu_y) ** p) ** invp
        # Sum over massive species and add the massless ones (each contributes 1).
        rel_mass = rel_mass_per.sum(-1) + self._nmasslessnu
        return prefac * self._neff_per_nu * rel_mass
def _w_integrand(self, ln1pz):
"""Internal convenience function for w(z) integral (eq. 5 of [1]_).
Parameters
----------
ln1pz : `~numbers.Number` or scalar ndarray
Assumes scalar input, since this should only be called inside an
integral.
References
----------
.. [1] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
"""
return 1.0 + self.w(exp(ln1pz) - 1.0)
    def de_density_scale(self, z):
        r"""Evaluates the redshift dependence of the dark energy density.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        I : ndarray or float
            The scaling of the energy density of dark energy with redshift.
            Returns `float` if the input is scalar.

        Notes
        -----
        The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
        and is given by

        .. math::

           I = \exp \left( 3 \int_{a}^1 \frac{ da^{\prime} }{ a^{\prime} }
           \left[ 1 + w\left( a^{\prime} \right) \right] \right)

        The actual integral used is rewritten from [1]_ to be in terms of z.
        It will generally helpful for subclasses to overload this method if
        the integral can be done analytically for the particular dark
        energy equation of state that they implement.

        References
        ----------
        .. [1] Linder, E. (2003). Exploring the Expansion History of the
               Universe. Phys. Rev. Lett., 90, 091301.
        """
        # This allows for an arbitrary w(z) following eq (5) of
        # Linder 2003, PRL 90, 91301. The code here evaluates
        # the integral numerically. However, most popular
        # forms of w(z) are designed to make this integral analytic,
        # so it is probably a good idea for subclasses to overload this
        # method if an analytic form is available.
        z = aszarr(z)
        if not isinstance(z, (Number, np.generic)):  # array/Quantity
            # quad only handles scalar limits, so integrate one redshift
            # at a time; the substitution u = ln(1+z) makes the integrand
            # 1 + w(z), handled by _w_integrand.
            ival = np.array([quad(self._w_integrand, 0, log(1 + redshift))[0]
                             for redshift in z])
            return np.exp(3 * ival)
        else:  # scalar
            # Same integral, but stay with math.exp/log for scalar speed.
            ival = quad(self._w_integrand, 0, log(z + 1.0))[0]
            return exp(3 * ival)
    def efunc(self, z):
        """Function used to calculate H(z), the Hubble parameter.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        E : ndarray or float
            The redshift scaling of the Hubble constant.
            Returns `float` if the input is scalar.
            Defined such that :math:`H(z) = H_0 E(z)`.

        Notes
        -----
        It is not necessary to override this method, but if de_density_scale
        takes a particularly simple form, it may be advantageous to.
        """
        # Total relativistic density today. With massive neutrinos the
        # neutrino term no longer scales like pure radiation, so fold in
        # the z-dependent photon-relative factor instead of Onu0.
        Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
                              else self._Ogamma0 * self.nu_relative_density(z))
        zp1 = aszarr(z) + 1.0  # (converts z [unit] -> z [dimensionless])
        # Horner-style evaluation of
        # Or (1+z)^4 + Om0 (1+z)^3 + Ok0 (1+z)^2 + Ode0 * de_density_scale(z).
        return np.sqrt(zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) +
                       self._Ode0 * self.de_density_scale(z))
    def inv_efunc(self, z):
        """Inverse of ``efunc``.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        E : ndarray or float
            The redshift scaling of the inverse Hubble constant,
            i.e. ``1 / E(z)``. Returns `float` if the input is scalar.
        """
        # Avoid the function overhead by repeating code
        # (same density combination as in efunc; see comments there).
        Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
                              else self._Ogamma0 * self.nu_relative_density(z))
        zp1 = aszarr(z) + 1.0  # (converts z [unit] -> z [dimensionless])
        # E(z)^2 raised to -0.5 in one step rather than 1/sqrt(...).
        return (zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) +
                self._Ode0 * self.de_density_scale(z))**(-0.5)
def _lookback_time_integrand_scalar(self, z):
"""Integrand of the lookback time (equation 30 of [1]_).
Parameters
----------
z : float
Input redshift.
Returns
-------
I : float
The integrand for the lookback time.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
return self._inv_efunc_scalar(z, *self._inv_efunc_scalar_args) / (z + 1.0)
def lookback_time_integrand(self, z):
"""Integrand of the lookback time (equation 30 of [1]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : float or array
The integrand for the lookback time.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
z = aszarr(z)
return self.inv_efunc(z) / (z + 1.0)
def _abs_distance_integrand_scalar(self, z):
"""Integrand of the absorption distance [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
X : float
The integrand for the absorption distance.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
args = self._inv_efunc_scalar_args
return (z + 1.0) ** 2 * self._inv_efunc_scalar(z, *args)
def abs_distance_integrand(self, z):
"""Integrand of the absorption distance [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
X : float or array
The integrand for the absorption distance.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
z = aszarr(z)
return (z + 1.0) ** 2 * self.inv_efunc(z)
def H(self, z):
"""Hubble parameter (km/s/Mpc) at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
H : `~astropy.units.Quantity` ['frequency']
Hubble parameter at each input redshift.
"""
return self._H0 * self.efunc(z)
def scale_factor(self, z):
"""Scale factor at redshift ``z``.
The scale factor is defined as :math:`a = 1 / (1 + z)`.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
a : ndarray or float
Scale factor at each input redshift.
Returns `float` if the input is scalar.
"""
return 1.0 / (aszarr(z) + 1.0)
def lookback_time(self, z):
"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to a lookback time.
"""
return self._lookback_time(z)
def _lookback_time(self, z):
"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
"""
return self._hubble_time * self._integral_lookback_time(z)
    @vectorize_redshift_method
    def _integral_lookback_time(self, z, /):
        """Lookback time to redshift ``z``. Value in units of Hubble time.

        The lookback time is the difference between the age of the Universe now
        and the age at redshift ``z``.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        t : float or ndarray
            Lookback time to each input redshift in Hubble time units.
            Returns `float` if input scalar, `~numpy.ndarray` otherwise.
        """
        # Direct quadrature of dt/dz = 1 / ((1+z) E(z)) from 0 to z; the
        # decorator handles broadcasting this scalar-only call over arrays.
        return quad(self._lookback_time_integrand_scalar, 0, z)[0]
def lookback_distance(self, z):
"""
The lookback distance is the light travel time distance to a given
redshift. It is simply c * lookback_time. It may be used to calculate
the proper distance between two redshifts, e.g. for the mean free path
to ionizing radiation.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Lookback distance in Mpc
"""
return (self.lookback_time(z) * const.c).to(u.Mpc)
def age(self, z):
"""Age of the universe in Gyr at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to an age.
"""
return self._age(z)
def _age(self, z):
"""Age of the universe in Gyr at redshift ``z``.
This internal function exists to be re-defined for optimizations.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
"""
return self._hubble_time * self._integral_age(z)
    @vectorize_redshift_method
    def _integral_age(self, z, /):
        """Age of the universe at redshift ``z``. Value in units of Hubble time.

        Calculated using explicit integration.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        t : float or ndarray
            The age of the universe at each input redshift in Hubble time units.
            Returns `float` if input scalar, `~numpy.ndarray` otherwise.

        See Also
        --------
        z_at_value : Find the redshift corresponding to an age.
        """
        # The age at z is the "lookback time" from z out to infinite redshift,
        # so reuse the same integrand with limits [z, inf).
        return quad(self._lookback_time_integrand_scalar, z, np.inf)[0]
def critical_density(self, z):
"""Critical density in grams per cubic cm at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
rho : `~astropy.units.Quantity`
Critical density in g/cm^3 at each input redshift.
"""
return self._critical_density0 * (self.efunc(z)) ** 2
def comoving_distance(self, z):
"""Comoving line-of-sight distance in Mpc at a given redshift.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc to each input redshift.
"""
return self._comoving_distance_z1z2(0, z)
def _comoving_distance_z1z2(self, z1, z2):
"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2``.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
return self._integral_comoving_distance_z1z2(z1, z2)
    @vectorize_redshift_method(nin=2)
    def _integral_comoving_distance_z1z2_scalar(self, z1, z2, /):
        """Comoving line-of-sight distance between redshifts. Value in Mpc.

        The comoving distance along the line-of-sight between two objects
        remains constant with time for objects in the Hubble flow.

        Parameters
        ----------
        z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshifts.

        Returns
        -------
        d : float or ndarray
            Comoving distance in Mpc between each input redshift.
            Returns `float` if input scalar, `~numpy.ndarray` otherwise.
        """
        # Integrate 1/E(z) from z1 to z2 with the fast scalar implementation;
        # the decorator (nin=2) broadcasts over both redshift arguments.
        return quad(self._inv_efunc_scalar, z1, z2, args=self._inv_efunc_scalar_args)[0]
def _integral_comoving_distance_z1z2(self, z1, z2):
"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2``. The comoving distance along the line-of-sight
between two objects remains constant with time for objects in the
Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'] or array-like
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
return self._hubble_distance * self._integral_comoving_distance_z1z2_scalar(z1, z2)
def comoving_transverse_distance(self, z):
r"""Comoving transverse distance in Mpc at a given redshift.
This value is the transverse comoving distance at redshift ``z``
corresponding to an angular separation of 1 radian. This is the same as
the comoving distance if :math:`\Omega_k` is zero (as in the current
concordance Lambda-CDM model).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving transverse distance in Mpc at each input redshift.
Notes
-----
This quantity is also called the 'proper motion distance' in some texts.
"""
return self._comoving_transverse_distance_z1z2(0, z)
    def _comoving_transverse_distance_z1z2(self, z1, z2):
        r"""Comoving transverse distance in Mpc between two redshifts.

        This value is the transverse comoving distance at redshift ``z2`` as
        seen from redshift ``z1`` corresponding to an angular separation of
        1 radian. This is the same as the comoving distance if :math:`\Omega_k`
        is zero (as in the current concordance Lambda-CDM model).

        Parameters
        ----------
        z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshifts.

        Returns
        -------
        d : `~astropy.units.Quantity` ['length']
            Comoving transverse distance in Mpc between input redshift.

        Notes
        -----
        This quantity is also called the 'proper motion distance' in some
        texts.
        """
        Ok0 = self._Ok0
        dc = self._comoving_distance_z1z2(z1, z2)
        # Flat universe: transverse and line-of-sight distances coincide.
        if Ok0 == 0:
            return dc
        sqrtOk0 = sqrt(abs(Ok0))
        dh = self._hubble_distance
        # Curved cases: sinh for open (Ok0 > 0), sin for closed (Ok0 < 0).
        # Work with .value (Mpc floats) so the trig argument is dimensionless,
        # then re-attach the Mpc unit via dh.
        if Ok0 > 0:
            return dh / sqrtOk0 * np.sinh(sqrtOk0 * dc.value / dh.value)
        else:
            return dh / sqrtOk0 * np.sin(sqrtOk0 * dc.value / dh.value)
def angular_diameter_distance(self, z):
"""Angular diameter distance in Mpc at a given redshift.
This gives the proper (sometimes called 'physical') transverse
distance corresponding to an angle of 1 radian for an object
at redshift ``z`` ([1]_, [2]_, [3]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Angular diameter distance in Mpc at each input redshift.
References
----------
.. [1] Weinberg, 1972, pp 420-424; Weedman, 1986, pp 421-424.
.. [2] Weedman, D. (1986). Quasar astronomy, pp 65-67.
.. [3] Peebles, P. (1993). Principles of Physical Cosmology, pp 325-327.
"""
z = aszarr(z)
return self.comoving_transverse_distance(z) / (z + 1.0)
def luminosity_distance(self, z):
"""Luminosity distance in Mpc at redshift ``z``.
This is the distance to use when converting between the bolometric flux
from an object at redshift ``z`` and its bolometric luminosity [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Luminosity distance in Mpc at each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to a luminosity distance.
References
----------
.. [1] Weinberg, 1972, pp 420-424; Weedman, 1986, pp 60-62.
"""
z = aszarr(z)
return (z + 1.0) * self.comoving_transverse_distance(z)
def angular_diameter_distance_z1z2(self, z1, z2):
"""Angular diameter distance between objects at 2 redshifts.
Useful for gravitational lensing, for example computing the angular
diameter distance between a lensed galaxy and the foreground lens.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts. For most practical applications such as
gravitational lensing, ``z2`` should be larger than ``z1``. The
method will work for ``z2 < z1``; however, this will return
negative distances.
Returns
-------
d : `~astropy.units.Quantity`
The angular diameter distance between each input redshift pair.
Returns scalar if input is scalar, array else-wise.
"""
z1, z2 = aszarr(z1), aszarr(z2)
if np.any(z2 < z1):
warnings.warn(f"Second redshift(s) z2 ({z2}) is less than first "
f"redshift(s) z1 ({z1}).", AstropyUserWarning)
return self._comoving_transverse_distance_z1z2(z1, z2) / (z2 + 1.0)
    @vectorize_redshift_method
    def absorption_distance(self, z, /):
        """Absorption distance at redshift ``z``.

        This is used to calculate the number of objects with some cross section
        of absorption and number density intersecting a sightline per unit
        redshift path ([1]_, [2]_).

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        d : float or ndarray
            Absorption distance (dimensionless) at each input redshift.
            Returns `float` if input scalar, `~numpy.ndarray` otherwise.

        References
        ----------
        .. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
               arXiv e-prints, astro-ph/9905116.
        .. [2] Bahcall, John N. and Peebles, P.J.E. 1969, ApJ, 156L, 7B
        """
        # Quadrature of (1+z)^2 / E(z) from 0 to z; the decorator broadcasts
        # this scalar-only call over array inputs.
        return quad(self._abs_distance_integrand_scalar, 0, z)[0]
def distmod(self, z):
"""Distance modulus at redshift ``z``.
The distance modulus is defined as the (apparent magnitude - absolute
magnitude) for an object at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
distmod : `~astropy.units.Quantity` ['length']
Distance modulus at each input redshift, in magnitudes.
See Also
--------
z_at_value : Find the redshift corresponding to a distance modulus.
"""
# Remember that the luminosity distance is in Mpc
# Abs is necessary because in certain obscure closed cosmologies
# the distance modulus can be negative -- which is okay because
# it enters as the square.
val = 5. * np.log10(abs(self.luminosity_distance(z).value)) + 25.0
return u.Quantity(val, u.mag)
    def comoving_volume(self, z):
        r"""Comoving volume in cubic Mpc at redshift ``z``.

        This is the volume of the universe encompassed by redshifts less than
        ``z``. For the case of :math:`\Omega_k = 0` it is a sphere of radius
        `comoving_distance` but it is less intuitive if :math:`\Omega_k` is not.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        V : `~astropy.units.Quantity`
            Comoving volume in :math:`Mpc^3` at each input redshift.
        """
        Ok0 = self._Ok0
        # Flat universe: ordinary Euclidean sphere of the comoving distance.
        if Ok0 == 0:
            return 4.0 / 3.0 * pi * self.comoving_distance(z) ** 3
        dh = self._hubble_distance.value  # .value for speed
        dm = self.comoving_transverse_distance(z).value
        # Hogg 1999, eq. 29: the curved-space volume formula, split into
        # three terms; sinh^-1 for open (Ok0 > 0), sin^-1 for closed.
        term1 = 4.0 * pi * dh ** 3 / (2.0 * Ok0) * u.Mpc ** 3
        term2 = dm / dh * np.sqrt(1 + Ok0 * (dm / dh) ** 2)
        term3 = sqrt(abs(Ok0)) * dm / dh
        if Ok0 > 0:
            return term1 * (term2 - 1. / sqrt(abs(Ok0)) * np.arcsinh(term3))
        else:
            return term1 * (term2 - 1. / sqrt(abs(Ok0)) * np.arcsin(term3))
def differential_comoving_volume(self, z):
"""Differential comoving volume at redshift z.
Useful for calculating the effective comoving volume.
For example, allows for integration over a comoving volume that has a
sensitivity function that changes with redshift. The total comoving
volume is given by integrating ``differential_comoving_volume`` to
redshift ``z`` and multiplying by a solid angle.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
dV : `~astropy.units.Quantity`
Differential comoving volume per redshift per steradian at each
input redshift.
"""
dm = self.comoving_transverse_distance(z)
return self._hubble_distance * (dm ** 2.0) / (self.efunc(z) << u.steradian)
def kpc_comoving_per_arcmin(self, z):
"""
Separation in transverse comoving kpc corresponding to an arcminute at
redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
The distance in comoving kpc corresponding to an arcmin at each
input redshift.
"""
return self.comoving_transverse_distance(z).to(u.kpc) / radian_in_arcmin
def kpc_proper_per_arcmin(self, z):
"""
Separation in transverse proper kpc corresponding to an arcminute at
redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
The distance in proper kpc corresponding to an arcmin at each input
redshift.
"""
return self.angular_diameter_distance(z).to(u.kpc) / radian_in_arcmin
def arcsec_per_kpc_comoving(self, z):
"""
Angular separation in arcsec corresponding to a comoving kpc at
redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
theta : `~astropy.units.Quantity` ['angle']
The angular separation in arcsec corresponding to a comoving kpc at
each input redshift.
"""
return radian_in_arcsec / self.comoving_transverse_distance(z).to(u.kpc)
def arcsec_per_kpc_proper(self, z):
"""
Angular separation in arcsec corresponding to a proper kpc at redshift
``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
theta : `~astropy.units.Quantity` ['angle']
The angular separation in arcsec corresponding to a proper kpc at
each input redshift.
"""
return radian_in_arcsec / self.angular_diameter_distance(z).to(u.kpc)
class FlatFLRWMixin(FlatCosmologyMixin):
    """
    Mixin class for flat FLRW cosmologies. Do NOT instantiate directly.

    Must precede the base class in the multiple-inheritance so that this
    mixin's ``__init__`` proceeds the base class'.

    Note that all instances of ``FlatFLRWMixin`` are flat, but not all
    flat cosmologies are instances of ``FlatFLRWMixin``. As example,
    ``LambdaCDM`` **may** be flat (for the a specific set of parameter values),
    but ``FlatLambdaCDM`` **will** be flat.
    """

    Ode0 = FLRW.Ode0.clone(derived=True)  # same as FLRW, but now a derived param.

    def __init_subclass__(cls):
        """Forbid subclasses from re-exposing ``Ode0`` as an init parameter.

        In a flat cosmology ``Ode0`` is fully determined by the other
        densities, so accepting it in ``__init__`` would be contradictory.
        """
        super().__init_subclass__()
        if "Ode0" in cls._init_signature.parameters:
            raise TypeError("subclasses of `FlatFLRWMixin` cannot have `Ode0` in `__init__`")

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)  # guaranteed not to have `Ode0`
        # Do some twiddling after the fact to get flatness:
        # zero curvature, and dark energy absorbs whatever is left of
        # the density budget (Otot0 == 1 exactly).
        self._Ok0 = 0.0
        self._Ode0 = 1.0 - (self._Om0 + self._Ogamma0 + self._Onu0 + self._Ok0)

    @property
    def Otot0(self):
        """Omega total; the total density/critical density at z=0."""
        return 1.0

    def Otot(self, z):
        """The total density parameter at redshift ``z``.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshifts.

        Returns
        -------
        Otot : ndarray or float
            Returns float if input scalar. Value of 1.
        """
        # Flatness means Otot is identically 1; just match the input shape.
        return 1.0 if isinstance(z, (Number, np.generic)) else np.ones_like(z, subok=False)

    def __equiv__(self, other):
        """flat-FLRW equivalence. Use ``.is_equivalent()`` for actual check!

        Parameters
        ----------
        other : `~astropy.cosmology.FLRW` subclass instance
            The object in which to compare.

        Returns
        -------
        bool or `NotImplemented`
            `True` if 'other' is of the same class / non-flat class (e.g.
            ``FlatLambdaCDM`` and ``LambdaCDM``) has matching parameters
            and parameter values. `False` if 'other' is of the same class but
            has different parameters. `NotImplemented` otherwise.
        """
        # check if case (1): same class & parameters
        if isinstance(other, FlatFLRWMixin):
            return super().__equiv__(other)
        # check cases (3, 4), if other is the non-flat version of this class
        # this makes the assumption that any further subclass of a flat cosmo
        # keeps the same physics.
        comparable_classes = [c for c in self.__class__.mro()[1:]
                              if (issubclass(c, FLRW) and c is not FLRW)]
        if other.__class__ not in comparable_classes:
            return NotImplemented
        # check if have equivalent parameters
        # check all parameters in other match those in 'self' and 'other' has
        # no extra parameters (case (2)) except for 'Ode0' and that other
        params_eq = (
            set(self.__all_parameters__) == set(other.__all_parameters__)  # no extra
            and all(np.all(getattr(self, k) == getattr(other, k))  # equal
                    for k in self.__parameters__)
            and other.is_flat
        )
        return params_eq
class LambdaCDM(FLRW):
"""FLRW cosmology with a cosmological constant and curvature.
This has no additional attributes beyond those of FLRW.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of the cosmological constant in units of
the critical density at z=0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import LambdaCDM
>>> cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
    def __init__(self, H0, Om0, Ode0, Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV,
                 Ob0=None, *, name=None, meta=None):
        super().__init__(H0=H0, Om0=Om0, Ode0=Ode0, Tcmb0=Tcmb0, Neff=Neff,
                         m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)

        # Please see :ref:`astropy-cosmology-fast-integrals` for discussion
        # about what is being done here.
        # Select the cheapest cython inv_efunc specialization that still
        # captures the physics: no radiation < massless neutrinos < full.
        if self._Tcmb0.value == 0:
            # No photons/neutrinos at all.
            self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc_norel
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0)
            if self._Ok0 == 0:
                # Flat and radiation-free: analytic distance/age fast paths.
                self._optimize_flat_norad()
            else:
                # Curved but radiation-free: elliptic-integral distances.
                self._comoving_distance_z1z2 = self._elliptic_comoving_distance_z1z2
        elif not self._massivenu:
            # Radiation present but all neutrinos massless: constant Or.
            self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc_nomnu
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._Ogamma0 + self._Onu0)
        else:
            # Massive neutrinos: full treatment with per-species fit terms.
            self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._Ogamma0, self._neff_per_nu,
                                           self._nmasslessnu,
                                           self._nu_y_list)
def _optimize_flat_norad(self):
"""Set optimizations for flat LCDM cosmologies with no radiation."""
# Call out the Om0=0 (de Sitter) and Om0=1 (Einstein-de Sitter)
# The dS case is required because the hypergeometric case
# for Omega_M=0 would lead to an infinity in its argument.
# The EdS case is three times faster than the hypergeometric.
if self._Om0 == 0:
self._comoving_distance_z1z2 = self._dS_comoving_distance_z1z2
self._age = self._dS_age
self._lookback_time = self._dS_lookback_time
elif self._Om0 == 1:
self._comoving_distance_z1z2 = self._EdS_comoving_distance_z1z2
self._age = self._EdS_age
self._lookback_time = self._EdS_lookback_time
else:
self._comoving_distance_z1z2 = self._hypergeometric_comoving_distance_z1z2
self._age = self._flat_age
self._lookback_time = self._flat_lookback_time
def w(self, z):
r"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
The dark energy equation of state.
Returns `float` if the input is scalar.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1. Here this is :math:`w(z) = -1`.
"""
z = aszarr(z)
return -1.0 * (np.ones(z.shape) if hasattr(z, "shape") else 1.0)
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and in this case is given by :math:`I = 1`.
"""
z = aszarr(z)
return np.ones(z.shape) if hasattr(z, "shape") else 1.0
    def _elliptic_comoving_distance_z1z2(self, z1, z2):
        r"""Comoving transverse distance in Mpc between two redshifts.
        This value is the transverse comoving distance at redshift ``z``
        corresponding to an angular separation of 1 radian. This is the same as
        the comoving distance if :math:`\Omega_k` is zero.
        For :math:`\Omega_{rad} = 0` the comoving distance can be directly
        calculated as an elliptic integral [1]_.
        Not valid or appropriate for flat cosmologies (Ok0=0).
        Parameters
        ----------
        z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshifts.
        Returns
        -------
        d : `~astropy.units.Quantity` ['length']
            Comoving distance in Mpc between each input redshift.
        References
        ----------
        .. [1] Kantowski, R., Kao, J., & Thomas, R. (2000). Distance-Redshift
           in Inhomogeneous FLRW. arXiv e-prints, astro-ph/0002334.
        """
        try:
            z1, z2 = np.broadcast_arrays(z1, z2)
        except ValueError as e:
            raise ValueError("z1 and z2 have different shapes") from e
        # The analytic solution is not valid for any of Om0, Ode0, Ok0 == 0.
        # Use the explicit integral solution for these cases.
        if self._Om0 == 0 or self._Ode0 == 0 or self._Ok0 == 0:
            return self._integral_comoving_distance_z1z2(z1, z2)
        # b is the discriminant-like parameter of Kantowski et al. (2000);
        # its value selects which real root structure (and hence which
        # elliptic-integral reduction) applies. kappa = sign(b).
        b = -(27. / 2) * self._Om0**2 * self._Ode0 / self._Ok0**3
        kappa = b / abs(b)
        if (b < 0) or (2 < b):
            # Single-real-root regime: reduce via the Cardano root y1 and
            # map each redshift to the elliptic amplitude phi_z.
            def phi_z(Om0, Ok0, kappa, y1, A, z):
                return np.arccos(((z + 1.0) * Om0 / abs(Ok0) + kappa * y1 - A) /
                                 ((z + 1.0) * Om0 / abs(Ok0) + kappa * y1 + A))
            v_k = pow(kappa * (b - 1) + sqrt(b * (b - 2)), 1. / 3)
            y1 = (-1 + kappa * (v_k + 1 / v_k)) / 3
            A = sqrt(y1 * (3 * y1 + 2))
            # g scales the elliptic integral; k2 is its parameter m = k^2.
            g = 1 / sqrt(A)
            k2 = (2 * A + kappa * (1 + 3 * y1)) / (4 * A)
            phi_z1 = phi_z(self._Om0, self._Ok0, kappa, y1, A, z1)
            phi_z2 = phi_z(self._Om0, self._Ok0, kappa, y1, A, z2)
        # Get lower-right 0<b<2 solution in Om0, Ode0 plane.
        # For the upper-left 0<b<2 solution the Big Bang didn't happen.
        elif (0 < b) and (b < 2) and self._Om0 > self._Ode0:
            # Three-real-root regime: y1 > y3 > y2 are the trigonometric
            # Cardano roots; the amplitude uses arcsin instead of arccos.
            def phi_z(Om0, Ok0, y1, y2, z):
                return np.arcsin(np.sqrt((y1 - y2) /
                                         ((z + 1.0) * Om0 / abs(Ok0) + y1)))
            yb = cos(acos(1 - b) / 3)
            yc = sqrt(3) * sin(acos(1 - b) / 3)
            y1 = (1. / 3) * (-1 + yb + yc)
            y2 = (1. / 3) * (-1 - 2 * yb)
            y3 = (1. / 3) * (-1 + yb - yc)
            g = 2 / sqrt(y1 - y2)
            k2 = (y1 - y3) / (y1 - y2)
            phi_z1 = phi_z(self._Om0, self._Ok0, y1, y2, z1)
            phi_z2 = phi_z(self._Om0, self._Ok0, y1, y2, z2)
        else:
            # Parameter combination with no analytic reduction implemented;
            # fall back to numerical integration.
            return self._integral_comoving_distance_z1z2(z1, z2)
        # Difference of incomplete elliptic integrals of the first kind
        # (scipy.special.ellipkinc), scaled by the curvature radius.
        prefactor = self._hubble_distance / sqrt(abs(self._Ok0))
        return prefactor * g * (ellipkinc(phi_z1, k2) - ellipkinc(phi_z2, k2))
def _dS_comoving_distance_z1z2(self, z1, z2):
r"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2`` in a flat, :math:`\Omega_{\Lambda}=1` cosmology
(de Sitter).
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
The de Sitter case has an analytic solution.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts. Must be 1D or scalar.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
try:
z1, z2 = np.broadcast_arrays(z1, z2)
except ValueError as e:
raise ValueError("z1 and z2 have different shapes") from e
return self._hubble_distance * (z2 - z1)
def _EdS_comoving_distance_z1z2(self, z1, z2):
r"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2`` in a flat, :math:`\Omega_M=1` cosmology
(Einstein - de Sitter).
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
For :math:`\Omega_M=1`, :math:`\Omega_{rad}=0` the comoving distance
has an analytic solution.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts. Must be 1D or scalar.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
try:
z1, z2 = np.broadcast_arrays(z1, z2)
except ValueError as e:
raise ValueError("z1 and z2 have different shapes") from e
prefactor = 2 * self._hubble_distance
return prefactor * ((z1 + 1.0)**(-1./2) - (z2 + 1.0)**(-1./2))
def _hypergeometric_comoving_distance_z1z2(self, z1, z2):
r"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2``.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
For :math:`\Omega_{rad} = 0` the comoving distance can be directly
calculated as a hypergeometric function [1]_.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
References
----------
.. [1] Baes, M., Camps, P., & Van De Putte, D. (2017). Analytical
expressions and numerical evaluation of the luminosity distance
in a flat cosmology. MNRAS, 468(1), 927-930.
"""
try:
z1, z2 = np.broadcast_arrays(z1, z2)
except ValueError as e:
raise ValueError("z1 and z2 have different shapes") from e
s = ((1 - self._Om0) / self._Om0) ** (1./3)
# Use np.sqrt here to handle negative s (Om0>1).
prefactor = self._hubble_distance / np.sqrt(s * self._Om0)
return prefactor * (self._T_hypergeometric(s / (z1 + 1.0)) -
self._T_hypergeometric(s / (z2 + 1.0)))
def _T_hypergeometric(self, x):
r"""Compute value using Gauss Hypergeometric function 2F1.
.. math::
T(x) = 2 \sqrt(x) _{2}F_{1}\left(\frac{1}{6}, \frac{1}{2};
\frac{7}{6}; -x^3 \right)
Notes
-----
The :func:`scipy.special.hyp2f1` code already implements the
hypergeometric transformation suggested by Baes et al. [1]_ for use in
actual numerical evaulations.
References
----------
.. [1] Baes, M., Camps, P., & Van De Putte, D. (2017). Analytical
expressions and numerical evaluation of the luminosity distance
in a flat cosmology. MNRAS, 468(1), 927-930.
"""
return 2 * np.sqrt(x) * hyp2f1(1./6, 1./2, 7./6, -x**3)
def _dS_age(self, z):
"""Age of the universe in Gyr at redshift ``z``.
The age of a de Sitter Universe is infinite.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
"""
t = (inf if isinstance(z, Number) else np.full_like(z, inf, dtype=float))
return self._hubble_time * t
def _EdS_age(self, z):
r"""Age of the universe in Gyr at redshift ``z``.
For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos)
the age can be directly calculated as an elliptic integral [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
References
----------
.. [1] Thomas, R., & Kantowski, R. (2000). Age-redshift relation for
standard cosmology. PRD, 62(10), 103507.
"""
return (2./3) * self._hubble_time * (aszarr(z) + 1.0) ** (-1.5)
def _flat_age(self, z):
r"""Age of the universe in Gyr at redshift ``z``.
For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos)
the age can be directly calculated as an elliptic integral [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
References
----------
.. [1] Thomas, R., & Kantowski, R. (2000). Age-redshift relation for
standard cosmology. PRD, 62(10), 103507.
"""
# Use np.sqrt, np.arcsinh instead of math.sqrt, math.asinh
# to handle properly the complex numbers for 1 - Om0 < 0
prefactor = (2./3) * self._hubble_time / np.emath.sqrt(1 - self._Om0)
arg = np.arcsinh(np.emath.sqrt((1 / self._Om0 - 1 + 0j) / (aszarr(z) + 1.0)**3))
return (prefactor * arg).real
def _EdS_lookback_time(self, z):
r"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos)
the age can be directly calculated as an elliptic integral.
The lookback time is here calculated based on the ``age(0) - age(z)``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
"""
return self._EdS_age(0) - self._EdS_age(z)
def _dS_lookback_time(self, z):
r"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos)
the age can be directly calculated.
.. math::
a = exp(H * t) \ \text{where t=0 at z=0}
t = (1/H) (ln 1 - ln a) = (1/H) (0 - ln (1/(1+z))) = (1/H) ln(1+z)
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
"""
return self._hubble_time * np.log(aszarr(z) + 1.0)
def _flat_lookback_time(self, z):
r"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos)
the age can be directly calculated.
The lookback time is here calculated based on the ``age(0) - age(z)``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
"""
return self._flat_age(0) - self._flat_age(z)
def efunc(self, z):
"""Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H(z) = H_0 E(z)`.
"""
# We override this because it takes a particularly simple
# form for a cosmological constant
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return np.sqrt(zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) + self._Ode0)
def inv_efunc(self, z):
r"""Function used to calculate :math:`\frac{1}{H_z}`.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The inverse redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H_z = H_0 / E`.
"""
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return (zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) + self._Ode0)**(-0.5)
class FlatLambdaCDM(FlatFLRWMixin, LambdaCDM):
    """FLRW cosmology with a cosmological constant and no curvature.
    This has no additional attributes beyond those of FLRW.
    Parameters
    ----------
    H0 : float or scalar quantity-like ['frequency']
        Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
    Om0 : float
        Omega matter: density of non-relativistic matter in units of the
        critical density at z=0.
    Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
        Setting this to zero will turn off both photons and neutrinos
        (even massive ones).
    Neff : float, optional
        Effective number of Neutrino species. Default 3.04.
    m_nu : quantity-like ['energy', 'mass'] or array-like, optional
        Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
        If this is a scalar Quantity, then all neutrino species are assumed to
        have that mass. Otherwise, the mass of each species. The actual number
        of neutrino species (and hence the number of elements of m_nu if it is
        not scalar) must be the floor of Neff. Typically this means you should
        provide three neutrino masses unless you are considering something like
        a sterile neutrino.
    Ob0 : float or None, optional
        Omega baryons: density of baryonic matter in units of the critical
        density at z=0. If this is set to None (the default), any computation
        that requires its value will raise an exception.
    name : str or None (optional, keyword-only)
        Name for this cosmological object.
    meta : mapping or None (optional, keyword-only)
        Metadata for the cosmology, e.g., a reference.
    Examples
    --------
    >>> from astropy.cosmology import FlatLambdaCDM
    >>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
    The comoving distance in Mpc at redshift z:
    >>> z = 0.5
    >>> dc = cosmo.comoving_distance(z)
    """
    def __init__(self, H0, Om0, Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV,
                 Ob0=None, *, name=None, meta=None):
        # Flatness is imposed by fixing Ode0=0 here; FlatFLRWMixin then
        # recomputes Ode0 from the other densities so that Ok0 == 0.
        super().__init__(H0=H0, Om0=Om0, Ode0=0.0, Tcmb0=Tcmb0, Neff=Neff,
                         m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
        # Please see :ref:`astropy-cosmology-fast-integrals` for discussion
        # about what is being done here: pick the fastest C-implemented
        # scalar 1/E(z) for the radiation/neutrino configuration.
        if self._Tcmb0.value == 0:
            # No radiation at all: simplest inverse E(z).
            self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc_norel
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0)
            # Repeat the optimization reassignments here because the init
            # of the LambdaCDM above didn't actually create a flat cosmology.
            # That was done through the explicit tweak setting self._Ok0.
            self._optimize_flat_norad()
        elif not self._massivenu:
            # Radiation present but all neutrinos massless.
            self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc_nomnu
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
                                           self._Ogamma0 + self._Onu0)
        else:
            # General case with massive neutrinos.
            self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
                                           self._Ogamma0, self._neff_per_nu,
                                           self._nmasslessnu,
                                           self._nu_y_list)
    def efunc(self, z):
        """Function used to calculate H(z), the Hubble parameter.
        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.
        Returns
        -------
        E : ndarray or float
            The redshift scaling of the Hubble constant.
            Returns `float` if the input is scalar.
            Defined such that :math:`H(z) = H_0 E(z)`.
        """
        # We override this because it takes a particularly simple
        # form for a cosmological constant (no curvature term).
        Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
                              else self._Ogamma0 * self.nu_relative_density(z))
        zp1 = aszarr(z) + 1.0  # (converts z [unit] -> z [dimensionless])
        return np.sqrt(zp1 ** 3 * (Or * zp1 + self._Om0) + self._Ode0)
    def inv_efunc(self, z):
        r"""Function used to calculate :math:`\frac{1}{H_z}`.
        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.
        Returns
        -------
        E : ndarray or float
            The inverse redshift scaling of the Hubble constant.
            Returns `float` if the input is scalar.
            Defined such that :math:`H_z = H_0 / E`.
        """
        # Same simplification as efunc, inverted.
        Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
                              else self._Ogamma0 * self.nu_relative_density(z))
        zp1 = aszarr(z) + 1.0  # (converts z [unit] -> z [dimensionless])
        return (zp1 ** 3 * (Or * zp1 + self._Om0) + self._Ode0)**(-0.5)
class wCDM(FLRW):
    """
    FLRW cosmology with a constant dark energy equation of state and curvature.
    This has one additional attribute beyond those of FLRW.
    Parameters
    ----------
    H0 : float or scalar quantity-like ['frequency']
        Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
    Om0 : float
        Omega matter: density of non-relativistic matter in units of the
        critical density at z=0.
    Ode0 : float
        Omega dark energy: density of dark energy in units of the critical
        density at z=0.
    w0 : float, optional
        Dark energy equation of state at all redshifts. This is
        pressure/density for dark energy in units where c=1. A cosmological
        constant has w0=-1.0.
    Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
        Setting this to zero will turn off both photons and neutrinos
        (even massive ones).
    Neff : float, optional
        Effective number of Neutrino species. Default 3.04.
    m_nu : quantity-like ['energy', 'mass'] or array-like, optional
        Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
        If this is a scalar Quantity, then all neutrino species are assumed to
        have that mass. Otherwise, the mass of each species. The actual number
        of neutrino species (and hence the number of elements of m_nu if it is
        not scalar) must be the floor of Neff. Typically this means you should
        provide three neutrino masses unless you are considering something like
        a sterile neutrino.
    Ob0 : float or None, optional
        Omega baryons: density of baryonic matter in units of the critical
        density at z=0. If this is set to None (the default), any computation
        that requires its value will raise an exception.
    name : str or None (optional, keyword-only)
        Name for this cosmological object.
    meta : mapping or None (optional, keyword-only)
        Metadata for the cosmology, e.g., a reference.
    Examples
    --------
    >>> from astropy.cosmology import wCDM
    >>> cosmo = wCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9)
    The comoving distance in Mpc at redshift z:
    >>> z = 0.5
    >>> dc = cosmo.comoving_distance(z)
    """
    # Descriptor: validated cosmological parameter, read as self._w0.
    w0 = Parameter(doc="Dark energy equation of state.", fvalidate="float")
    def __init__(self, H0, Om0, Ode0, w0=-1.0, Tcmb0=0.0*u.K, Neff=3.04,
                 m_nu=0.0*u.eV, Ob0=None, *, name=None, meta=None):
        super().__init__(H0=H0, Om0=Om0, Ode0=Ode0, Tcmb0=Tcmb0, Neff=Neff,
                         m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
        self.w0 = w0
        # Please see :ref:`astropy-cosmology-fast-integrals` for discussion
        # about what is being done here: pick the fastest C-implemented
        # scalar 1/E(z) for the radiation/neutrino configuration.
        if self._Tcmb0.value == 0:
            # No radiation at all.
            self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc_norel
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._w0)
        elif not self._massivenu:
            # Radiation present but all neutrinos massless.
            self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc_nomnu
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._Ogamma0 + self._Onu0,
                                           self._w0)
        else:
            # General case with massive neutrinos.
            self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._Ogamma0, self._neff_per_nu,
                                           self._nmasslessnu,
                                           self._nu_y_list, self._w0)
    def w(self, z):
        r"""Returns dark energy equation of state at redshift ``z``.
        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.
        Returns
        -------
        w : ndarray or float
            The dark energy equation of state
            Returns `float` if the input is scalar.
        Notes
        -----
        The dark energy equation of state is defined as
        :math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
        redshift z and :math:`\rho(z)` is the density at redshift z, both in
        units where c=1. Here this is :math:`w(z) = w_0`.
        """
        z = aszarr(z)
        # Constant w0, broadcast to the input shape for array input.
        return self._w0 * (np.ones(z.shape) if hasattr(z, "shape") else 1.0)
    def de_density_scale(self, z):
        r"""Evaluates the redshift dependence of the dark energy density.
        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.
        Returns
        -------
        I : ndarray or float
            The scaling of the energy density of dark energy with redshift.
            Returns `float` if the input is scalar.
        Notes
        -----
        The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
        and in this case is given by
        :math:`I = \left(1 + z\right)^{3\left(1 + w_0\right)}`
        """
        return (aszarr(z) + 1.0) ** (3.0 * (1. + self._w0))
    def efunc(self, z):
        """Function used to calculate H(z), the Hubble parameter.
        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.
        Returns
        -------
        E : ndarray or float
            The redshift scaling of the Hubble constant.
            Returns `float` if the input is scalar.
            Defined such that :math:`H(z) = H_0 E(z)`.
        """
        # Dark energy term picks up the (1+z)^{3(1+w0)} density scaling.
        Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
                              else self._Ogamma0 * self.nu_relative_density(z))
        zp1 = aszarr(z) + 1.0  # (converts z [unit] -> z [dimensionless])
        return np.sqrt(zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) +
                       self._Ode0 * zp1 ** (3. * (1. + self._w0)))
    def inv_efunc(self, z):
        r"""Function used to calculate :math:`\frac{1}{H_z}`.
        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.
        Returns
        -------
        E : ndarray or float
            The inverse redshift scaling of the Hubble constant.
            Returns `float` if the input is scalar.
            Defined such that :math:`H_z = H_0 / E`.
        """
        # Same expression as efunc, inverted.
        Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
                              else self._Ogamma0 * self.nu_relative_density(z))
        zp1 = aszarr(z) + 1.0  # (converts z [unit] -> z [dimensionless])
        return (zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) +
                self._Ode0 * zp1 ** (3. * (1. + self._w0)))**(-0.5)
class FlatwCDM(FlatFLRWMixin, wCDM):
    """
    FLRW cosmology with a constant dark energy equation of state and no spatial
    curvature.
    This has one additional attribute beyond those of FLRW.
    Parameters
    ----------
    H0 : float or scalar quantity-like ['frequency']
        Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
    Om0 : float
        Omega matter: density of non-relativistic matter in units of the
        critical density at z=0.
    w0 : float, optional
        Dark energy equation of state at all redshifts. This is
        pressure/density for dark energy in units where c=1. A cosmological
        constant has w0=-1.0.
    Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
        Setting this to zero will turn off both photons and neutrinos
        (even massive ones).
    Neff : float, optional
        Effective number of Neutrino species. Default 3.04.
    m_nu : quantity-like ['energy', 'mass'] or array-like, optional
        Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
        If this is a scalar Quantity, then all neutrino species are assumed to
        have that mass. Otherwise, the mass of each species. The actual number
        of neutrino species (and hence the number of elements of m_nu if it is
        not scalar) must be the floor of Neff. Typically this means you should
        provide three neutrino masses unless you are considering something like
        a sterile neutrino.
    Ob0 : float or None, optional
        Omega baryons: density of baryonic matter in units of the critical
        density at z=0. If this is set to None (the default), any computation
        that requires its value will raise an exception.
    name : str or None (optional, keyword-only)
        Name for this cosmological object.
    meta : mapping or None (optional, keyword-only)
        Metadata for the cosmology, e.g., a reference.
    Examples
    --------
    >>> from astropy.cosmology import FlatwCDM
    >>> cosmo = FlatwCDM(H0=70, Om0=0.3, w0=-0.9)
    The comoving distance in Mpc at redshift z:
    >>> z = 0.5
    >>> dc = cosmo.comoving_distance(z)
    """
    def __init__(self, H0, Om0, w0=-1.0, Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV,
                 Ob0=None, *, name=None, meta=None):
        # Flatness is imposed by fixing Ode0=0 here; FlatFLRWMixin then
        # recomputes Ode0 from the other densities so that Ok0 == 0.
        super().__init__(H0=H0, Om0=Om0, Ode0=0.0, w0=w0, Tcmb0=Tcmb0,
                         Neff=Neff, m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
        # Please see :ref:`astropy-cosmology-fast-integrals` for discussion
        # about what is being done here: pick the fastest C-implemented
        # scalar 1/E(z) for the radiation/neutrino configuration.
        if self._Tcmb0.value == 0:
            # No radiation at all.
            self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc_norel
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
                                           self._w0)
        elif not self._massivenu:
            # Radiation present but all neutrinos massless.
            self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc_nomnu
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
                                           self._Ogamma0 + self._Onu0,
                                           self._w0)
        else:
            # General case with massive neutrinos.
            self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
                                           self._Ogamma0, self._neff_per_nu,
                                           self._nmasslessnu,
                                           self._nu_y_list, self._w0)
    def efunc(self, z):
        """Function used to calculate H(z), the Hubble parameter.
        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.
        Returns
        -------
        E : ndarray or float
            The redshift scaling of the Hubble constant.
            Returns `float` if the input is scalar.
            Defined such that :math:`H(z) = H_0 E(z)`.
        """
        # Flat version of wCDM.efunc: no curvature (Ok0) term.
        Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
                              else self._Ogamma0 * self.nu_relative_density(z))
        zp1 = aszarr(z) + 1.0  # (converts z [unit] -> z [dimensionless])
        return np.sqrt(zp1 ** 3 * (Or * zp1 + self._Om0) +
                       self._Ode0 * zp1 ** (3. * (1 + self._w0)))
    def inv_efunc(self, z):
        r"""Function used to calculate :math:`\frac{1}{H_z}`.
        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.
        Returns
        -------
        E : ndarray or float
            The inverse redshift scaling of the Hubble constant.
            Returns `float` if the input is scalar.
            Defined such that :math:`H_z = H_0 / E`.
        """
        # Same expression as efunc, inverted.
        Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
                              else self._Ogamma0 * self.nu_relative_density(z))
        zp1 = aszarr(z) + 1.0  # (converts z [unit] -> z [dimensionless])
        return (zp1 ** 3 * (Or * zp1 + self._Om0) +
                self._Ode0 * zp1 ** (3. * (1. + self._w0)))**(-0.5)
class w0waCDM(FLRW):
    r"""FLRW cosmology with a CPL dark energy equation of state and curvature.
    The equation for the dark energy equation of state uses the
    CPL form as described in Chevallier & Polarski [1]_ and Linder [2]_:
    :math:`w(z) = w_0 + w_a (1-a) = w_0 + w_a z / (1+z)`.
    Parameters
    ----------
    H0 : float or scalar quantity-like ['frequency']
        Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
    Om0 : float
        Omega matter: density of non-relativistic matter in units of the
        critical density at z=0.
    Ode0 : float
        Omega dark energy: density of dark energy in units of the critical
        density at z=0.
    w0 : float, optional
        Dark energy equation of state at z=0 (a=1). This is pressure/density
        for dark energy in units where c=1.
    wa : float, optional
        Negative derivative of the dark energy equation of state with respect
        to the scale factor. A cosmological constant has w0=-1.0 and wa=0.0.
    Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
        Setting this to zero will turn off both photons and neutrinos
        (even massive ones).
    Neff : float, optional
        Effective number of Neutrino species. Default 3.04.
    m_nu : quantity-like ['energy', 'mass'] or array-like, optional
        Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
        If this is a scalar Quantity, then all neutrino species are assumed to
        have that mass. Otherwise, the mass of each species. The actual number
        of neutrino species (and hence the number of elements of m_nu if it is
        not scalar) must be the floor of Neff. Typically this means you should
        provide three neutrino masses unless you are considering something like
        a sterile neutrino.
    Ob0 : float or None, optional
        Omega baryons: density of baryonic matter in units of the critical
        density at z=0. If this is set to None (the default), any computation
        that requires its value will raise an exception.
    name : str or None (optional, keyword-only)
        Name for this cosmological object.
    meta : mapping or None (optional, keyword-only)
        Metadata for the cosmology, e.g., a reference.
    Examples
    --------
    >>> from astropy.cosmology import w0waCDM
    >>> cosmo = w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.2)
    The comoving distance in Mpc at redshift z:
    >>> z = 0.5
    >>> dc = cosmo.comoving_distance(z)
    References
    ----------
    .. [1] Chevallier, M., & Polarski, D. (2001). Accelerating Universes with
           Scaling Dark Matter. International Journal of Modern Physics D,
           10(2), 213-223.
    .. [2] Linder, E. (2003). Exploring the Expansion History of the
           Universe. Phys. Rev. Lett., 90, 091301.
    """
    # Descriptors: validated cosmological parameters, read as self._w0 / self._wa.
    w0 = Parameter(doc="Dark energy equation of state at z=0.", fvalidate="float")
    wa = Parameter(doc="Negative derivative of dark energy equation of state w.r.t. a.",
                   fvalidate="float")
    def __init__(self, H0, Om0, Ode0, w0=-1.0, wa=0.0, Tcmb0=0.0*u.K, Neff=3.04,
                 m_nu=0.0*u.eV, Ob0=None, *, name=None, meta=None):
        super().__init__(H0=H0, Om0=Om0, Ode0=Ode0, Tcmb0=Tcmb0, Neff=Neff,
                         m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
        self.w0 = w0
        self.wa = wa
        # Please see :ref:`astropy-cosmology-fast-integrals` for discussion
        # about what is being done here: pick the fastest C-implemented
        # scalar 1/E(z) for the radiation/neutrino configuration.
        if self._Tcmb0.value == 0:
            # No radiation at all.
            self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc_norel
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._w0, self._wa)
        elif not self._massivenu:
            # Radiation present but all neutrinos massless.
            self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc_nomnu
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._Ogamma0 + self._Onu0,
                                           self._w0, self._wa)
        else:
            # General case with massive neutrinos.
            self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._Ogamma0, self._neff_per_nu,
                                           self._nmasslessnu,
                                           self._nu_y_list, self._w0,
                                           self._wa)
    def w(self, z):
        r"""Returns dark energy equation of state at redshift ``z``.
        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.
        Returns
        -------
        w : ndarray or float
            The dark energy equation of state
            Returns `float` if the input is scalar.
        Notes
        -----
        The dark energy equation of state is defined as
        :math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
        redshift z and :math:`\rho(z)` is the density at redshift z, both in
        units where c=1. Here this is
        :math:`w(z) = w_0 + w_a (1 - a) = w_0 + w_a \frac{z}{1+z}`.
        """
        z = aszarr(z)
        return self._w0 + self._wa * z / (z + 1.0)
    def de_density_scale(self, z):
        r"""Evaluates the redshift dependence of the dark energy density.
        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.
        Returns
        -------
        I : ndarray or float
            The scaling of the energy density of dark energy with redshift.
            Returns `float` if the input is scalar.
        Notes
        -----
        The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
        and in this case is given by
        .. math::
           I = \left(1 + z\right)^{3 \left(1 + w_0 + w_a\right)}
               \exp \left(-3 w_a \frac{z}{1+z}\right)
        """
        z = aszarr(z)
        zp1 = z + 1.0  # (converts z [unit] -> z [dimensionless])
        # Closed-form integral of 3(1+w(z'))/(1+z') for the CPL w(z).
        return zp1 ** (3 * (1 + self._w0 + self._wa)) * np.exp(-3 * self._wa * z / zp1)
class Flatw0waCDM(FlatFLRWMixin, w0waCDM):
"""FLRW cosmology with a CPL dark energy equation of state and no
curvature.
The equation for the dark energy equation of state uses the CPL form as
described in Chevallier & Polarski [1]_ and Linder [2]_:
:math:`w(z) = w_0 + w_a (1-a) = w_0 + w_a z / (1+z)`.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
w0 : float, optional
Dark energy equation of state at z=0 (a=1). This is pressure/density
for dark energy in units where c=1.
wa : float, optional
Negative derivative of the dark energy equation of state with respect
to the scale factor. A cosmological constant has w0=-1.0 and wa=0.0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import Flatw0waCDM
>>> cosmo = Flatw0waCDM(H0=70, Om0=0.3, w0=-0.9, wa=0.2)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
References
----------
.. [1] Chevallier, M., & Polarski, D. (2001). Accelerating Universes with
Scaling Dark Matter. International Journal of Modern Physics D,
10(2), 213-223.
.. [2] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
"""
    def __init__(self, H0, Om0, w0=-1.0, wa=0.0, Tcmb0=0.0*u.K, Neff=3.04,
                 m_nu=0.0*u.eV, Ob0=None, *, name=None, meta=None):
        """Initialize a flat CPL (w0/wa) dark-energy cosmology.

        ``Ode0`` is not a free parameter for a flat model: 0.0 is passed to
        the parent, which recomputes it from the flatness condition.  After
        the generic set-up, the fastest compiled inverse-E(z) kernel that is
        valid for the requested radiation/neutrino content is selected.
        """
        super().__init__(H0=H0, Om0=Om0, Ode0=0.0, w0=w0, wa=wa, Tcmb0=Tcmb0,
                         Neff=Neff, m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)

        # Please see :ref:`astropy-cosmology-fast-integrals` for discussion
        # about what is being done here.
        if self._Tcmb0.value == 0:
            # No radiation contribution at all: cheapest kernel.
            self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc_norel
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
                                           self._w0, self._wa)
        elif not self._massivenu:
            # Photons plus massless neutrinos: single relativistic density.
            self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc_nomnu
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
                                           self._Ogamma0 + self._Onu0,
                                           self._w0, self._wa)
        else:
            # General case: massive neutrinos need the per-species masses.
            self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
                                           self._Ogamma0, self._neff_per_nu,
                                           self._nmasslessnu,
                                           self._nu_y_list, self._w0,
                                           self._wa)
class wpwaCDM(FLRW):
    r"""
    FLRW cosmology with a CPL dark energy equation of state, a pivot redshift,
    and curvature.

    The equation for the dark energy equation of state uses the CPL form as
    described in Chevallier & Polarski [1]_ and Linder [2]_, but modified to
    have a pivot redshift as in the findings of the Dark Energy Task Force
    [3]_: :math:`w(a) = w_p + w_a (a_p - a) = w_p + w_a( 1/(1+zp) - 1/(1+z) )`.

    Parameters
    ----------
    H0 : float or scalar quantity-like ['frequency']
        Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].

    Om0 : float
        Omega matter: density of non-relativistic matter in units of the
        critical density at z=0.

    Ode0 : float
        Omega dark energy: density of dark energy in units of the critical
        density at z=0.

    wp : float, optional
        Dark energy equation of state at the pivot redshift zp. This is
        pressure/density for dark energy in units where c=1.

    wa : float, optional
        Negative derivative of the dark energy equation of state with respect
        to the scale factor. A cosmological constant has wp=-1.0 and wa=0.0.

    zp : float or quantity-like ['redshift'], optional
        Pivot redshift -- the redshift where w(z) = wp

    Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
        Setting this to zero will turn off both photons and neutrinos
        (even massive ones).

    Neff : float, optional
        Effective number of Neutrino species. Default 3.04.

    m_nu : quantity-like ['energy', 'mass'] or array-like, optional
        Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
        If this is a scalar Quantity, then all neutrino species are assumed to
        have that mass. Otherwise, the mass of each species. The actual number
        of neutrino species (and hence the number of elements of m_nu if it is
        not scalar) must be the floor of Neff. Typically this means you should
        provide three neutrino masses unless you are considering something like
        a sterile neutrino.

    Ob0 : float or None, optional
        Omega baryons: density of baryonic matter in units of the critical
        density at z=0. If this is set to None (the default), any computation
        that requires its value will raise an exception.

    name : str or None (optional, keyword-only)
        Name for this cosmological object.

    meta : mapping or None (optional, keyword-only)
        Metadata for the cosmology, e.g., a reference.

    Examples
    --------
    >>> from astropy.cosmology import wpwaCDM
    >>> cosmo = wpwaCDM(H0=70, Om0=0.3, Ode0=0.7, wp=-0.9, wa=0.2, zp=0.4)

    The comoving distance in Mpc at redshift z:

    >>> z = 0.5
    >>> dc = cosmo.comoving_distance(z)

    References
    ----------
    .. [1] Chevallier, M., & Polarski, D. (2001). Accelerating Universes with
           Scaling Dark Matter. International Journal of Modern Physics D,
           10(2), 213-223.
    .. [2] Linder, E. (2003). Exploring the Expansion History of the
           Universe. Phys. Rev. Lett., 90, 091301.
    .. [3] Albrecht, A., Amendola, L., Bernstein, G., Clowe, D., Eisenstein,
           D., Guzzo, L., Hirata, C., Huterer, D., Kirshner, R., Kolb, E., &
           Nichol, R. (2009). Findings of the Joint Dark Energy Mission Figure
           of Merit Science Working Group. arXiv e-prints, arXiv:0901.0721.
    """

    # Validated descriptor attributes; the Parameter machinery is defined on
    # the FLRW base class and exposes these as read-only public properties.
    wp = Parameter(doc="Dark energy equation of state at the pivot redshift zp.", fvalidate="float")
    wa = Parameter(doc="Negative derivative of dark energy equation of state w.r.t. a.",
                   fvalidate="float")
    zp = Parameter(doc="The pivot redshift, where w(z) = wp.", unit=cu.redshift)

    def __init__(self, H0, Om0, Ode0, wp=-1.0, wa=0.0, zp=0.0 * cu.redshift,
                 Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV, Ob0=None, *,
                 name=None, meta=None):
        super().__init__(H0=H0, Om0=Om0, Ode0=Ode0, Tcmb0=Tcmb0, Neff=Neff,
                         m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
        self.wp = wp
        self.wa = wa
        self.zp = zp

        # Please see :ref:`astropy-cosmology-fast-integrals` for discussion
        # about what is being done here.
        # apiv is the scale factor at the pivot redshift; the compiled kernels
        # evaluate w(z) in the same pivot form as ``w`` below.
        apiv = 1.0 / (1.0 + self._zp.value)
        if self._Tcmb0.value == 0:
            # No radiation contribution at all: cheapest kernel.
            self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc_norel
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._wp, apiv, self._wa)
        elif not self._massivenu:
            # Photons plus massless neutrinos: single relativistic density.
            self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc_nomnu
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._Ogamma0 + self._Onu0,
                                           self._wp, apiv, self._wa)
        else:
            # General case: massive neutrinos need the per-species masses.
            self._inv_efunc_scalar = scalar_inv_efuncs.wpwacdm_inv_efunc
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._Ogamma0, self._neff_per_nu,
                                           self._nmasslessnu,
                                           self._nu_y_list, self._wp,
                                           apiv, self._wa)

    def w(self, z):
        r"""Returns dark energy equation of state at redshift ``z``.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        w : ndarray or float
            The dark energy equation of state
            Returns `float` if the input is scalar.

        Notes
        -----
        The dark energy equation of state is defined as
        :math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
        redshift z and :math:`\rho(z)` is the density at redshift z, both in
        units where c=1. Here this is :math:`w(z) = w_p + w_a (a_p - a)` where
        :math:`a = 1/1+z` and :math:`a_p = 1 / 1 + z_p`.
        """
        apiv = 1.0 / (1.0 + self._zp.value)
        return self._wp + self._wa * (apiv - 1.0 / (aszarr(z) + 1.0))

    def de_density_scale(self, z):
        r"""Evaluates the redshift dependence of the dark energy density.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        I : ndarray or float
            The scaling of the energy density of dark energy with redshift.
            Returns `float` if the input is scalar.

        Notes
        -----
        The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
        and in this case is given by

        .. math::

           a_p = \frac{1}{1 + z_p}

           I = \left(1 + z\right)^{3 \left(1 + w_p + a_p w_a\right)}
               \exp \left(-3 w_a \frac{z}{1+z}\right)
        """
        # Closed form of exp(3 * integral_0^z (1 + w(z'))/(1 + z') dz')
        # for the pivot CPL parameterization.
        z = aszarr(z)
        zp1 = z + 1.0  # (converts z [unit] -> z [dimensionless])
        apiv = 1. / (1. + self._zp.value)
        return zp1 ** (3. * (1. + self._wp + apiv * self._wa)) * \
            np.exp(-3. * self._wa * z / zp1)
class w0wzCDM(FLRW):
    r"""
    FLRW cosmology with a variable dark energy equation of state and curvature.

    The equation for the dark energy equation of state uses the simple form:
    :math:`w(z) = w_0 + w_z z`.

    This form is not recommended for z > 1.

    Parameters
    ----------
    H0 : float or scalar quantity-like ['frequency']
        Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].

    Om0 : float
        Omega matter: density of non-relativistic matter in units of the
        critical density at z=0.

    Ode0 : float
        Omega dark energy: density of dark energy in units of the critical
        density at z=0.

    w0 : float, optional
        Dark energy equation of state at z=0. This is pressure/density for
        dark energy in units where c=1.

    wz : float, optional
        Derivative of the dark energy equation of state with respect to z.
        A cosmological constant has w0=-1.0 and wz=0.0.

    Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
        Setting this to zero will turn off both photons and neutrinos
        (even massive ones).

    Neff : float, optional
        Effective number of Neutrino species. Default 3.04.

    m_nu : quantity-like ['energy', 'mass'] or array-like, optional
        Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
        If this is a scalar Quantity, then all neutrino species are assumed to
        have that mass. Otherwise, the mass of each species. The actual number
        of neutrino species (and hence the number of elements of m_nu if it is
        not scalar) must be the floor of Neff. Typically this means you should
        provide three neutrino masses unless you are considering something like
        a sterile neutrino.

    Ob0 : float or None, optional
        Omega baryons: density of baryonic matter in units of the critical
        density at z=0. If this is set to None (the default), any computation
        that requires its value will raise an exception.

    name : str or None (optional, keyword-only)
        Name for this cosmological object.

    meta : mapping or None (optional, keyword-only)
        Metadata for the cosmology, e.g., a reference.

    Examples
    --------
    >>> from astropy.cosmology import w0wzCDM
    >>> cosmo = w0wzCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wz=0.2)

    The comoving distance in Mpc at redshift z:

    >>> z = 0.5
    >>> dc = cosmo.comoving_distance(z)
    """

    w0 = Parameter(doc="Dark energy equation of state at z=0.", fvalidate="float")
    wz = Parameter(doc="Derivative of the dark energy equation of state w.r.t. z.", fvalidate="float")

    def __init__(self, H0, Om0, Ode0, w0=-1.0, wz=0.0, Tcmb0=0.0*u.K, Neff=3.04,
                 m_nu=0.0*u.eV, Ob0=None, *, name=None, meta=None):
        super().__init__(H0=H0, Om0=Om0, Ode0=Ode0, Tcmb0=Tcmb0, Neff=Neff,
                         m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
        self.w0 = w0
        self.wz = wz

        # Please see :ref:`astropy-cosmology-fast-integrals` for discussion
        # about what is being done here.
        # NOTE(review): the compiled w0wzcdm_inv_efunc* kernels must use the
        # same (positive) sign convention on wz as ``de_density_scale`` below
        # -- verify scalar_inv_efuncs agrees after the sign fix.
        if self._Tcmb0.value == 0:
            # No radiation contribution at all: cheapest kernel.
            self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc_norel
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._w0, self._wz)
        elif not self._massivenu:
            # Photons plus massless neutrinos: single relativistic density.
            self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc_nomnu
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._Ogamma0 + self._Onu0,
                                           self._w0, self._wz)
        else:
            # General case: massive neutrinos need the per-species masses.
            self._inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc
            self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
                                           self._Ogamma0, self._neff_per_nu,
                                           self._nmasslessnu,
                                           self._nu_y_list, self._w0,
                                           self._wz)

    def w(self, z):
        r"""Returns dark energy equation of state at redshift ``z``.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        w : ndarray or float
            The dark energy equation of state.
            Returns `float` if the input is scalar.

        Notes
        -----
        The dark energy equation of state is defined as
        :math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
        redshift z and :math:`\rho(z)` is the density at redshift z, both in
        units where c=1. Here this is given by :math:`w(z) = w_0 + w_z z`.
        """
        return self._w0 + self._wz * aszarr(z)

    def de_density_scale(self, z):
        r"""Evaluates the redshift dependence of the dark energy density.

        Parameters
        ----------
        z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
            Input redshift.

        Returns
        -------
        I : ndarray or float
            The scaling of the energy density of dark energy with redshift.
            Returns `float` if the input is scalar.

        Notes
        -----
        The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
        and in this case is given by

        .. math::

           I = \left(1 + z\right)^{3 \left(1 + w_0 - w_z\right)}
               \exp \left(3 w_z z\right)
        """
        # With w(z') = w0 + wz*z', the integral
        # 3 * int_0^z (1 + w(z'))/(1 + z') dz'
        # evaluates to 3*(1 + w0 - wz)*ln(1+z) + 3*wz*z, so the exponential
        # term must carry a *positive* sign on wz (the previous -3*wz*z was a
        # sign error; see the corresponding fix in astropy v5.3).
        z = aszarr(z)
        zp1 = z + 1.0  # (converts z [unit] -> z [dimensionless])
        return zp1 ** (3. * (1. + self._w0 - self._wz)) * np.exp(3. * self._wz * z)
| pllim/astropy | astropy/cosmology/flrw.py | Python | bsd-3-clause | 113,801 | [
"Galaxy"
] | a563c44ea373606c2aa93c54655c2c69444c59dea5b4678d57d02e363f4b431e |
# Author: Samuel Genheden samuel.genheden@gmail.com
"""
Program to build a CG or AA/CG dual-resolution
system from an AA system. Converts AA residues
automatically to CG using dictionary of conversions.
Writes out LAMMPS datafiles and include file
for the force field, as well as a PDB-file.
"""
import sys
import math
import random
import argparse
import os
import copy
import numpy as np
from scipy.spatial.distance import cdist
from sgenlib import lammps
from sgenlib import pdb
def _ntypes(array):
    """
    Return the number of types in a list of objects carrying an ``idx``
    attribute (e.g. masses or connectivities), i.e. the largest ``idx``
    seen, or 0 for an empty list.
    """
    indices = [item.idx for item in array]
    return max([0] + indices)
def _generate_aa_residue(residue, molidx, resdata, sysdata):
    """
    Generates an aa residue by copying most of the structure
    from a datafile template, but the coordinates from a PDB residue

    The template atoms are deep-copied so the template is never mutated;
    atom and connectivity indices are offset by the number of atoms already
    in ``sysdata`` so the new residue appends cleanly.

    Parameters
    ----------
    residue : PDB residue supplying the coordinates
    molidx  : molecule index assigned to every copied atom
    resdata : lammps.Datafile template for this residue type
    sysdata : lammps.Datafile being built; atoms/bonds/angles/dihedrals
              are appended in place
    """
    n = len(sysdata.atoms)
    # zip pairs template atoms with PDB atoms; the loop index from the
    # original enumerate was unused, so plain zip suffices.
    for ratom, datom in zip(residue.atoms, resdata.atoms):
        atom = copy.deepcopy(datom)
        atom.idx = atom.idx + n
        atom.set_xyz(ratom.xyz)
        atom.molecule = molidx
        sysdata.atoms.append(atom)
    # Connectivity records are (type, atom indices...); only the atom
    # indices are shifted by n, the type index is kept as-is.
    for bond in resdata.bonds:
        b = lammps.Connectivity(record="%d %d %d %d" % (len(sysdata.bonds) + 1, bond[0], bond[1] + n, bond[2] + n))
        sysdata.bonds.append(b)
    for angle in resdata.angles:
        a = lammps.Connectivity(record="%d %d %d %d %d" % (len(sysdata.angles) + 1, angle[0], angle[1] + n, angle[2] + n, angle[3] + n))
        sysdata.angles.append(a)
    for dihedral in resdata.dihedrals:
        a = lammps.Connectivity(record="%d %d %d %d %d %d" % (len(sysdata.dihedrals) + 1, dihedral[0], dihedral[1] + n, dihedral[2] + n, dihedral[3] + n, dihedral[4] + n))
        sysdata.dihedrals.append(a)
def _generate_aa_wat(residue, molidx, sysdata, include):
    """
    Appends one all-atom (3-site) water molecule to ``sysdata``, taking the
    coordinates from a PDB residue.

    Assumes the O and H atom types are the LAST TWO entries of
    ``include.masses`` (i.e. ``_add_water_param`` was called beforehand) and
    that the O-H bond / H-O-H angle types are the last entries of
    ``include.bondparams`` / ``include.angleparams`` -- TODO confirm callers
    always pair this with _add_water_param.
    """
    n = len(sysdata.atoms)
    # TIP3P-like charges; masses look hydrogen-mass-repartitioned
    # (H = 3.024, O reduced accordingly) -- NOTE(review): confirm intent.
    charges = [ -0.834, 0.417, 0.417]
    mass = [9.9514, 3.024, 3.024]
    nt = len(include.masses)
    # Atom types: O is the second-to-last mass entry, H the last.
    types = [nt-1,nt,nt]
    for i, ratom in enumerate(residue.atoms):
        atom = lammps.Atom()
        atom.idx = n + i + 1
        atom.set_xyz(ratom.xyz)
        atom.molecule = molidx
        atom.q = charges[i]
        atom.atype = types[i]
        atom.comment = "# water"
        # Point particle: no size/dipole, density field carries the mass.
        atom.diameter = 0.0
        atom.density = mass[i]
        atom.set_mu([0.0, 0.0, 0.0])
        sysdata.atoms.append(atom)
    # Two O-H bonds using the last bond type added by _add_water_param.
    nb = len(include.bondparams)
    b = lammps.Connectivity(record="%d %d %d %d" % (len(sysdata.bonds) + 1, nb, n+1, n+2))
    b.comment = "# O-H bond"
    sysdata.bonds.append(b)
    b = lammps.Connectivity(record="%d %d %d %d" % (len(sysdata.bonds) + 1, nb, n+1, n+3))
    b.comment = "# O-H bond"
    sysdata.bonds.append(b)
    # One H-O-H angle using the last angle type.
    na = len(include.angleparams)
    a = lammps.Connectivity(record="%d %d %d %d %d" % (len(sysdata.angles) + 1, na, n+2, n+1, n+3))
    a.comment = "# H-O-H angle"
    sysdata.angles.append(a)
def _add_water_param(datafile):
    """
    Appends atom, bond and angle types for a 3-site water model to a
    lammps.Datafile.

    The LJ parameters (O: eps=0.1521, sigma=3.1507), bond (450.0, 0.9572)
    and angle (55.0, 104.52) match the classic TIP3P model; the masses
    (O=9.9514, H=3.024) look hydrogen-mass-repartitioned -- NOTE(review):
    confirm against the intended water model.
    """
    n = _ntypes(datafile.atomtypes)
    # Oxygen type
    datafile.atomtypes.append(lammps.AtomType())
    datafile.atomtypes[-1].idx = n + 1
    datafile.atomtypes[-1].mass = 9.9514
    datafile.atomtypes[-1].epsilon = 0.1521
    datafile.atomtypes[-1].sigma = 3.1507
    # Hydrogen type (no LJ interaction, as in TIP3P)
    datafile.atomtypes.append(lammps.AtomType())
    datafile.atomtypes[-1].idx = n + 2
    datafile.atomtypes[-1].mass = 3.024
    datafile.atomtypes[-1].epsilon = 0.0
    datafile.atomtypes[-1].sigma = 0.0
    # O-H bond type
    n = _ntypes(datafile.bondtypes)
    datafile.bondtypes.append(lammps.ConnectivityParam())
    datafile.bondtypes[-1].idx = n + 1
    datafile.bondtypes[-1].params = ["450.0", "0.9572"]
    # H-O-H angle type
    n = _ntypes(datafile.angletypes)
    datafile.angletypes.append(lammps.ConnectivityParam())
    datafile.angletypes[-1].idx = n + 1
    datafile.angletypes[-1].params = ["55.0", "104.52"]
if __name__ == '__main__':

    # Command-line input
    parser = argparse.ArgumentParser(description="Converting a AA to CG or AA/CG")
    parser.add_argument('file', help="the PDB or GRO file")
    parser.add_argument('-i', '--include', help="the LAMMPS include file")
    parser.add_argument('-o', '--out', help="the output prefix", default="converted")
    parser.add_argument('-b', '--box', type=float, nargs="+", help="the box dimensions", default=[0.0, 0.0, 0.0])
    parser.add_argument('-a', '--atomistic', nargs="+", help="data file(s) for atomistic solutes", default=[])
    parser.add_argument('-c', '--converter', help="the dictionary with conversion rules")
    parser.add_argument('-p', '--pairfunc', help="the pair function for the AA", default="lj/charmm/coul/long")
    # NOTE(review): watrad is declared as a radius (float) but is used below
    # as a *count* of waters in the [:args.watrad] slice -- confirm intent.
    parser.add_argument('-w', '--watrad', type=float, help="the water radius to keep atomistic")
    args = parser.parse_args()

    # Load a converter
    converter = lammps.Aa2Cg()
    if args.converter is None:
        converter.read(lammps.get_filename("aa2cg.dat"))  # The default
    else:
        converter.read(args.converter)

    # Create a Datafile and PDBFile
    pdbfile = pdb.PDBFile(args.file)  # Input PDB
    # takeres flags which residues still need CG conversion; residues kept
    # all-atom below are flagged False.
    takeres = [True for res in pdbfile.residues]
    data = lammps.Datafile()
    pdbout = pdb.PDBFile()  # Output PDB

    # Load the force field file
    include = lammps.Includefile(args.include)

    if args.atomistic:
        # At the moment, multiple AA solutes are not supported with atomistic water radius
        if len(args.atomistic) > 1 :
            args.watrad = None

        # Counts of types BEFORE extending the force field; used to offset
        # the type indices of the atomistic template data below.
        natomtypes = _ntypes(include.masses)
        contypes = [_ntypes(include.bondparams), _ntypes(include.angleparams), _ntypes(include.dihedralparams)]

        # Set the functional form of the CG particles if will retain some atomistic molecules
        for pair in include.pair_coeff:
            pair.func = "lj/sf/dipole/sf"
            pair.hybrid = 2

    # Load datafiles for given solutes, will assume these are atomistic
    aa_datafiles = {}
    aa_range = None
    for sol in args.atomistic:
        res, filename = sol.split("=")
        res = res.lower()
        # A key of the form ":first-last" selects a 1-based residue range
        # instead of a residue name.
        if res[0] == ":":
            aa_range = res
        aa_datafiles[res] = lammps.Datafile(filename)
        if args.watrad is not None:
            _add_water_param(aa_datafiles[res])
        # Extend the force field parameters
        # by extending the inclusion file, we will automatically update the parameter index
        ELBA_FUNC = "lj/sf/dipole/sf"
        include.extend_from_data(aa_datafiles[res], lj_hybrid=-1, lj_func=args.pairfunc,
                                 lj_hybrid_mix={ELBA_FUNC: 1, args.pairfunc: 2}, lj_func_mix={ELBA_FUNC: ELBA_FUNC, args.pairfunc: args.pairfunc}, ang_func="harmonic")
        # Update the atom and conectivity parameters
        for atom in aa_datafiles[res].atoms:
            atom.atype = atom.atype + natomtypes
            atom.diameter = 0.0
            atom.density = aa_datafiles[res].atomtypes[atom.atype - natomtypes - 1].mass
            atom.set_mu([0.0, 0.0, 0.0])
        conlist = [aa_datafiles[res].bonds, aa_datafiles[res].angles, aa_datafiles[res].dihedrals]
        for cons, ntypes in zip(conlist, contypes):
            for con in cons:
                con.param = con.param + ntypes
        # Refresh the atom-type offset for the next solute; NOTE(review):
        # contypes is NOT refreshed here -- verify behaviour with more than
        # one atomistic solute.
        natomtypes = _ntypes(include.masses)

    # Add a range of all-atom residues to the data file
    moli = 0
    if aa_range is not None:
        first, last = map(lambda x: int(x) - 1, aa_range[1:].split("-"))
        resall = pdb.Residue()
        allres = []
        for i in range(first, last + 1):
            for atom in pdbfile.residues[i].atoms:
                resall.append(atom)
            allres.append(pdbfile.residues[i])
            takeres[i] = False
        _generate_aa_residue(resall, 1, aa_datafiles[aa_range], data)
        moli = 1

        # Optionally keep the waters closest to the solute all-atom
        if args.watrad is not None:
            ninside = 0
            # Mean center-of-mass of the all-atom residues (1x3 list-of-array)
            com = [np.asarray([res.collect("centerofmass") for res in allres]).mean(axis=0)]
            watres = []
            mindist = []
            for i,residue in enumerate(pdbfile.residues) :
                if not residue.resname in ["HOH","WAT","SOL"] : continue
                # Squared distance from the first (O) atom to the solute com
                mindist.append(np.sum((com-residue.atoms[0].xyz)**2))
                #xyz1 = pdbfile.xyz[residue.atoms[0].idx:residue.atoms[-1].idx+1,:]
                #print xyz1[0],
                #mindist.append(cdist(xyz1,com,"sqeuclidean").min())
                watres.append((i,residue))
            mindist = np.asarray(mindist)
            # Keep the args.watrad closest waters as all-atom (molecule 2)
            for ri in np.argsort(mindist)[:args.watrad]:
                i,residue = watres[ri]
                _generate_aa_wat(residue, 2, data, include)
                allres.append(residue)
                takeres[i] = False
                ninside += 1
            moli = 2
            print "Kept %d water molecules as all-atom"%ninside
        pdbout.extend_residues(allres, dochains=False)

    # Convert residues
    all_coords = []
    nwat = 0
    for i, (res, takethis) in enumerate(zip(pdbfile.residues, takeres)):
        if not takethis:
            continue
        moli += 1
        res2 = res.resname.strip().lower()
        found = False
        # If we have an all-atom datafile as a template, keep it as all-atom
        # NOTE(review): this branch does not extend all_coords -- confirm the
        # box heuristics below are meant to ignore these atoms.
        if res2 in aa_datafiles:
            _generate_aa_residue(res, moli - nwat, aa_datafiles[res2], data)
            coord = res.collect("xyz")
            pdb.make_pdbres(coord, [atom.name for atom in res.atoms], res2, pdbout)
            found = True
        # Otherwise convert it to CG
        else:
            for residue in converter.residues:
                if residue.name == res2:
                    coord = residue.generate_cg(res, moli, data)
                    all_coords.extend(coord)
                    pdb.make_pdbres(coord, residue.cg_names, res2, pdbout)
                    found = True
                    break
        # If we could not find a conversion, we will convert the residue to a water bead
        if not found:
            for residue in converter.residues:
                if residue.name == "wat":
                    nwat = nwat + 1
                    coord = residue.generate_cg(res, 0, data)
                    all_coords.extend(coord)
                    pdb.make_pdbres(coord, residue.cg_names, "wat", pdbout)

    all_coords = np.array(all_coords)
    print "Minimum of coordinates = %.3f %.3f %.3f" % tuple(all_coords.min(axis=0))
    print "Maximum of coordinates = %.3f %.3f %.3f" % tuple(all_coords.max(axis=0))
    print "Average of coordinates = %.3f %.3f %.3f" % tuple(all_coords.mean(axis=0))

    # Settings the correct number of atom and connectivity types
    data.atomtypes = [None] * len(include.masses)
    data.bondtypes = [None] * len(include.bondparams)
    data.angletypes = [None] * len(include.angleparams)
    data.dihedraltypes = [None] * len(include.dihedralparams)

    # Setting the box of the datafile
    if len(args.box) == 6 :
        data.box = args.box
    else :
        if all_coords.mean(axis=0).sum() > 10: # Checking if center is at origin or not
            data.box = [0.0, 0.0, 0.0, args.box[0], args.box[1], args.box[2]]
        else:
            data.box = [-args.box[0] / 2.0, -args.box[1] / 2.0, -args.box[2] / 2.0, args.box[0] / 2.0, args.box[1] / 2.0, args.box[2] / 2.0]

    # Setting correct type for all atoms
    if args.atomistic:
        for atom in data.atoms:
            atom.kind = "cg/aa"

    # Setting box and adding connectivity to PDB-file
    pdbout.box = args.box

    # Callback passed to PDBFile.write to emit CONECT records for all bonds
    def add_con(pdbfile, f):
        for bnd in data.bonds:
            f.write("CONECT%5d%5d\n" % (bnd.atoms[0], bnd.atoms[1]))

    # Write out datafile, pdbfile and force field
    print "Saving LAMMPS data file to %s" % ("data." + args.out)
    data.write("data." + args.out)
    print "Saving PDB file to %s" % (args.out + ".pdb")
    pdbout.write(args.out + ".pdb", add_extra=add_con)
    if args.atomistic:
        print "Saving LAMMPS inclusion file to %s" % ("forcefield." + args.out)
        include.write("forcefield." + args.out)
"CHARMM",
"LAMMPS"
] | 1db94d924b93308acb8bc7934f9aacbeac88297e8e771a3abdb335e9d05bc5f3 |
import numpy as np
from sklearn.gaussian_process.gpr import GaussianProcessRegressor as GPR
def fit(kernel, sample_indices, X, y, n_restarts_optimizer, normalize_y):
    """Fit a Gaussian Process Regression model on a subset of X and y.

    Used as a single worker payload in the parallel fitting process of the
    rBCM: only the rows selected by ``sample_indices`` are used.

    TODO: move the sample_indices argument out of this function and keep it
          in the logic of the rBCM class alone, passing in only the X and y
          that will actually be used.

    Args:
        kernel : sklearn kernel object
            Covariance kernel of the Gaussian Process.
        sample_indices : list of integers
            Indices of the subset of X and y to fit.
        X : np.ndarray
            Locations of the points; must match y in length.
        y : np.ndarray
            Values of the points at the X locations; must match X in length.
        n_restarts_optimizer : non-negative integer
            Number of optimizer restarts, passed through to scikit-learn's
            GaussianProcessRegressor.
        normalize_y : boolean
            Whether to normalize the scale of y; see scikit-learn's
            GaussianProcessRegressor.

    Returns:
        The fitted GaussianProcessRegressor instance.
    """
    model = GPR(kernel,
                n_restarts_optimizer=n_restarts_optimizer,
                copy_X_train=False,
                normalize_y=normalize_y)
    model.fit(X[sample_indices, :], y[sample_indices, :])
    return model
return gpr
def predict(expert, X, y_num_columns):
    """Predict with a fitted Gaussian Process Regression model at points X.

    Used as a single worker payload in the parallel prediction process of
    the rBCM.

    Args:
        expert : fitted regressor
            Object exposing ``predict(X, return_std=True)`` returning the
            predictions and their standard deviations.
        X : np.ndarray
            Locations of the points to predict at.
        y_num_columns : positive integer
            Unused; kept for backward compatibility with existing callers
            (it previously sized pre-allocated buffers that were
            immediately overwritten by expert.predict).

    Returns:
        Tuple ``(predictions, sigma)`` as returned by ``expert.predict``.
    """
    # The original pre-allocated zero arrays here; both were dead stores
    # overwritten by the predict() call, so they are removed.
    predictions, sigma = expert.predict(X, return_std=True)
    return predictions, sigma
| lucaskolstad/rBCM | rBCM/gpr.py | Python | bsd-3-clause | 2,397 | [
"Gaussian"
] | f29c4bc606b54314ab1c81f695f3ed935b5920b6fd9d1085e0e71e9df03406f2 |
########################################################################
# $Id$
########################################################################
""" JobStateUpdateHandler is the implementation of the Job State updating
service in the DISET framework
The following methods are available in the Service interface
setJobStatus()
"""
__RCSID__ = "$Id$"
from types import StringType, IntType, LongType, ListType, DictType
# from types import *
import time
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB
# This is a global instance of the JobDB class
# The False value is a "not yet initialized" sentinel; both handles are
# replaced with real JobDB / JobLoggingDB instances in
# initializeJobStateUpdateHandler().
jobDB = False
logDB = False

# Major states that terminate a job; reaching one of these triggers
# recording of the end-of-execution time in __setJobStatus.
JOB_FINAL_STATES = ['Done', 'Completed', 'Failed']
def initializeJobStateUpdateHandler( serviceInfo ):
  """ Service initialization: create the module-level JobDB and
      JobLoggingDB instances used by all handler methods.
  """
  global jobDB, logDB
  jobDB = JobDB()
  logDB = JobLoggingDB()
  return S_OK()
class JobStateUpdateHandler( RequestHandler ):
###########################################################################
  types_updateJobFromStager = [[StringType, IntType, LongType], StringType]
  def export_updateJobFromStager( self, jobID, status ):
    """ Simple call back method to be used by the stager.

        Maps the stager outcome ('Done'/'Failed') onto a job status, waits
        up to 10 seconds for the job to actually appear in 'Staging', then
        records the new status.  Non-error outcomes are reported with S_OK
        carrying an informative string, so the stager is never retried.
    """
    if status == 'Done':
      jobStatus = 'Checking'
      minorStatus = 'JobScheduling'
    elif status == 'Failed':
      jobStatus = 'Failed'
      minorStatus = 'Staging input files failed'
    else:
      return S_ERROR( "updateJobFromStager: %s status not known." % status )

    infoStr = None
    trials = 10
    # Poll once per second: the job record may not yet be in 'Staging'.
    for i in range( trials ):
      result = jobDB.getJobAttributes( jobID, ['Status'] )
      if not result['OK']:
        return result
      if not result['Value']:
        # if there is no matching Job it returns an empty dictionary
        return S_OK( 'No Matching Job' )
      status = result['Value']['Status']
      if status == 'Staging':
        if i:
          infoStr = "Found job in Staging after %d seconds" % i
        break
      time.sleep( 1 )
    if status != 'Staging':
      # Deliberately S_OK (best-effort): the job moved on by itself.
      return S_OK( 'Job is not in Staging after %d seconds' % trials )

    result = self.__setJobStatus( int( jobID ), jobStatus, minorStatus, 'StagerSystem', None )
    if not result['OK']:
      # A vanished job is not an error from the stager's point of view.
      if result['Message'].find( 'does not exist' ) != -1:
        return S_OK()
    if infoStr:
      return S_OK( infoStr )
    return result
###########################################################################
types_setJobStatus = [[StringType, IntType, LongType], StringType, StringType, StringType]
def export_setJobStatus( self, jobID, status, minorStatus, source = 'Unknown', datetime = None ):
""" Set the major and minor status for job specified by its JobId.
Set optionally the status date and source component which sends the
status information.
"""
return self.__setJobStatus( int( jobID ), status, minorStatus, source, datetime )
###########################################################################
types_setJobsStatus = [ListType, StringType, StringType, StringType]
def export_setJobsStatus( self, jobIDs, status, minorStatus, source = 'Unknown', datetime = None ):
""" Set the major and minor status for job specified by its JobId.
Set optionally the status date and source component which sends the
status information.
"""
for jobID in jobIDs:
self.__setJobStatus( int( jobID ), status, minorStatus, source, datetime )
return S_OK()
  def __setJobStatus( self, jobID, status, minorStatus, source, datetime ):
    """ update the job status.

        Writes the status to the JobDB, records start/end execution times on
        the relevant transitions, then logs the (re-read) status pair to the
        JobLoggingDB with an optional explicit timestamp.
    """
    result = jobDB.setJobStatus( jobID, status, minorStatus )
    if not result['OK']:
      return result

    # Record the execution time bounds on the corresponding transitions.
    if status in JOB_FINAL_STATES:
      result = jobDB.setEndExecTime( jobID )
    if status == 'Running' and minorStatus == 'Application':
      result = jobDB.setStartExecTime( jobID )

    # Re-read the attributes so the log record reflects what was stored
    # (setJobStatus may have ignored empty arguments).
    result = jobDB.getJobAttributes( jobID, ['Status', 'MinorStatus'] )
    if not result['OK']:
      return result
    if not result['Value']:
      return S_ERROR( 'Job %d does not exist' % int( jobID ) )

    status = result['Value']['Status']
    minorStatus = result['Value']['MinorStatus']
    if datetime:
      result = logDB.addLoggingRecord( jobID, status, minorStatus, datetime, source )
    else:
      result = logDB.addLoggingRecord( jobID, status, minorStatus, source = source )
    return result
###########################################################################
  types_setJobStatusBulk = [[StringType, IntType, LongType], DictType]
  def export_setJobStatusBulk( self, jobID, statusDict ):
    """ Set various status fields for job specified by its JobId.
        Set only the last status in the JobDB, updating all the status
        logging information in the JobLoggingDB. The statusDict has datetime
        as a key and status information dictionary as values
    """
    status = ""
    minor = ""
    application = ""
    appCounter = ""
    endDate = ''
    startDate = ''
    startFlag = ''
    jobID = int( jobID )

    result = jobDB.getJobAttributes( jobID, ['Status'] )
    if not result['OK']:
      return result
    if not result['Value']:
      # if there is no matching Job it returns an empty dictionary
      return S_ERROR( 'No Matching Job' )
    new_status = result['Value']['Status']
    # A stalled job reporting again is implicitly back to Running.
    if new_status == "Stalled":
      status = 'Running'

    # Get the latest WN time stamps of status updates
    result = logDB.getWMSTimeStamps( int( jobID ) )
    if not result['OK']:
      return result
    lastTime = max( [float( t ) for s, t in result['Value'].items() if s != 'LastTime'] )
    from DIRAC import Time
    lastTime = Time.toString( Time.fromEpoch( lastTime ) )

    # Get the last status values
    # NOTE(review): dates are compared as strings; this is only chronological
    # if the timestamp format is lexicographically ordered -- confirm.
    dates = sorted( statusDict )
    # We should only update the status if its time stamp is more recent than the last update
    for date in [date for date in dates if date >= lastTime]:
      sDict = statusDict[date]
      if sDict['Status']:
        status = sDict['Status']
        if status in JOB_FINAL_STATES:
          endDate = date
        if status == "Running":
          startFlag = 'Running'
      if sDict['MinorStatus']:
        minor = sDict['MinorStatus']
        # Running/Application marks the start of the payload execution.
        if minor == "Application" and startFlag == 'Running':
          startDate = date
      if sDict['ApplicationStatus']:
        application = sDict['ApplicationStatus']
      counter = sDict.get( 'ApplicationCounter' )
      if counter:
        appCounter = counter

    # Store only the attributes that were actually supplied.
    attrNames = []
    attrValues = []
    if status:
      attrNames.append( 'Status' )
      attrValues.append( status )
    if minor:
      attrNames.append( 'MinorStatus' )
      attrValues.append( minor )
    if application:
      attrNames.append( 'ApplicationStatus' )
      attrValues.append( application )
    if appCounter:
      attrNames.append( 'ApplicationCounter' )
      attrValues.append( appCounter )
    result = jobDB.setJobAttributes( jobID, attrNames, attrValues, update = True )
    if not result['OK']:
      return result

    if endDate:
      result = jobDB.setEndExecTime( jobID, endDate )
    if startDate:
      result = jobDB.setStartExecTime( jobID, startDate )

    # Update the JobLoggingDB records
    for date in dates:
      sDict = statusDict[date]
      status = sDict['Status']
      if not status:
        status = 'idem'
      minor = sDict['MinorStatus']
      if not minor:
        minor = 'idem'
      application = sDict['ApplicationStatus']
      if not application:
        application = 'idem'
      else:
        # An application status report implies the job is running its payload.
        status = "Running"
        minor = "Application"
      source = sDict['Source']
      result = logDB.addLoggingRecord( jobID, status, minor, application, date, source )
      if not result['OK']:
        return result

    return S_OK()
return S_OK()
###########################################################################
types_setJobSite = [[StringType, IntType, LongType], StringType]
def export_setJobSite( self, jobID, site ):
"""Allows the site attribute to be set for a job specified by its jobID.
"""
result = jobDB.setJobAttribute( int( jobID ), 'Site', site )
return result
###########################################################################
types_setJobFlag = [[StringType, IntType, LongType], StringType]
def export_setJobFlag( self, jobID, flag ):
""" Set job flag for job with jobID
"""
result = jobDB.setJobAttribute( int( jobID ), flag, 'True' )
return result
###########################################################################
types_unsetJobFlag = [[StringType, IntType, LongType], StringType]
def export_unsetJobFlag( self, jobID, flag ):
""" Unset job flag for job with jobID
"""
result = jobDB.setJobAttribute( int( jobID ), flag, 'False' )
return result
###########################################################################
  types_setJobApplicationStatus = [[StringType, IntType, LongType], StringType, StringType]
  def export_setJobApplicationStatus( self, jobID, appStatus, source = 'Unknown' ):
    """ Set the application status for job specified by its JobId.

        An application report from a Stalled or Matched job implicitly moves
        the job to Running; the (unchanged) minor status is re-logged with
        the new application status.
    """
    result = jobDB.getJobAttributes( int( jobID ), ['Status', 'MinorStatus'] )
    if not result['OK']:
      return result
    if not result['Value']:
      # if there is no matching Job it returns an empty dictionary
      return S_ERROR( 'No Matching Job' )

    status = result['Value']['Status']
    if status == "Stalled" or status == "Matched":
      new_status = 'Running'
    else:
      new_status = status
    minorStatus = result['Value']['MinorStatus']

    result = jobDB.setJobStatus( int( jobID ), new_status, application = appStatus )
    if not result['OK']:
      return result
    result = logDB.addLoggingRecord( int( jobID ), new_status, minorStatus, appStatus, source = source )
    return result
###########################################################################
types_setJobParameter = [[StringType, IntType, LongType], StringType, StringType]
def export_setJobParameter( self, jobID, name, value ):
""" Set arbitrary parameter specified by name/value pair
for job specified by its JobId
"""
result = jobDB.setJobParameter( int( jobID ), name, value )
return result
###########################################################################
types_setJobsParameter = [DictType]
def export_setJobsParameter( self, jobsParameterDict ):
""" Set arbitrary parameter specified by name/value pair
for job specified by its JobId
"""
for jobID in jobsParameterDict:
jobDB.setJobParameter( jobID, str( jobsParameterDict[jobID][0] ), str( jobsParameterDict[jobID][1] ) )
return S_OK()
###########################################################################
types_setJobParameters = [[StringType, IntType, LongType], ListType]
def export_setJobParameters( self, jobID, parameters ):
""" Set arbitrary parameters specified by a list of name/value pairs
for job specified by its JobId
"""
result = jobDB.setJobParameters( int( jobID ), parameters )
if not result['OK']:
return S_ERROR( 'Failed to store some of the parameters' )
return S_OK( 'All parameters stored for job' )
###########################################################################
types_sendHeartBeat = [[StringType, IntType, LongType], DictType, DictType]
def export_sendHeartBeat( self, jobID, dynamicData, staticData ):
""" Send a heart beat sign of life for a job jobID
"""
result = jobDB.setHeartBeatData( int( jobID ), staticData, dynamicData )
if not result['OK']:
gLogger.warn( 'Failed to set the heart beat data for job %d ' % int( jobID ) )
# Restore the Running status if necessary
# result = jobDB.getJobAttributes(jobID,['Status'])
# if not result['OK']:
# return result
# if not result['Value']:
# return S_ERROR('Job %d not found' % jobID)
# status = result['Value']['Status']
# if status == "Stalled" or status == "Matched":
# result = jobDB.setJobAttribute(jobID,'Status','Running',True)
# if not result['OK']:
# gLogger.warn('Failed to restore the job status to Running')
jobMessageDict = {}
result = jobDB.getJobCommand( int( jobID ) )
if result['OK']:
jobMessageDict = result['Value']
if jobMessageDict:
for key, _value in jobMessageDict.items():
result = jobDB.setJobCommandStatus( int( jobID ), key, 'Sent' )
return S_OK( jobMessageDict )
| rajanandakumar/DIRAC | WorkloadManagementSystem/Service/JobStateUpdateHandler.py | Python | gpl-3.0 | 12,605 | [
"DIRAC"
] | 2f459ad54d9ceb979dc6e8c45f87cab84997e3dafcb03befeb4e71136e4f0f15 |
#!/usr/bin/env python
# Template script for downloading ERA-5 pressure-level wind data via the
# Copernicus CDS API.  The "var_*" placeholders below are substituted with
# concrete values by the calling program before this script is executed.
import cdsapi
import calendar
import os

# Time span of the dataset (inclusive years and months)
year_start = var_year_start
year_end = var_year_end
month_start = var_month_start
month_end = var_month_end

# Geographic bounding box, in degrees
north = var_north
south = var_south
west = var_west
east = var_east

# Output folder, i.e. replace by your project name
out_path = 'var_out'

count = 1
for year in range(year_start, year_end+1):
    # Determine which months to request for this year: a single-year span
    # uses the requested month range directly; for multi-year spans only the
    # first and last years are partial, intermediate years cover Jan-Dec.
    if len(range(year_start, year_end+1)) == 1:
        mt_start = month_start
        mt_end = month_end
    else:
        if year == year_start:
            mt_start = month_start
            mt_end = 12
        elif year == year_end:
            mt_start = 1
            mt_end = month_end
        else:
            mt_start = 1
            mt_end = 12
    for month in range(mt_start, mt_end+1):
        # Last calendar day of this month (monthrange handles leap years)
        lastday1=calendar.monthrange(year,month)
        lastday=lastday1[1]
        # Build the day list "1".."lastday" as strings for the API request
        dayList = range(lastday+1)
        dayList = dayList[1:]
        dayList = [str(i) for i in dayList]
        bdate="%s%02d01"%(year,month)
        edate="%s%02d%s"%(year,month,lastday)
        print("######### ERA-5 #########")
        print('Accessing wind data from ', bdate,' to ',edate,' (YYYYMMDD)')
        print("################################")
        # One retrieve() call per month; output file is numbered sequentially
        c = cdsapi.Client()
        c.retrieve(
            'reanalysis-era5-pressure-levels',
            {
                'variable' : ['geopotential', 'u_component_of_wind', 'v_component_of_wind'],
                'pressure_level': ['1', '2', '3','5', '7', '10','20', '30', '50','70', '100', '125','150', '175', '200','225', '250', '300','350', '400', '450','500', '550', '600','650', '700', '750','775', '800', '825','850', '875', '900','925', '950', '975','1000'],
                'product_type' : 'reanalysis',
                'year' : '%s'%(year),
                'month' : '%s'%(month),
                'day' : dayList,
                'area' : [north, west, south, east], # North, West, South, East. Default: global
                'grid' : [0.25, 0.25], # Latitude/longitude grid: east-west (longitude) and north-south resolution (latitude). Default: 0.25 x 0.25
                'time' : ['00:00', '06:00', '12:00','18:00'],
                'format' : 'netcdf' # Supported format: grib and netcdf. Default: grib
            },
            "%s%05d_%s_%04d.nc"%(out_path, count, calendar.month_abbr[month],year))
        count = count + 1
"NetCDF"
] | b73803a0c5c73ca39c2f432d43301fee874c75125b88336aac75fe0f8f6b5db8 |
from textwrap import dedent
import psycopg2
import pytest
from unittest.mock import patch, MagicMock
from pgspecial.main import PGSpecial, NO_QUERY
from utils import run, dbtest, requires_json, requires_jsonb
from pgcli.main import PGCli
from pgcli.packages.parseutils.meta import FunctionMetadata
def function_meta_data(
    func_name,
    schema_name="public",
    arg_names=None,
    arg_types=None,
    arg_modes=None,
    return_type=None,
    is_aggregate=False,
    is_window=False,
    is_set_returning=False,
    is_extension=False,
    arg_defaults=None,
):
    """Convenience factory for FunctionMetadata with sensible test defaults.

    Keeps the assertions in the tests below short; the positional order of
    the forwarded arguments mirrors the FunctionMetadata constructor.
    """
    return FunctionMetadata(
        schema_name,
        func_name,
        arg_names,
        arg_types,
        arg_modes,
        return_type,
        is_aggregate,
        is_window,
        is_set_returning,
        is_extension,
        arg_defaults,
    )
@dbtest
def test_conn(executor):
    """A basic create/insert/select round trip produces a formatted table."""
    run(executor, """create table test(a text)""")
    run(executor, """insert into test values('abc')""")
    assert run(executor, """select * from test""", join=True) == dedent(
        """\
        +-----+
        | a |
        |-----|
        | abc |
        +-----+
        SELECT 1"""
    )
@dbtest
def test_copy(executor):
    """A copied executor works against the same database as the original."""
    executor_copy = executor.copy()
    run(executor_copy, """create table test(a text)""")
    run(executor_copy, """insert into test values('abc')""")
    assert run(executor_copy, """select * from test""", join=True) == dedent(
        """\
        +-----+
        | a |
        |-----|
        | abc |
        +-----+
        SELECT 1"""
    )
@dbtest
def test_bools_are_treated_as_strings(executor):
    """Boolean values render as the strings 'True'/'False' in output."""
    run(executor, """create table test(a boolean)""")
    run(executor, """insert into test values(True)""")
    assert run(executor, """select * from test""", join=True) == dedent(
        """\
        +------+
        | a |
        |------|
        | True |
        +------+
        SELECT 1"""
    )
@dbtest
def test_expanded_slash_G(executor, pgspecial):
    """A trailing \\G expands one result but must not stay switched on."""
    run(executor, """create table test(a boolean)""")
    run(executor, """insert into test values(True)""")
    # The return value is irrelevant here; we only care about the flag reset.
    run(executor, r"""select * from test \G""", pgspecial=pgspecial)
    assert pgspecial.expanded_output is False
@dbtest
def test_schemata_table_views_and_columns_query(executor):
    """Metadata queries report the schemas, tables, views and columns created."""
    run(executor, "create table a(x text, y text)")
    run(executor, "create table b(z text)")
    run(executor, "create view d as select 1 as e")
    run(executor, "create schema schema1")
    run(executor, "create table schema1.c (w text DEFAULT 'meow')")
    run(executor, "create schema schema2")

    # schemata
    # don't enforce all members of the schemas since they may include postgres
    # temporary schemas
    assert set(executor.schemata()) >= {
        "public",
        "pg_catalog",
        "information_schema",
        "schema1",
        "schema2",
    }
    assert executor.search_path() == ["pg_catalog", "public"]

    # tables
    assert set(executor.tables()) >= {
        ("public", "a"),
        ("public", "b"),
        ("schema1", "c"),
    }

    assert set(executor.table_columns()) >= {
        ("public", "a", "x", "text", False, None),
        ("public", "a", "y", "text", False, None),
        ("public", "b", "z", "text", False, None),
        ("schema1", "c", "w", "text", True, "'meow'::text"),
    }

    # views
    assert set(executor.views()) >= {("public", "d")}

    assert set(executor.view_columns()) >= {
        ("public", "d", "e", "integer", False, None)
    }
@dbtest
def test_foreign_key_query(executor):
    """Cross-schema foreign keys are reported with both endpoints."""
    run(executor, "create schema schema1")
    run(executor, "create schema schema2")
    run(executor, "create table schema1.parent(parentid int PRIMARY KEY)")
    run(
        executor,
        "create table schema2.child(childid int PRIMARY KEY, motherid int REFERENCES schema1.parent)",
    )

    assert set(executor.foreignkeys()) >= {
        ("schema1", "parent", "parentid", "schema2", "child", "motherid")
    }
@dbtest
def test_functions_query(executor):
    """functions() reports plain, schema-qualified and set-returning functions."""
    run(
        executor,
        """create function func1() returns int
        language sql as $$select 1$$""",
    )
    run(executor, "create schema schema1")
    run(
        executor,
        """create function schema1.func2() returns int
        language sql as $$select 2$$""",
    )

    run(
        executor,
        """create function func3()
        returns table(x int, y int) language sql
        as $$select 1, 2 from generate_series(1,5)$$;""",
    )

    run(
        executor,
        """create function func4(x int) returns setof int language sql
        as $$select generate_series(1,5)$$;""",
    )

    funcs = set(executor.functions())
    assert funcs >= {
        function_meta_data(func_name="func1", return_type="integer"),
        function_meta_data(
            func_name="func3",
            arg_names=["x", "y"],
            arg_types=["integer", "integer"],
            arg_modes=["t", "t"],
            return_type="record",
            is_set_returning=True,
        ),
        function_meta_data(
            schema_name="public",
            func_name="func4",
            arg_names=("x",),
            arg_types=("integer",),
            return_type="integer",
            is_set_returning=True,
        ),
        function_meta_data(
            schema_name="schema1", func_name="func2", return_type="integer"
        ),
    }
@dbtest
def test_datatypes_query(executor):
    """User-defined composite types are listed by datatypes()."""
    run(executor, "create type foo AS (a int, b text)")

    types = list(executor.datatypes())
    assert types == [("public", "foo")]
@dbtest
def test_database_list(executor):
    """The test database appears in the list of databases."""
    databases = executor.databases()
    assert "_test_db" in databases
@dbtest
def test_invalid_syntax(executor, exception_formatter):
    """A syntax error is surfaced through the exception formatter."""
    result = run(executor, "invalid syntax!", exception_formatter=exception_formatter)
    assert 'syntax error at or near "invalid"' in result[0]
@dbtest
def test_invalid_column_name(executor, exception_formatter):
    """An unknown column error is surfaced through the exception formatter."""
    result = run(
        executor, "select invalid command", exception_formatter=exception_formatter
    )
    assert 'column "invalid" does not exist' in result[0]
@pytest.fixture(params=[True, False])
def expanded(request):
    """Parametrized fixture: run the test in both normal and expanded mode."""
    return request.param
@dbtest
def test_unicode_support_in_output(executor, expanded):
    """Non-ASCII characters round-trip through storage and display."""
    run(executor, "create table unicodechars(t text)")
    run(executor, "insert into unicodechars (t) values ('é')")

    # See issue #24, this raises an exception without proper handling
    assert "é" in run(
        executor, "select * from unicodechars", join=True, expanded=expanded
    )
@dbtest
def test_not_is_special(executor, pgspecial):
    """is_special is set to false for database queries."""
    query = "select 1"
    result = list(executor.run(query, pgspecial=pgspecial))
    # (success, is_special) are the last two fields of the result tuple
    success, is_special = result[0][5:]
    assert success == True
    assert is_special == False
@dbtest
def test_execute_from_file_no_arg(executor, pgspecial):
    r"""\i without a filename returns an error."""
    result = list(executor.run(r"\i", pgspecial=pgspecial))
    status, sql, success, is_special = result[0][3:]
    assert "missing required argument" in status
    assert success == False
    assert is_special == True
@dbtest
@patch("pgcli.main.os")
def test_execute_from_file_io_error(os, executor, pgspecial):
    r"""\i with an os_error returns an error."""
    # Inject an OSError.
    os.path.expanduser.side_effect = OSError("test")

    # Check the result.
    result = list(executor.run(r"\i test", pgspecial=pgspecial))
    status, sql, success, is_special = result[0][3:]
    assert status == "test"
    assert success == False
    assert is_special == True
@dbtest
def test_multiple_queries_same_line(executor):
    """Two statements on one line produce two independent results."""
    result = run(executor, "select 'foo'; select 'bar'")
    assert len(result) == 12  # 2 * (output+status) * 3 lines
    assert "foo" in result[3]
    assert "bar" in result[9]
@dbtest
def test_multiple_queries_with_special_command_same_line(executor, pgspecial):
    r"""A SQL statement and a backslash command can share one line."""
    result = run(executor, r"select 'foo'; \d", pgspecial=pgspecial)
    assert len(result) == 11  # 2 * (output+status) * 3 lines
    assert "foo" in result[3]
    # This is a lame check. :(
    assert "Schema" in result[7]
@dbtest
def test_multiple_queries_same_line_syntaxerror(executor, exception_formatter):
    """The first statement succeeds even when the second one fails to parse."""
    result = run(
        executor,
        "select 'fooé'; invalid syntax é",
        exception_formatter=exception_formatter,
    )
    assert "fooé" in result[3]
    assert 'syntax error at or near "invalid"' in result[-1]
@pytest.fixture
def pgspecial():
    """Provide the PGSpecial instance of a freshly constructed PGCli."""
    return PGCli().pgspecial
@dbtest
def test_special_command_help(executor, pgspecial):
    r"""\? prints the help table with Command/Description headers."""
    result = run(executor, "\\?", pgspecial=pgspecial)[1].split("|")
    assert "Command" in result[1]
    assert "Description" in result[2]
@dbtest
def test_bytea_field_support_in_output(executor):
    """bytea values are rendered in the \\x hex escape format."""
    run(executor, "create table binarydata(c bytea)")
    run(executor, "insert into binarydata (c) values (decode('DEADBEEF', 'hex'))")

    assert "\\xdeadbeef" in run(executor, "select * from binarydata", join=True)
@dbtest
def test_unicode_support_in_unknown_type(executor):
    """Unicode text survives even when the column type is 'unknown'."""
    assert "日本語" in run(executor, "SELECT '日本語' AS japanese;", join=True)
@dbtest
def test_unicode_support_in_enum_type(executor):
    """Unicode enum labels round-trip through storage and display."""
    run(executor, "CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy', '日本語')")
    run(executor, "CREATE TABLE person (name TEXT, current_mood mood)")
    run(executor, "INSERT INTO person VALUES ('Moe', '日本語')")
    assert "日本語" in run(executor, "SELECT * FROM person", join=True)
@requires_json
def test_json_renders_without_u_prefix(executor, expanded):
    """json output shows plain strings, not u'' repr artifacts."""
    run(executor, "create table jsontest(d json)")
    run(executor, """insert into jsontest (d) values ('{"name": "Éowyn"}')""")
    result = run(
        executor, "SELECT d FROM jsontest LIMIT 1", join=True, expanded=expanded
    )

    assert '{"name": "Éowyn"}' in result
@requires_jsonb
def test_jsonb_renders_without_u_prefix(executor, expanded):
    """jsonb output shows plain strings, not u'' repr artifacts."""
    run(executor, "create table jsonbtest(d jsonb)")
    run(executor, """insert into jsonbtest (d) values ('{"name": "Éowyn"}')""")
    result = run(
        executor, "SELECT d FROM jsonbtest LIMIT 1", join=True, expanded=expanded
    )

    assert '{"name": "Éowyn"}' in result
@dbtest
def test_date_time_types(executor):
    """Extreme date/time/interval values render without mangling."""
    run(executor, "SET TIME ZONE UTC")
    assert (
        run(executor, "SELECT (CAST('00:00:00' AS time))", join=True).split("\n")[3]
        == "| 00:00:00 |"
    )
    assert (
        run(executor, "SELECT (CAST('00:00:00+14:59' AS timetz))", join=True).split(
            "\n"
        )[3]
        == "| 00:00:00+14:59 |"
    )
    assert (
        run(executor, "SELECT (CAST('4713-01-01 BC' AS date))", join=True).split("\n")[
            3
        ]
        == "| 4713-01-01 BC |"
    )
    assert (
        run(
            executor, "SELECT (CAST('4713-01-01 00:00:00 BC' AS timestamp))", join=True
        ).split("\n")[3]
        == "| 4713-01-01 00:00:00 BC |"
    )
    assert (
        run(
            executor,
            "SELECT (CAST('4713-01-01 00:00:00+00 BC' AS timestamptz))",
            join=True,
        ).split("\n")[3]
        == "| 4713-01-01 00:00:00+00 BC |"
    )
    assert (
        run(
            executor, "SELECT (CAST('-123456789 days 12:23:56' AS interval))", join=True
        ).split("\n")[3]
        == "| -123456789 days, 12:23:56 |"
    )
@dbtest
@pytest.mark.parametrize("value", ["10000000", "10000000.0", "10000000000000"])
def test_large_numbers_render_directly(executor, value):
    """Large numerics are shown verbatim, never in scientific notation."""
    run(executor, "create table numbertest(a numeric)")
    run(executor, f"insert into numbertest (a) values ({value})")
    assert value in run(executor, "select * from numbertest", join=True)
@dbtest
@pytest.mark.parametrize("command", ["di", "dv", "ds", "df", "dT"])
@pytest.mark.parametrize("verbose", ["", "+"])
@pytest.mark.parametrize("pattern", ["", "x", "*.*", "x.y", "x.*", "*.y"])
def test_describe_special(executor, command, verbose, pattern, pgspecial):
    # We don't have any tests for the output of any of the special commands,
    # but we can at least make sure they run without error
    sql = r"\{command}{verbose} {pattern}".format(**locals())
    list(executor.run(sql, pgspecial=pgspecial))
@dbtest
@pytest.mark.parametrize("sql", ["invalid sql", "SELECT 1; select error;"])
def test_raises_with_no_formatter(executor, sql):
    """Without an exception formatter, errors propagate as exceptions."""
    with pytest.raises(psycopg2.ProgrammingError):
        list(executor.run(sql))
@dbtest
def test_on_error_resume(executor):
    """With on_error_resume, statements after a failure still execute."""
    sql = "select 1; error; select 1;"
    result = list(
        executor.run(sql, on_error_resume=True, exception_formatter=exception_formatter)
    )
    assert len(result) == 3
@dbtest
def test_on_error_stop(executor, exception_formatter):
    """Without on_error_resume, execution stops at the failing statement."""
    sql = "select 1; error; select 1;"
    result = list(
        executor.run(
            sql, on_error_resume=False, exception_formatter=exception_formatter
        )
    )
    assert len(result) == 2
# @dbtest
# def test_unicode_notices(executor):
# sql = "DO language plpgsql $$ BEGIN RAISE NOTICE '有人更改'; END $$;"
# result = list(executor.run(sql))
# assert result[0][0] == u'NOTICE: 有人更改\n'
@dbtest
def test_nonexistent_function_definition(executor):
    """Looking up an unknown function raises RuntimeError."""
    with pytest.raises(RuntimeError):
        result = executor.function_definition("there_is_no_such_function")
@dbtest
def test_function_definition(executor):
    """function_definition() succeeds for an existing function."""
    run(
        executor,
        """
        CREATE OR REPLACE FUNCTION public.the_number_three()
        RETURNS int
        LANGUAGE sql
        AS $function$
        select 3;
        $function$
        """,
    )
    # No output assertion: the call just has to succeed without raising.
    result = executor.function_definition("the_number_three")
@dbtest
def test_view_definition(executor):
    """view_definition() works for both plain and materialized views."""
    run(executor, "create table tbl1 (a text, b numeric)")
    run(executor, "create view vw1 AS SELECT * FROM tbl1")
    run(executor, "create materialized view mvw1 AS SELECT * FROM tbl1")
    result = executor.view_definition("vw1")
    assert "FROM tbl1" in result
    result = executor.view_definition("mvw1")
    assert "MATERIALIZED VIEW" in result
@dbtest
def test_nonexistent_view_definition(executor):
    """Looking up views absent from this test's fresh database raises."""
    with pytest.raises(RuntimeError):
        result = executor.view_definition("there_is_no_such_view")
    # mvw1 exists only in test_view_definition's database, not here.
    with pytest.raises(RuntimeError):
        result = executor.view_definition("mvw1")
@dbtest
def test_short_host(executor):
    """short_host keeps only the first label of the (first) host name."""
    with patch.object(executor, "host", "localhost"):
        assert executor.short_host == "localhost"
    with patch.object(executor, "host", "localhost.example.org"):
        assert executor.short_host == "localhost"
    # Multi-host connection strings truncate at the first host's first label.
    with patch.object(
        executor, "host", "localhost1.example.org,localhost2.example.org"
    ):
        assert executor.short_host == "localhost1"
class BrokenConnection:
    """Mock a connection that failed: any cursor use raises InterfaceError."""

    def cursor(self):
        raise psycopg2.InterfaceError("I'm broken!")
class VirtualCursor:
    """Mock a cursor to virtual database like pgbouncer.

    Mimics the small subset of the psycopg2 cursor interface that pgcli
    touches; execute() always fails with a protocol-level error.
    """

    def __init__(self):
        self.protocol_error = False
        self.protocol_message = ""
        self.description = None
        self.status = None
        self.statusmessage = "Error"

    def execute(self, *args, **kwargs):
        # Virtual databases reject real statements at the protocol level.
        self.protocol_error = True
        self.protocol_message = "Command not supported"
@dbtest
def test_exit_without_active_connection(executor):
    """\\q must work on a dead connection; queries must still raise."""
    quit_handler = MagicMock()
    pgspecial = PGSpecial()
    pgspecial.register(
        quit_handler,
        "\\q",
        "\\q",
        "Quit pgcli.",
        arg_type=NO_QUERY,
        case_sensitive=True,
        aliases=(":q",),
    )

    with patch.object(executor, "conn", BrokenConnection()):
        # we should be able to quit the app, even without active connection
        run(executor, "\\q", pgspecial=pgspecial)
        quit_handler.assert_called_once()

        # an exception should be raised when running a query without active connection
        with pytest.raises(psycopg2.InterfaceError):
            run(executor, "select 1", pgspecial=pgspecial)
@dbtest
def test_virtual_database(executor):
    """Protocol errors from virtual databases surface in the output."""
    virtual_connection = MagicMock()
    virtual_connection.cursor.return_value = VirtualCursor()
    with patch.object(executor, "conn", virtual_connection):
        result = run(executor, "select 1")
    assert "Command not supported" in result
| dbcli/pgcli | tests/test_pgexecute.py | Python | bsd-3-clause | 16,600 | [
"MOE"
] | 5ff905d3b79cb47626bfcbae3b833b9ef29e8dcf9950790cbeabe9ce25d992bf |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import os
import sys
from tempfile import mkdtemp
from shutil import rmtree
import locale
import logging
import subprocess
from pelican import Pelican
from pelican.settings import read_settings
from pelican.tests.support import LoggedTestCase, mute, locale_available, unittest
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
SAMPLES_PATH = os.path.abspath(os.path.join(
CURRENT_DIR, os.pardir, os.pardir, 'samples'))
OUTPUT_PATH = os.path.abspath(os.path.join(CURRENT_DIR, 'output'))
INPUT_PATH = os.path.join(SAMPLES_PATH, "content")
SAMPLE_CONFIG = os.path.join(SAMPLES_PATH, "pelican.conf.py")
SAMPLE_FR_CONFIG = os.path.join(SAMPLES_PATH, "pelican.conf_FR.py")
def recursiveDiff(dcmp):
    """Flatten a filecmp.dircmp tree into lists of differing paths.

    Returns a dict with keys 'diff_files', 'left_only' and 'right_only';
    every entry is joined onto the right-hand directory of the comparison
    it was found in, and subdirectories are folded in recursively.
    """
    join = os.path.join
    collected = {
        'diff_files': [join(dcmp.right, name) for name in dcmp.diff_files],
        'left_only': [join(dcmp.right, name) for name in dcmp.left_only],
        'right_only': [join(dcmp.right, name) for name in dcmp.right_only],
    }
    for child in dcmp.subdirs.values():
        child_diff = recursiveDiff(child)
        for key in collected:
            collected[key] += child_diff[key]
    return collected
class TestPelican(LoggedTestCase):
    # general functional testing for pelican. Basically, this test case tries
    # to run pelican in different situations and see how it behaves

    def setUp(self):
        """Create scratch output/cache dirs and pin the locale to 'C'."""
        super(TestPelican, self).setUp()
        self.temp_path = mkdtemp(prefix='pelicantests.')
        self.temp_cache = mkdtemp(prefix='pelican_cache.')
        self.maxDiff = None
        self.old_locale = locale.setlocale(locale.LC_ALL)
        locale.setlocale(locale.LC_ALL, str('C'))

    def tearDown(self):
        """Remove scratch dirs and restore the locale saved in setUp."""
        rmtree(self.temp_path)
        rmtree(self.temp_cache)
        locale.setlocale(locale.LC_ALL, self.old_locale)
        super(TestPelican, self).tearDown()

    def assertFilesEqual(self, diff):
        """Assert a recursiveDiff() result reports no differing files."""
        msg = ("some generated files differ from the expected functional "
               "tests output.\n"
               "This is probably because the HTML generated files "
               "changed. If these changes are normal, please refer "
               "to docs/contribute.rst to update the expected "
               "output of the functional tests.")

        self.assertEqual(diff['left_only'], [], msg=msg)
        self.assertEqual(diff['right_only'], [], msg=msg)
        self.assertEqual(diff['diff_files'], [], msg=msg)

    def assertDirsEqual(self, left_path, right_path):
        """Assert two directory trees are identical, via `git diff -w`."""
        out, err = subprocess.Popen(
            ['git', 'diff', '--no-ext-diff', '--exit-code', '-w', left_path, right_path], env={'PAGER': ''},
            stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        assert not out, out
        assert not err, err

    def test_basic_generation_works(self):
        # when running pelican without settings, it should pick up the default
        # ones and generate correct output without raising any exception
        settings = read_settings(path=None, override={
            'PATH': INPUT_PATH,
            'OUTPUT_PATH': self.temp_path,
            'CACHE_PATH': self.temp_cache,
            'LOCALE': locale.normalize('en_US'),
        })
        pelican = Pelican(settings=settings)
        mute(True)(pelican.run)()
        self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, 'basic'))
        self.assertLogCountEqual(
            count=3,
            msg="Unable to find.*skipping url replacement",
            level=logging.WARNING)

    def test_custom_generation_works(self):
        # the same thing with a specified set of settings should work
        settings = read_settings(path=SAMPLE_CONFIG, override={
            'PATH': INPUT_PATH,
            'OUTPUT_PATH': self.temp_path,
            'CACHE_PATH': self.temp_cache,
            'LOCALE': locale.normalize('en_US'),
        })
        pelican = Pelican(settings=settings)
        mute(True)(pelican.run)()
        self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, 'custom'))

    @unittest.skipUnless(locale_available('fr_FR.UTF-8') or
                         locale_available('French'), 'French locale needed')
    def test_custom_locale_generation_works(self):
        '''Test that generation with fr_FR.UTF-8 locale works'''
        old_locale = locale.setlocale(locale.LC_TIME)

        # Windows names the locale differently from POSIX systems
        if sys.platform == 'win32':
            our_locale = str('French')
        else:
            our_locale = str('fr_FR.UTF-8')

        settings = read_settings(path=SAMPLE_FR_CONFIG, override={
            'PATH': INPUT_PATH,
            'OUTPUT_PATH': self.temp_path,
            'CACHE_PATH': self.temp_cache,
            'LOCALE': our_locale,
        })
        pelican = Pelican(settings=settings)
        mute(True)(pelican.run)()
        self.assertDirsEqual(self.temp_path, os.path.join(OUTPUT_PATH, 'custom_locale'))

    def test_theme_static_paths_copy(self):
        # the same thing with a specified set of settings should work
        settings = read_settings(path=SAMPLE_CONFIG, override={
            'PATH': INPUT_PATH,
            'OUTPUT_PATH': self.temp_path,
            'CACHE_PATH': self.temp_cache,
            'THEME_STATIC_PATHS': [os.path.join(SAMPLES_PATH, 'very'),
                                   os.path.join(SAMPLES_PATH, 'kinda'),
                                   os.path.join(SAMPLES_PATH, 'theme_standard')]
        })
        pelican = Pelican(settings=settings)
        mute(True)(pelican.run)()
        theme_output = os.path.join(self.temp_path, 'theme')
        extra_path = os.path.join(theme_output, 'exciting', 'new', 'files')

        # All configured static dirs must be merged into the theme output
        for file in ['a_stylesheet', 'a_template']:
            self.assertTrue(os.path.exists(os.path.join(theme_output, file)))

        for file in ['wow!', 'boom!', 'bap!', 'zap!']:
            self.assertTrue(os.path.exists(os.path.join(extra_path, file)))

    def test_theme_static_paths_copy_single_file(self):
        # the same thing with a specified set of settings should work
        settings = read_settings(path=SAMPLE_CONFIG, override={
            'PATH': INPUT_PATH,
            'OUTPUT_PATH': self.temp_path,
            'CACHE_PATH': self.temp_cache,
            'THEME_STATIC_PATHS': [os.path.join(SAMPLES_PATH, 'theme_standard')]
        })

        pelican = Pelican(settings=settings)
        mute(True)(pelican.run)()
        theme_output = os.path.join(self.temp_path, 'theme')

        for file in ['a_stylesheet', 'a_template']:
            self.assertTrue(os.path.exists(os.path.join(theme_output, file)))

    def test_write_only_selected(self):
        """Test that only the selected files are written"""
        settings = read_settings(path=None, override={
            'PATH': INPUT_PATH,
            'OUTPUT_PATH': self.temp_path,
            'CACHE_PATH': self.temp_cache,
            'WRITE_SELECTED': [
                os.path.join(self.temp_path, 'oh-yeah.html'),
                os.path.join(self.temp_path, 'categories.html'),
            ],
            'LOCALE': locale.normalize('en_US'),
        })
        pelican = Pelican(settings=settings)
        logger = logging.getLogger()
        orig_level = logger.getEffectiveLevel()
        logger.setLevel(logging.INFO)
        mute(True)(pelican.run)()
        logger.setLevel(orig_level)
        # Exactly the two selected files should have been written
        self.assertLogCountEqual(
            count=2,
            msg="Writing .*",
            level=logging.INFO)
| 0xMF/pelican | pelican/tests/test_pelican.py | Python | agpl-3.0 | 7,512 | [
"exciting"
] | dbcb0e8682ba2e710544fa214212dfd02c9eb74a0f82f90007cdd9a3523a74a1 |
# pyenchant
#
# Copyright (C) 2004-2011, Ryan Kelly
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
# In addition, as a special exception, you are
# given permission to link the code of this program with
# non-LGPL Spelling Provider libraries (eg: a MSFT Office
# spell checker backend) and distribute linked combinations including
# the two. You must obey the GNU Lesser General Public License in all
# respects for all of the code used other than said providers. If you modify
# this file, you may extend this exception to your version of the
# file, but you are not obligated to do so. If you do not wish to
# do so, delete this exception statement from your version.
#
"""
enchant: Access to the enchant spellchecking library
=====================================================
This module provides several classes for performing spell checking
via the Enchant spellchecking library. For more details on Enchant,
visit the project website:
http://www.abisource.com/enchant/
Spellchecking is performed using 'Dict' objects, which represent
a language dictionary. Their use is best demonstrated by a quick
example::
>>> import enchant
>>> d = enchant.Dict("en_US") # create dictionary for US English
>>> d.check("enchant")
True
>>> d.check("enchnt")
False
>>> d.suggest("enchnt")
['enchant', 'enchants', 'enchanter', 'penchant', 'incant', 'enchain', 'enchanted']
Languages are identified by standard string tags such as "en" (English)
and "fr" (French). Specific language dialects can be specified by
including an additional code - for example, "en_AU" refers to Australian
English. The later form is preferred as it is more widely supported.
To check whether a dictionary exists for a given language, the function
'dict_exists' is available. Dictionaries may also be created using the
function 'request_dict'.
A finer degree of control over the dictionaries and how they are created
can be obtained using one or more 'Broker' objects. These objects are
responsible for locating dictionaries for a specific language.
In Python 2.x, unicode strings are supported transparently in the
standard manner - if a unicode string is given as an argument, the
result will be a unicode string. Note that Enchant works in UTF-8
internally, so passing an ASCII string to a dictionary for a language
requiring Unicode may result in UTF-8 strings being returned.
In Python 3.x unicode strings are expected throughout. Bytestrings
should not be passed into any functions.
Errors that occur in this module are reported by raising subclasses
of 'Error'.
"""
_DOC_ERRORS = ['enchnt','enchnt','incant','fr']
# Make version info available
__ver_major__ = 1
__ver_minor__ = 6
__ver_patch__ = 6
__ver_sub__ = ""
__version__ = "%d.%d.%d%s" % (__ver_major__,__ver_minor__,
__ver_patch__,__ver_sub__)
import os
try:
from enchant import _enchant as _e
except ImportError:
if not os.environ.get("PYENCHANT_IGNORE_MISSING_LIB",False):
raise
_e = None
from enchant.errors import *
from enchant.utils import EnchantStr, get_default_language
from enchant.pypwl import PyPWL
# Due to the unfortunate name collision between the enchant "tokenize" module
# and the stdlib "tokenize" module, certain values of sys.path can cause
# the former to override the latter and break the "warnings" module.
# This hacks around it by making a dummy "warnings" module: the shim class
# below exposes a no-op warn() and an instance of it is bound to the name
# "warnings", so callers of warnings.warn(...) keep working either way.
try:
    import warnings
except ImportError:
    class warnings(object):
        def warn(self,*args,**kwds):
            pass
    warnings = warnings()
class ProviderDesc(object):
    """Simple class describing an Enchant provider.

    Each provider has the following information associated with it:

        * name:  Internal provider name (e.g. "aspell")
        * desc:  Human-readable description (e.g. "Aspell Provider")
        * file:  Location of the library containing the provider

    Instances behave as immutable value objects: equality and hashing
    are defined over the (name, desc, file) triple.
    """
    _DOC_ERRORS = ["desc"]

    def __init__(self, name, desc, file):
        self.name = name
        self.desc = desc
        self.file = file

    def __str__(self):
        return "<Enchant: %s>" % self.desc

    def __repr__(self):
        return str(self)

    def __eq__(self, pd):
        """Equality operator on ProviderDesc objects."""
        return (self.name == pd.name and \
                self.desc == pd.desc and \
                self.file == pd.file)

    def __ne__(self, pd):
        """Inequality operator, kept consistent with __eq__ (needed on Python 2,
        which does not derive __ne__ automatically)."""
        return not self.__eq__(pd)

    def __hash__(self):
        """Hash operator on ProviderDesc objects.

        Hashes the field tuple rather than a string concatenation, so that
        e.g. ("ab","c","") and ("a","bc","") cannot collide by construction.
        """
        return hash((self.name, self.desc, self.file))
class _EnchantObject(object):
    """Base class for enchant objects.

    This class implements some general functionality for interfacing with
    the '_enchant' C-library in a consistent way.  All public objects
    from the 'enchant' module are subclasses of this class.

    Every instance carries a '_this' attribute holding the pointer to the
    underlying C-library object; '_check_this' raises an Error whenever
    that pointer has been cleared.
    """

    def __init__(self):
        """_EnchantObject constructor."""
        self._this = None
        # Keep the module importable when the enchant C library is absent:
        # only initialise the C-side object if the library actually loaded.
        if _e is not None:
            self._init_this()

    def _check_this(self, msg=None):
        """Raise an Error unless self._this points at a live C object."""
        if self._this is not None:
            return
        if msg is None:
            msg = "%s unusable: the underlying C-library object has been freed." \
                  % (self.__class__.__name__,)
        raise Error(msg)

    def _init_this(self):
        """Initialise the underlying C-library object pointer."""
        raise NotImplementedError

    def _raise_error(self, default="Unspecified Error", eclass=Error):
        """Raise an exception based on available error messages.

        This method causes an Error to be raised.  Subclasses should
        override it to retrieve an error indication from the underlying
        API if possible.  If such a message cannot be retrieved, the
        argument value <default> is used.  The class of the exception
        can be specified using the argument <eclass>.
        """
        raise eclass(default)
    _raise_error._DOC_ERRORS = ["eclass"]

    def __getstate__(self):
        """Customize pickling of PyEnchant objects.

        Since it's not safe for multiple objects to share the same C-library
        object, we make sure it's unset when pickling.
        """
        state = dict(self.__dict__)
        state["_this"] = None
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        self._init_this()
class Broker(_EnchantObject):
    """Broker object for the Enchant spellchecker.
    Broker objects are responsible for locating and managing dictionaries.
    Unless custom functionality is required, there is no need to use Broker
    objects directly. The 'enchant' module provides a default broker object
    so that 'Dict' objects can be created directly.
    The most important methods of this class include:
    * dict_exists: check existence of a specific language dictionary
    * request_dict: obtain a dictionary for specific language
    * set_ordering: specify which dictionaries to try for for a
given language.
    """
    def __init__(self):
        """Broker object constructor.
        This method is the constructor for the 'Broker' object. No
        arguments are required.
        """
        _EnchantObject.__init__(self)
    def _init_this(self):
        self._this = _e.broker_init()
        if not self._this:
            raise Error("Could not initialise an enchant broker.")
        # Maps raw C dict pointers to the number of live references that
        # have been handed out for each (see _request_dict_data).
        self._live_dicts = {}
    def __del__(self):
        """Broker object destructor."""
        # _e may already be gone during interpreter shutdown.
        if _e is not None:
            self._free()
    def __getstate__(self):
        # _live_dicts holds raw C pointers and cannot be pickled; it is
        # rebuilt by _init_this() on unpickling.
        state = super(Broker,self).__getstate__()
        state.pop("_live_dicts")
        return state
    def _raise_error(self,default="Unspecified Error",eclass=Error):
        """Overrides _EnchantObject._raise_error to check broker errors."""
        err = _e.broker_get_error(self._this)
        if err == "" or err is None:
            raise eclass(default)
        raise eclass(err)
    def _free(self):
        """Free system resource associated with a Broker object.
        This method can be called to free the underlying system resources
        associated with a Broker object. It is called automatically when
        the object is garbage collected. If called explicitly, the
        Broker and any associated Dict objects must no longer be used.
        """
        if self._this is not None:
            # During shutdown, this finalizer may be called before
            # some Dict finalizers. Ensure all pointers are freed.
            for (dict,count) in list(self._live_dicts.items()):
                while count:
                    self._free_dict_data(dict)
                    count -= 1
            _e.broker_free(self._this)
            self._this = None
    def request_dict(self,tag=None):
        """Request a Dict object for the language specified by <tag>.
        This method constructs and returns a Dict object for the
        requested language. 'tag' should be a string of the appropriate
        form for specifying a language, such as "fr" (French) or "en_AU"
        (Australian English). The existence of a specific language can
        be tested using the 'dict_exists' method.
        If <tag> is not given or is None, an attempt is made to determine
        the current language in use. If this cannot be determined, Error
        is raised.
        NOTE: this method is functionally equivalent to calling the Dict()
        constructor and passing in the <broker> argument.
        """
        return Dict(tag,self)
    request_dict._DOC_ERRORS = ["fr"]
    def _request_dict_data(self,tag):
        """Request raw C pointer data for a dictionary.
        This method call passes on the call to the C library, and does
        some internal bookkeeping.
        """
        self._check_this()
        tag = EnchantStr(tag)
        new_dict = _e.broker_request_dict(self._this,tag.encode())
        if new_dict is None:
            eStr = "Dictionary for language '%s' could not be found"
            self._raise_error(eStr % (tag,),DictNotFoundError)
        # Reference-count the pointer so _free() can release it the
        # correct number of times.
        if new_dict not in self._live_dicts:
            self._live_dicts[new_dict] = 1
        else:
            self._live_dicts[new_dict] += 1
        return new_dict
    def request_pwl_dict(self,pwl):
        """Request a Dict object for a personal word list.
        This method behaves as 'request_dict' but rather than returning
        a dictionary for a specific language, it returns a dictionary
        referencing a personal word list. A personal word list is a file
        of custom dictionary entries, one word per line.
        """
        self._check_this()
        pwl = EnchantStr(pwl)
        new_dict = _e.broker_request_pwl_dict(self._this,pwl.encode())
        if new_dict is None:
            eStr = "Personal Word List file '%s' could not be loaded"
            self._raise_error(eStr % (pwl,))
        if new_dict not in self._live_dicts:
            self._live_dicts[new_dict] = 1
        else:
            self._live_dicts[new_dict] += 1
        # Build a 'dead' Dict and splice the new pointer into it.
        d = Dict(False)
        d._switch_this(new_dict,self)
        return d
    def _free_dict(self,dict):
        """Free memory associated with a dictionary.
        This method frees system resources associated with a Dict object.
        It is equivalent to calling the object's 'free' method. Once this
        method has been called on a dictionary, it must not be used again.
        """
        self._free_dict_data(dict._this)
        dict._this = None
        dict._broker = None
    def _free_dict_data(self,dict):
        """Free the underlying pointer for a dict."""
        self._check_this()
        _e.broker_free_dict(self._this,dict)
        # Decrement the reference count, dropping the entry at zero.
        self._live_dicts[dict] -= 1
        if self._live_dicts[dict] == 0:
            del self._live_dicts[dict]
    def dict_exists(self,tag):
        """Check availability of a dictionary.
        This method checks whether there is a dictionary available for
        the language specified by 'tag'. It returns True if a dictionary
        is available, and False otherwise.
        """
        self._check_this()
        tag = EnchantStr(tag)
        val = _e.broker_dict_exists(self._this,tag.encode())
        return bool(val)
    def set_ordering(self,tag,ordering):
        """Set dictionary preferences for a language.
        The Enchant library supports the use of multiple dictionary programs
        and multiple languages. This method specifies which dictionaries
        the broker should prefer when dealing with a given language. 'tag'
        must be an appropriate language specification and 'ordering' is a
        string listing the dictionaries in order of preference. For example
        a valid ordering might be "aspell,myspell,ispell".
        The value of 'tag' can also be set to "*" to set a default ordering
        for all languages for which one has not been set explicitly.
        """
        self._check_this()
        tag = EnchantStr(tag)
        ordering = EnchantStr(ordering)
        _e.broker_set_ordering(self._this,tag.encode(),ordering.encode())
    def describe(self):
        """Return list of provider descriptions.
        This method returns a list of descriptions of each of the
        dictionary providers available. Each entry in the list is a
        ProviderDesc object.
        """
        self._check_this()
        # Results are accumulated by __describe_callback during the
        # C-library call below.
        self.__describe_result = []
        _e.broker_describe(self._this,self.__describe_callback)
        return [ ProviderDesc(*r) for r in self.__describe_result]
    def __describe_callback(self,name,desc,file):
        """Collector callback for dictionary description.
        This method is used as a callback into the _enchant function
        'enchant_broker_describe'. It collects the given arguments in
        a tuple and appends them to the list '__describe_result'.
        """
        s = EnchantStr("")
        name = s.decode(name)
        desc = s.decode(desc)
        file = s.decode(file)
        self.__describe_result.append((name,desc,file))
    def list_dicts(self):
        """Return list of available dictionaries.
        This method returns a list of dictionaries available to the
        broker. Each entry in the list is a two-tuple of the form:
        (tag,provider)
        where <tag> is the language tag for the dictionary and
        <provider> is a ProviderDesc object describing the provider
        through which that dictionary can be obtained.
        """
        self._check_this()
        self.__list_dicts_result = []
        _e.broker_list_dicts(self._this,self.__list_dicts_callback)
        return [ (r[0],ProviderDesc(*r[1])) for r in self.__list_dicts_result]
    def __list_dicts_callback(self,tag,name,desc,file):
        """Collector callback for listing dictionaries.
        This method is used as a callback into the _enchant function
        'enchant_broker_list_dicts'. It collects the given arguments into
        an appropriate tuple and appends them to '__list_dicts_result'.
        """
        s = EnchantStr("")
        tag = s.decode(tag)
        name = s.decode(name)
        desc = s.decode(desc)
        file = s.decode(file)
        self.__list_dicts_result.append((tag,(name,desc,file)))
    def list_languages(self):
        """List languages for which dictionaries are available.
        This function returns a list of language tags for which a
        dictionary is available.
        """
        # De-duplicate tags while preserving their discovery order.
        langs = []
        for (tag,prov) in self.list_dicts():
            if tag not in langs:
                langs.append(tag)
        return langs
    def __describe_dict(self,dict_data):
        """Get the description tuple for a dict data object.
        <dict_data> must be a C-library pointer to an enchant dictionary.
        The return value is a tuple of the form:
        (<tag>,<name>,<desc>,<file>)
        """
        # Define local callback function
        cb_result = []
        def cb_func(tag,name,desc,file):
            s = EnchantStr("")
            tag = s.decode(tag)
            name = s.decode(name)
            desc = s.decode(desc)
            file = s.decode(file)
            cb_result.append((tag,name,desc,file))
        # Actually call the describer function
        _e.dict_describe(dict_data,cb_func)
        return cb_result[0]
    __describe_dict._DOC_ERRORS = ["desc"]
    def get_param(self,name):
        """Get the value of a named parameter on this broker.
        Parameters are used to provide runtime information to individual
        provider backends. See the method 'set_param' for more details.
        """
        name = EnchantStr(name)
        return name.decode(_e.broker_get_param(self._this,name.encode()))
    get_param._DOC_ERRORS = ["param"]
    def set_param(self,name,value):
        """Set the value of a named parameter on this broker.
        Parameters are used to provide runtime information to individual
        provider backends. For example, the myspell provider will search
        any directories given in the "enchant.myspell.dictionary.path"
        parameter when looking for its dictionary files.
        """
        name = EnchantStr(name)
        value = EnchantStr(value)
        _e.broker_set_param(self._this,name.encode(),value.encode())
class Dict(_EnchantObject):
    """Dictionary object for the Enchant spellchecker.
    Dictionary objects are responsible for checking the spelling of words
    and suggesting possible corrections. Each dictionary is owned by a
    Broker object, but unless a new Broker has explicitly been created
    then this will be the 'enchant' module default Broker and is of little
    interest.
    The important methods of this class include:
    * check(): check whether a word is spelled correctly
    * suggest(): suggest correct spellings for a word
    * add(): add a word to the user's personal dictionary
    * remove(): add a word to the user's personal exclude list
    * add_to_session(): add a word to the current spellcheck session
    * store_replacement(): indicate a replacement for a given word
    Information about the dictionary is available using the following
    attributes:
    * tag: the language tag of the dictionary
    * provider: a ProviderDesc object for the dictionary provider
    """
    def __init__(self,tag=None,broker=None):
        """Dict object constructor.
        A dictionary belongs to a specific language, identified by the
        string <tag>. If the tag is not given or is None, an attempt to
        determine the language currently in use is made using the 'locale'
        module. If the current language cannot be determined, Error is raised.
        If <tag> is instead given the value of False, a 'dead' Dict object
        is created without any reference to a language. This is typically
        only useful within PyEnchant itself. Any other non-string value
        for <tag> raises Error.
        Each dictionary must also have an associated Broker object which
        obtains the dictionary information from the underlying system. This
        may be specified using <broker>. If not given, the default broker
        is used.
        """
        # Initialise misc object attributes to None
        self.provider = None
        # If no tag was given, use the default language
        if tag is None:
            tag = get_default_language()
            if tag is None:
                err = "No tag specified and default language could not "
                err = err + "be determined."
                raise Error(err)
        self.tag = tag
        # If no broker was given, use the default broker
        if broker is None:
            broker = _broker
        self._broker = broker
        # Now let the superclass initialise the C-library object
        _EnchantObject.__init__(self)
    def _init_this(self):
        # Create dead object if False was given as the tag.
        # Otherwise, use the broker to get C-library pointer data.
        self._this = None
        if self.tag:
            this = self._broker._request_dict_data(self.tag)
            self._switch_this(this,self._broker)
    def __del__(self):
        """Dict object destructor."""
        # Calling free() might fail if python is shutting down
        try:
            self._free()
        except AttributeError:
            pass
    def _switch_this(self,this,broker):
        """Switch the underlying C-library pointer for this object.
        As all useful state for a Dict is stored by the underlying C-library
        pointer, it is very convenient to allow this to be switched at
        run-time. Pass a new dict data object into this method to affect
        the necessary changes. The creating Broker object (at the Python
        level) must also be provided.
        This should *never* *ever* be used by application code. It's
        a convenience for developers only, replacing the clunkier <data>
        parameter to __init__ from earlier versions.
        """
        # Free old dict data
        Dict._free(self)
        # Hook in the new stuff
        self._this = this
        self._broker = broker
        # Update object properties
        desc = self.__describe(check_this=False)
        self.tag = desc[0]
        self.provider = ProviderDesc(*desc[1:])
    _switch_this._DOC_ERRORS = ["init"]
    def _check_this(self,msg=None):
        """Extend _EnchantObject._check_this() to check Broker validity.
        It is possible for the managing Broker object to be freed without
        freeing the Dict. Thus validity checking must take into account
        self._broker._this as well as self._this.
        """
        if self._broker is None or self._broker._this is None:
            self._this = None
        _EnchantObject._check_this(self,msg)
    def _raise_error(self,default="Unspecified Error",eclass=Error):
        """Overrides _EnchantObject._raise_error to check dict errors."""
        err = _e.dict_get_error(self._this)
        if err == "" or err is None:
            raise eclass(default)
        raise eclass(err)
    def _free(self):
        """Free the system resources associated with a Dict object.
        This method frees underlying system resources for a Dict object.
        Once it has been called, the Dict object must no longer be used.
        It is called automatically when the object is garbage collected.
        """
        if self._this is not None:
            # The broker may have been freed before the dict.
            # It will have freed the underlying pointers already.
            if self._broker is not None and self._broker._this is not None:
                self._broker._free_dict(self)
    def check(self,word):
        """Check spelling of a word.
        This method takes a word in the dictionary language and returns
        True if it is correctly spelled, and false otherwise.
        """
        self._check_this()
        word = EnchantStr(word)
        # Enchant asserts that the word is non-empty.
        # Check it up-front to avoid nasty warnings on stderr.
        if len(word) == 0:
            raise ValueError("can't check spelling of empty string")
        # The C call returns 0 for a correct word, a positive value for
        # a misspelling, and a negative value on error.
        val = _e.dict_check(self._this,word.encode())
        if val == 0:
            return True
        if val > 0:
            return False
        self._raise_error()
    def suggest(self,word):
        """Suggest possible spellings for a word.
        This method tries to guess the correct spelling for a given
        word, returning the possibilities in a list.
        """
        self._check_this()
        word = EnchantStr(word)
        # Enchant asserts that the word is non-empty.
        # Check it up-front to avoid nasty warnings on stderr.
        if len(word) == 0:
            raise ValueError("can't suggest spellings for empty string")
        suggs = _e.dict_suggest(self._this,word.encode())
        # Decode each suggestion back to the caller's string type.
        return [word.decode(w) for w in suggs]
    def add(self,word):
        """Add a word to the user's personal word list."""
        self._check_this()
        word = EnchantStr(word)
        _e.dict_add(self._this,word.encode())
    def remove(self,word):
        """Add a word to the user's personal exclude list."""
        self._check_this()
        word = EnchantStr(word)
        _e.dict_remove(self._this,word.encode())
    def add_to_pwl(self,word):
        """Add a word to the user's personal word list."""
        # Deprecated alias for add(); kept for backward compatibility.
        warnings.warn("Dict.add_to_pwl is deprecated, please use Dict.add",
                      category=DeprecationWarning,stacklevel=2)
        self._check_this()
        word = EnchantStr(word)
        _e.dict_add_to_pwl(self._this,word.encode())
    def add_to_session(self,word):
        """Add a word to the session personal list."""
        self._check_this()
        word = EnchantStr(word)
        _e.dict_add_to_session(self._this,word.encode())
    def remove_from_session(self,word):
        """Add a word to the session exclude list."""
        self._check_this()
        word = EnchantStr(word)
        _e.dict_remove_from_session(self._this,word.encode())
    def is_added(self,word):
        """Check whether a word is in the personal word list."""
        self._check_this()
        word = EnchantStr(word)
        return _e.dict_is_added(self._this,word.encode())
    def is_removed(self,word):
        """Check whether a word is in the personal exclude list."""
        self._check_this()
        word = EnchantStr(word)
        return _e.dict_is_removed(self._this,word.encode())
    def is_in_session(self,word):
        """Check whether a word is in the session list."""
        # Deprecated alias for is_added(); kept for backward compatibility.
        warnings.warn("Dict.is_in_session is deprecated, "\
                      "please use Dict.is_added",
                      category=DeprecationWarning,stacklevel=2)
        self._check_this()
        word = EnchantStr(word)
        return _e.dict_is_in_session(self._this,word.encode())
    def store_replacement(self,mis,cor):
        """Store a replacement spelling for a miss-spelled word.
        This method makes a suggestion to the spellchecking engine that the
        miss-spelled word <mis> is in fact correctly spelled as <cor>. Such
        a suggestion will typically mean that <cor> appears early in the
        list of suggested spellings offered for later instances of <mis>.
        """
        if not mis:
            raise ValueError("can't store replacement for an empty string")
        if not cor:
            raise ValueError("can't store empty string as a replacement")
        self._check_this()
        mis = EnchantStr(mis)
        cor = EnchantStr(cor)
        _e.dict_store_replacement(self._this,mis.encode(),cor.encode())
    store_replacement._DOC_ERRORS = ["mis","mis"]
    def __describe(self,check_this=True):
        """Return a tuple describing the dictionary.
        This method returns a four-element tuple describing the underlying
        spellchecker system providing the dictionary. It will contain the
        following strings:
        * language tag
        * name of dictionary provider
        * description of dictionary provider
        * dictionary file
        Direct use of this method is not recommended - instead, access this
        information through the 'tag' and 'provider' attributes.
        """
        if check_this:
            self._check_this()
        _e.dict_describe(self._this,self.__describe_callback)
        return self.__describe_result
    def __describe_callback(self,tag,name,desc,file):
        """Collector callback for dictionary description.
        This method is used as a callback into the _enchant function
        'enchant_dict_describe'. It collects the given arguments in
        a tuple and stores them in the attribute '__describe_result'.
        """
        s = EnchantStr("")
        tag = s.decode(tag)
        name = s.decode(name)
        desc = s.decode(desc)
        file = s.decode(file)
        self.__describe_result = (tag,name,desc,file)
class DictWithPWL(Dict):
    """Dictionary with separately-managed personal word list.
    NOTE: As of version 1.4.0, enchant manages a per-user pwl and
    exclude list. This class is now only needed if you want
    to explicitly maintain a separate word list in addition to
    the default one.
    This class behaves as the standard Dict class, but also manages a
    personal word list stored in a separate file. The file must be
    specified at creation time by the 'pwl' argument to the constructor.
    Words added to the dictionary are automatically appended to the pwl file.
    A personal exclude list can also be managed, by passing another filename
    to the constructor in the optional 'pel' argument. If this is not given,
    requests to exclude words are ignored.
    If either 'pwl' or 'pel' are None, an in-memory word list is used.
    This will prevent calls to add() and remove() from affecting the user's
    default word lists.
    The Dict object managing the PWL is available as the 'pwl' attribute.
    The Dict object managing the PEL is available as the 'pel' attribute.
    To create a DictWithPWL from the user's default language, use None
    as the 'tag' argument.
    """
    _DOC_ERRORS = ["pel","pel","PEL","pel"]
    def __init__(self,tag,pwl=None,pel=None,broker=None):
        """DictWithPWL constructor.
        The argument 'pwl', if not None, names a file containing the
        personal word list. If this file does not exist, it is created
        with default permissions.
        The argument 'pel', if not None, names a file containing the personal
        exclude list. If this file does not exist, it is created with
        default permissions.
        """
        Dict.__init__(self,tag,broker)
        if pwl is not None:
            # Create an empty word-list file if none exists yet.
            if not os.path.exists(pwl):
                f = open(pwl,"wt")
                f.close()
                del f
            self.pwl = self._broker.request_pwl_dict(pwl)
        else:
            # In-memory word list: changes are not persisted to disk.
            self.pwl = PyPWL()
        if pel is not None:
            if not os.path.exists(pel):
                f = open(pel,"wt")
                f.close()
                del f
            self.pel = self._broker.request_pwl_dict(pel)
        else:
            self.pel = PyPWL()
    def _check_this(self,msg=None):
        """Extend Dict._check_this() to check PWL validity."""
        if self.pwl is None:
            self._free()
        if self.pel is None:
            self._free()
        Dict._check_this(self,msg)
        self.pwl._check_this(msg)
        self.pel._check_this(msg)
    def _free(self):
        """Extend Dict._free() to free the PWL as well."""
        if self.pwl is not None:
            self.pwl._free()
            self.pwl = None
        if self.pel is not None:
            self.pel._free()
            self.pel = None
        Dict._free(self)
    def check(self,word):
        """Check spelling of a word.
        This method takes a word in the dictionary language and returns
        True if it is correctly spelled, and false otherwise. It checks
        both the dictionary and the personal word list.
        """
        # The exclude list takes precedence: an excluded word is always
        # reported as misspelled, even if the main dictionary knows it.
        if self.pel.check(word):
            return False
        if self.pwl.check(word):
            return True
        if Dict.check(self,word):
            return True
        return False
    def suggest(self,word):
        """Suggest possible spellings for a word.
        This method tries to guess the correct spelling for a given
        word, returning the possibilities in a list.
        """
        suggs = Dict.suggest(self,word)
        suggs.extend([w for w in self.pwl.suggest(word) if w not in suggs])
        # Filter out any suggestion that appears in the exclude list,
        # iterating backwards so deletion does not disturb the indices.
        for i in range(len(suggs)-1,-1,-1):
            if self.pel.check(suggs[i]):
                del suggs[i]
        return suggs
    def add(self,word):
        """Add a word to the associated personal word list.
        This method adds the given word to the personal word list, and
        automatically saves the list to disk.
        """
        self._check_this()
        self.pwl.add(word)
        # Also drop the word from the exclude list so check() passes.
        self.pel.remove(word)
    def remove(self,word):
        """Add a word to the associated exclude list."""
        self._check_this()
        self.pwl.remove(word)
        self.pel.add(word)
    def add_to_pwl(self,word):
        """Add a word to the associated personal word list.
        This method adds the given word to the personal word list, and
        automatically saves the list to disk.
        """
        self._check_this()
        self.pwl.add_to_pwl(word)
        self.pel.remove(word)
    def is_added(self,word):
        """Check whether a word is in the personal word list."""
        self._check_this()
        return self.pwl.is_added(word)
    def is_removed(self,word):
        """Check whether a word is in the personal exclude list."""
        # A word counts as "removed" if it is present in the exclude list.
        self._check_this()
        return self.pel.is_added(word)
## Create a module-level default broker object, and make its important
## methods available at the module level.
# These bound methods let callers write e.g. enchant.dict_exists("en_US")
# without having to construct a Broker themselves.
_broker = Broker()
request_dict = _broker.request_dict
request_pwl_dict = _broker.request_pwl_dict
dict_exists = _broker.dict_exists
list_dicts = _broker.list_dicts
list_languages = _broker.list_languages
get_param = _broker.get_param
set_param = _broker.set_param
# Expose the "get_version" function.
def get_enchant_version():
    """Get the version string for the underlying enchant library."""
    # Delegates directly to the C-library binding.
    return _e.get_version()
# Run unit tests when called from command-line
if __name__ == "__main__":
    import sys
    import enchant.tests
    res = enchant.tests.runtestsuite()
    # Exit non-zero if any test errored or failed, zero otherwise.
    if len(res.errors) > 0 or len(res.failures) > 0:
        sys.exit(1)
    sys.exit(0)
| meska/pyenchant | enchant/__init__.py | Python | lgpl-2.1 | 35,386 | [
"VisIt"
] | 86e3d80c0e82604132bab9503b38bd250e901428364996e9ce44aed7f1f6fddc |
import sys
import os
import json
import csv
import shutil
import subprocess
def createSimulationDirectory(simulationid):
    """Create a clean working directory for one simulation run.

    The directory 'simulations/<simulationid>' is produced as a full copy
    of the jEPlus 'Box' template folder; any leftover directory from a
    previous run with the same id is removed first.  Returns the absolute
    path of the new directory.
    """
    # YZ: This directory is created by making a full copy of the 'Facade' folder in the jEPlus repository
    sim_dir = 'simulations/{0}'.format(simulationid)
    if os.path.exists(sim_dir):
        shutil.rmtree(sim_dir)
    shutil.copytree("../jEPlus/Box", sim_dir)  # YZ: check if it should "../../jEPlus/Facade"
    return os.path.abspath(sim_dir)
def convertDataToCSV(jsondata):
    """Convert a simulation-parameter dict into jEPlus job-list rows.

    *jsondata* is a dict carrying the web form's parameters:
    Height, Depth, Width, WinGR, Overhang, LFin, RFin, Orientation,
    WallType, WindowType, InfiltrationRate, InsulationLevel,
    Mvalue, Qvalue, OccupancyType, CoolingSP, HeatingSP.

    Two rows are produced, each a list of values in the order the jEPlus
    project expects its parameters (P1..P17):
    * 'job1' - the BioPCM case, with the wall built from the M/Q values;
    * 'job0' - a kludge noPCM comparison case using "WallAirGap".

    TODO: JobID/Terrain are currently hard-coded ('job1'/'job0', 'City')
    rather than taken from jsondata.
    """
    # Geometry tab (P1..P8).
    geometry = [
        jsondata["Height"],
        jsondata["Depth"],
        jsondata["Width"],
        jsondata["WinGR"],
        jsondata["Overhang"],
        jsondata["LFin"],
        jsondata["RFin"],
        jsondata["Orientation"],
    ]
    # Facade tab (P9..P12) - the P13 wall-fill value differs per row.
    facade = [
        jsondata["WallType"],
        jsondata["WindowType"],
        jsondata["InfiltrationRate"],
        jsondata["InsulationLevel"],
    ]
    # Activity tab (P14..P16).
    activity = [
        jsondata["OccupancyType"],
        jsondata["CoolingSP"],
        jsondata["HeatingSP"],
    ]
    # Site tab (P17) - hard-coded for now instead of jsondata["Terrain"].
    site = ['City']
    pcm_wall = "M" + jsondata["Mvalue"] + "Q" + jsondata["Qvalue"]
    # Columns: JobID, WeatherFile index (always 0), ModelFile index (always 0),
    # then the P1..P17 parameter values.
    pcm_row = ['job1', 0, 0] + geometry + facade + [pcm_wall] + activity + site
    nopcm_row = ['job0', 0, 0] + geometry + facade + ["WallAirGap"] + activity + site
    return [pcm_row, nopcm_row]
def createJobListFile(directory, jsondata):
    """Write the jEPlus job list ('joblist.csv') for this run.

    The rows come from convertDataToCSV(jsondata) and are written into
    *directory* with the comma/quote conventions the JESS client expects.
    """
    target = directory + '/joblist.csv'
    with open(target, 'w', newline='') as handle:
        writer = csv.writer(handle, delimiter=',', quotechar='\'',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerows(convertDataToCSV(jsondata))
# Main purpose of this function is to copy the selected weather file to in.epw in the simulation folder
def copySupportingFiles(simulationDirectory, jsondata):
    """Copy per-run support files into the simulation directory.

    Currently only the selected EnergyPlus weather file is copied, under
    the conventional name 'in.epw'.
    """
    # YZ: is the value in jsondata.weatherFile containing the full path to the weather file to copy?
    weather_source = jsondata['WeatherFile']
    weather_target = simulationDirectory + os.sep + 'in.epw'
    shutil.copyfile(weather_source, weather_target)
    # YZ: no need to copy the geometry file unless it has been altered for this case.
    # TODO: I think in version 1 we will always use the same idf file but it will get it's data from the parameter file.
    #       We need to replace the one that is currently there with the correct one that takes a parameter file.
    # shutil.copyfile('../idf/Geometry.idf', simulationDirectory + os.sep + 'in.idf') <-- No need to copy in.idf. The project folder contains the correct model file.
def executeSimulation(simulationDirectory, resultsDirectory):
    """Run the prepared jEPlus project through the JESS client.

    Invokes JESS_Client.jar on the single-case job list prepared in
    *simulationDirectory*, writing its results into *resultsDirectory*.
    The process changes into '../jess_client' for the duration of the
    call because the client is given relative paths to its config and
    log files.
    """
    olddir = os.getcwd()
    os.chdir('../jess_client')
    try:
        # Call JESS client to run a single case defined in the joblist.csv file
        subprocess.call(['java', '-jar', '../jess_client/JESS_Client.jar',
                         '-cfg', '../jess_client/client.cfg',
                         '-log', '../jess_client/log4j.cfg',
                         '-job', simulationDirectory,
                         '-type', 'JEPLUS_PROJECT',
                         '-subset', 'LIST_FILE',
                         '-subset_param', 'joblist.csv',
                         '-output', resultsDirectory])
    finally:
        # Bug fix: always restore the caller's working directory, even if
        # launching the subprocess raises (previously a failure left the
        # process stranded in ../jess_client).
        os.chdir(olddir)
# This is the main entry to execute simulation. Simulationid is used to name the working directory, and
# jsondata should contain all parameter values for the cases
def runSimulation(simulationid, jsondata):
    """Prepare, execute and report a single jEPlus/JESS simulation run.

    The simulation id names the working directory (a fresh copy of the
    jEPlus template folder); *jsondata* supplies every case parameter.
    The output directory and the derived-results CSV path are printed on
    stdout so the caller can locate the results.
    """
    work_dir = createSimulationDirectory(simulationid)
    out_dir = work_dir + os.sep + 'output'
    results_csv = out_dir + os.sep + 'AllDerivedResults.csv'
    # Stage the selected weather file, then the parameter (job list) file.
    copySupportingFiles(work_dir, jsondata)
    createJobListFile(work_dir, jsondata)
    # Hand the prepared folder to the JESS client for execution.
    executeSimulation(work_dir, out_dir)
    # Report the result locations back to the caller via stdout.
    print(out_dir)
    print(results_csv)
runSimulation(sys.argv[1], json.JSONDecoder().decode(sys.argv[2]))
| nrgsim/nrgsim | scripts/runsimulation.py | Python | agpl-3.0 | 6,184 | [
"EPW"
] | 086a4ff9edd671e09dcf1708cfe564d3f2cd993c16ff39d43cbaee7ea0e80215 |
from __future__ import annotations
import copy
import os
import pickle
import random
import pytest
from cctbx import sgtbx
from dxtbx.model import Crystal, Experiment, ExperimentList
from dxtbx.serialize import load
from dials.array_family import flex
def test_accessing_invalid_key_throws_keyerror():
    """Looking up a column that was never added raises KeyError with a
    message naming the missing column."""
    empty_table = flex.reflection_table()
    with pytest.raises(KeyError) as excinfo:
        empty_table["missing_key"]
    assert excinfo.value.args[0] == "Unknown column 'missing_key'"
def test_reflection_table_behaves_like_a_python_dictionary():
    """A reflection table supports the mapping protocol: len, keys, items,
    iteration, membership, subscripting and get-with-default."""
    def identical(flex1, flex2):
        # Element-wise equality of two flex arrays.
        return (flex1 == flex2).all_eq(True)
    flex_A = flex.int([1])
    flex_B = flex.int([2])
    table = flex.reflection_table([("A", flex_A), ("B", flex_B)])
    assert len(table) == 1
    assert list(table.keys()) == ["A", "B"]
    items = list(table.items())
    assert len(items) == 2
    # Fix: use the local 'identical' helper (it was previously defined
    # but never used, with the comparison written out inline instead).
    assert items[0][0] == "A" and identical(items[0][1], flex_A)
    assert items[1][0] == "B" and identical(items[1][1], flex_B)
    assert list(table) == ["A", "B"]
    assert "A" in table and "B" in table and "C" not in table
    assert identical(table["A"], flex_A)
    assert identical(table["B"], flex_B)
    assert table.get("A", -424242) == flex_A
    assert table.get("C", -424242) == -424242
def test_init():
    """Constructor variants: empty, row-count, column-initialised and
    inconsistently-sized columns."""
    # Default construction gives an empty but consistent table.
    tbl = flex.reflection_table()
    assert tbl.is_consistent()
    assert tbl.nrows() == 0
    assert tbl.ncols() == 0
    assert tbl.empty()
    # Construction with an explicit row count but no columns.
    tbl = flex.reflection_table(10)
    assert tbl.is_consistent()
    assert tbl.nrows() == 10
    assert tbl.ncols() == 0
    assert tbl.empty()
    # Construction from columns of matching length succeeds.
    tbl = flex.reflection_table(
        [
            ("col1", flex.int(10)),
            ("col2", flex.double(10)),
            ("col3", flex.std_string(10)),
        ]
    )
    assert tbl.is_consistent()
    assert tbl.nrows() == 10
    assert tbl.ncols() == 3
    assert not tbl.empty()
    # Columns of mismatched length are rejected outright.
    with pytest.raises(RuntimeError):
        flex.reflection_table(
            [
                ("col1", flex.int(10)),
                ("col2", flex.double(20)),
                ("col3", flex.std_string(10)),
            ]
        )
def test_resizing():
    """Adding columns, resizing, inconsistency detection and clearing."""
    # A table of two zero-length columns is non-empty but has no rows.
    tbl = flex.reflection_table()
    assert tbl.empty()
    tbl["col1"] = flex.int()
    tbl["col2"] = flex.double()
    assert tbl.nrows() == 0
    assert tbl.ncols() == 2
    assert not tbl.empty()
    assert "col1" in tbl
    assert "col2" in tbl
    assert "col3" not in tbl
    # Two columns of ten rows each.
    tbl = flex.reflection_table()
    tbl["col1"] = flex.int(10)
    tbl["col2"] = flex.double(10)
    assert tbl.nrows() == 10
    assert tbl.ncols() == 2
    # Adding a column of the wrong length must throw and leave the
    # table untouched.
    with pytest.raises(RuntimeError):
        tbl["col3"] = flex.std_string(20)
    assert tbl.nrows() == 10
    assert tbl.ncols() == 2
    assert tbl.is_consistent()
    assert len(tbl["col1"]) == 10
    assert len(tbl["col2"]) == 10
    assert len(tbl) == tbl.size()
    # resize() grows every column in step.
    tbl.resize(50)
    assert tbl.nrows() == 50
    assert tbl.ncols() == 2
    assert tbl.is_consistent()
    assert len(tbl["col1"]) == 50
    assert len(tbl["col2"]) == 50
    # Resizing one column directly makes the table inconsistent, and
    # nrows() then refuses to answer.
    tbl["col1"].resize(40)
    assert not tbl.is_consistent()
    with pytest.raises(Exception):
        tbl.nrows()
    assert tbl.ncols() == 2
    # clear() drops both rows and columns.
    tbl.clear()
    assert tbl.is_consistent()
    assert tbl.empty()
    assert tbl.nrows() == 0
    assert tbl.ncols() == 0
def test_delete():
    """Check deletion of a column by key, a row by index, and rows by
    slice (including the full-table slice)."""
    # Test del item
    table = flex.reflection_table()
    table["col1"] = flex.int([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    table["col2"] = flex.int([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    table["col3"] = flex.int([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    del table["col3"]
    assert table.is_consistent()
    assert table.nrows() == 10
    assert table.ncols() == 2
    assert "col3" not in table
    # Test del row
    del table[5]
    assert table.is_consistent()
    assert table.nrows() == 9
    assert table.ncols() == 2
    assert all(a == b for a, b in zip(list(table["col1"]), [0, 1, 2, 3, 4, 6, 7, 8, 9]))
    # Test del slice (every other remaining row)
    del table[0:10:2]
    assert table.is_consistent()
    assert table.nrows() == 4
    assert table.ncols() == 2
    assert all(a == b for a, b in zip(list(table["col1"]), [1, 3, 6, 8]))
    # Test del slice (all rows; columns remain, just emptied)
    del table[:]
    assert table.is_consistent()
    assert table.nrows() == 0
    assert table.ncols() == 2
def test_row_operations():
    """Check extend/append/insert/get/set of rows, mirroring each table
    operation on plain Python lists and comparing column contents.

    Missing keys in an appended/inserted row dict are filled with the
    column type's default (0 for numbers, "" for strings), as the
    expected lists below demonstrate.
    """
    # The columns as lists
    c1 = list(range(10))
    c2 = list(range(10))
    c3 = ["a", "b", "c", "d", "e", "f", "g", "i", "j", "k"]
    # Create a table with some elements
    table = flex.reflection_table()
    table["col1"] = flex.int(c1)
    table["col2"] = flex.double(c2)
    table["col3"] = flex.std_string(c3)
    # Extend the table (with itself: rows are duplicated)
    table.extend(table)
    c1 = c1 * 2
    c2 = c2 * 2
    c3 = c3 * 2
    assert table.nrows() == 20
    assert table.ncols() == 3
    assert table.is_consistent()
    assert all(a == b for a, b in zip(table["col1"], c1))
    assert all(a == b for a, b in zip(table["col2"], c2))
    assert all(a == b for a, b in zip(table["col3"], c3))
    # Append some rows to the table (partial row: col2/col3 defaulted)
    row = {"col1": 10}
    c1 = c1 + [10]
    c2 = c2 + [0]
    c3 = c3 + [""]
    table.append(row)
    assert table.nrows() == 21
    assert table.ncols() == 3
    assert table.is_consistent()
    assert all(a == b for a, b in zip(table["col1"], c1))
    assert all(a == b for a, b in zip(table["col2"], c2))
    assert all(a == b for a, b in zip(table["col3"], c3))
    row = {"col2": 11}
    c1 = c1 + [0]
    c2 = c2 + [11]
    c3 = c3 + [""]
    table.append(row)
    assert table.nrows() == 22
    assert table.ncols() == 3
    assert table.is_consistent()
    assert all(a == b for a, b in zip(table["col1"], c1))
    assert all(a == b for a, b in zip(table["col2"], c2))
    assert all(a == b for a, b in zip(table["col3"], c3))
    # Append a complete row
    row = {"col1": 12, "col2": 12, "col3": "l"}
    c1 = c1 + [12]
    c2 = c2 + [12]
    c3 = c3 + ["l"]
    table.append(row)
    assert table.nrows() == 23
    assert table.ncols() == 3
    assert table.is_consistent()
    assert all(a == b for a, b in zip(table["col1"], c1))
    assert all(a == b for a, b in zip(table["col2"], c2))
    assert all(a == b for a, b in zip(table["col3"], c3))
    # Try inserting some rows (partial row defaults again)
    row = {"col1": -1}
    c1.insert(5, -1)
    c2.insert(5, 0)
    c3.insert(5, "")
    table.insert(5, row)
    assert table.nrows() == 24
    assert table.ncols() == 3
    assert table.is_consistent()
    assert all(a == b for a, b in zip(table["col1"], c1))
    assert all(a == b for a, b in zip(table["col2"], c2))
    assert all(a == b for a, b in zip(table["col3"], c3))
    row = {"col1": -2, "col2": -3, "col3": "abc"}
    c1.insert(2, -2)
    c2.insert(2, -3)
    c3.insert(2, "abc")
    table.insert(2, row)
    assert table.nrows() == 25
    assert table.ncols() == 3
    assert table.is_consistent()
    assert all(a == b for a, b in zip(table["col1"], c1))
    assert all(a == b for a, b in zip(table["col2"], c2))
    assert all(a == b for a, b in zip(table["col3"], c3))
    # Try iterating through table rows
    for i in range(table.nrows()):
        row = table[i]
        assert row["col1"] == c1[i]
        assert row["col2"] == c2[i]
        assert row["col3"] == c3[i]
    # Trying setting some rows (partial set leaves other columns alone)
    row = {"col1": 100}
    table[2] = row
    assert table[2]["col1"] == 100
    assert table[2]["col2"] == c2[2]
    assert table[2]["col3"] == c3[2]
    row = {"col1": 1000, "col2": 2000, "col3": "hello"}
    table[10] = row
    assert table[10]["col1"] == 1000
    assert table[10]["col2"] == 2000
    assert table[10]["col3"] == "hello"
def test_iteration():
    """Exercise keys(), cols() and rows() iteration on a reflection table."""
    col_a = list(range(10))
    col_b = list(range(10))
    col_c = ["a", "b", "c", "d", "e", "f", "g", "i", "j", "k"]
    # Build a three-column table
    table = flex.reflection_table()
    table["col1"] = flex.int(col_a)
    table["col2"] = flex.double(col_b)
    table["col3"] = flex.std_string(col_c)
    # keys() yields each column name exactly once
    key_list = [key for key in table.keys()]
    assert len(key_list) == 3
    for name in ("col1", "col2", "col3"):
        assert key_list.count(name) == 1
    # cols() yields (name, column) pairs, one per column
    names = []
    columns = []
    for name, column in table.cols():
        names.append(name)
        columns.append(column)
    assert len(names) == 3
    for name in ("col1", "col2", "col3"):
        assert names.count(name) == 1
    # rows() yields row dicts in row order
    for actual, expected in zip(table.rows(), zip(col_a, col_b, col_c)):
        assert actual["col1"] == expected[0]
        assert actual["col2"] == expected[1]
        assert actual["col3"] == expected[2]
def test_slicing():
    """Check getting and setting row ranges via forward and backward
    extended slices, mirroring each operation on plain lists."""
    # The columns as lists
    c1 = list(range(10))
    c2 = list(range(10))
    c3 = ["a", "b", "c", "d", "e", "f", "g", "i", "j", "k"]
    # Create a table with some elements
    table = flex.reflection_table()
    table["col1"] = flex.int(c1)
    table["col2"] = flex.double(c2)
    table["col3"] = flex.std_string(c3)
    # Try forward slicing (rows 2, 4, 6)
    new_table = table[2:7:2]
    assert new_table.ncols() == 3
    assert new_table.nrows() == 3
    assert new_table.is_consistent()
    c11 = c1[2:7:2]
    c22 = c2[2:7:2]
    c33 = c3[2:7:2]
    assert all(a == b for a, b in zip(new_table["col1"], c11))
    assert all(a == b for a, b in zip(new_table["col2"], c22))
    assert all(a == b for a, b in zip(new_table["col3"], c33))
    # Try backward slicing (rows 7, 5, 3)
    new_table = table[7:2:-2]
    assert new_table.ncols() == 3
    assert new_table.nrows() == 3
    assert new_table.is_consistent()
    c11 = c1[7:2:-2]
    c22 = c2[7:2:-2]
    c33 = c3[7:2:-2]
    assert all(a == b for a, b in zip(new_table["col1"], c11))
    assert all(a == b for a, b in zip(new_table["col2"], c22))
    assert all(a == b for a, b in zip(new_table["col3"], c33))
    # Try setting forward slicing
    table[2:7:2] = new_table
    assert table.ncols() == 3
    assert table.nrows() == 10
    assert table.is_consistent()
    c1[2:7:2] = c11
    c2[2:7:2] = c22
    c3[2:7:2] = c33
    assert all(a == b for a, b in zip(table["col1"], c1))
    assert all(a == b for a, b in zip(table["col2"], c2))
    assert all(a == b for a, b in zip(table["col3"], c3))
    # Try setting backward slicing
    table[7:2:-2] = new_table
    assert table.ncols() == 3
    assert table.nrows() == 10
    assert table.is_consistent()
    c1[7:2:-2] = c11
    c2[7:2:-2] = c22
    c3[7:2:-2] = c33
    assert all(a == b for a, b in zip(table["col1"], c1))
    assert all(a == b for a, b in zip(table["col2"], c2))
    assert all(a == b for a, b in zip(table["col3"], c3))
def test_updating():
    """Check update() merges columns from another table, and that an
    update with mismatched row counts raises and leaves both tables
    in a consistent state."""
    # The columns as lists
    c1 = list(range(10))
    c2 = list(range(10))
    c3 = ["a", "b", "c", "d", "e", "f", "g", "i", "j", "k"]
    # Create a table with some elements
    table0 = flex.reflection_table()
    table1 = flex.reflection_table()
    table2 = flex.reflection_table()
    table1["col1"] = flex.int(c1)
    table1["col2"] = flex.double(c2)
    table2["col3"] = flex.std_string(c3)
    # Update from zero columns
    table0.update(table1)
    assert table0.is_consistent()
    assert table0.nrows() == 10
    assert table0.ncols() == 2
    # Update table1 with table2 columns (source table is unchanged)
    table1.update(table2)
    assert table1.is_consistent()
    assert table1.nrows() == 10
    assert table1.ncols() == 3
    assert table2.is_consistent()
    assert table2.nrows() == 10
    assert table2.ncols() == 1
    # Update table1 with an invalid (wrong-length) table
    c3 = ["a", "b", "c"]
    # Create a table with some elements
    table2 = flex.reflection_table()
    table2["col3"] = flex.std_string(c3)
    with pytest.raises(RuntimeError):
        table1.update(table2)
    # Both tables must be untouched after the failed update
    assert table1.is_consistent()
    assert table1.nrows() == 10
    assert table1.ncols() == 3
    assert table2.is_consistent()
    assert table2.nrows() == 3
    assert table2.ncols() == 1
def test_select():
    """Check select() with column-name tuples/arrays and row selections
    by size_t index array and by bool mask."""
    # The columns as lists
    c1 = list(range(10))
    c2 = list(range(10))
    c3 = ["a", "b", "c", "d", "e", "f", "g", "i", "j", "k"]
    # Create a table with some elements
    table = flex.reflection_table()
    table["col1"] = flex.int(c1)
    table["col2"] = flex.double(c2)
    table["col3"] = flex.std_string(c3)
    # Select some columns (by Python tuple of names)
    new_table = table.select(("col1", "col2"))
    assert new_table.nrows() == 10
    assert new_table.ncols() == 2
    assert all(a == b for a, b in zip(new_table["col1"], c1))
    assert all(a == b for a, b in zip(new_table["col2"], c2))
    # Select some columns (by flex string array of names)
    new_table = table.select(flex.std_string(["col1", "col2"]))
    assert new_table.nrows() == 10
    assert new_table.ncols() == 2
    assert all(a == b for a, b in zip(new_table["col1"], c1))
    assert all(a == b for a, b in zip(new_table["col2"], c2))
    # Select some rows (by index array)
    index = flex.size_t([0, 1, 5, 8, 9])
    cc1 = [c1[i] for i in index]
    cc2 = [c2[i] for i in index]
    cc3 = [c3[i] for i in index]
    new_table = table.select(index)
    assert new_table.nrows() == 5
    assert new_table.ncols() == 3
    assert all(a == b for a, b in zip(new_table["col1"], cc1))
    assert all(a == b for a, b in zip(new_table["col2"], cc2))
    assert all(a == b for a, b in zip(new_table["col3"], cc3))
    # Select some rows (by bool mask matching the same indices)
    index = flex.bool([True, True, False, False, False, True, False, False, True, True])
    new_table = table.select(index)
    assert new_table.nrows() == 5
    assert new_table.ncols() == 3
    assert all(a == b for a, b in zip(new_table["col1"], cc1))
    assert all(a == b for a, b in zip(new_table["col2"], cc2))
    assert all(a == b for a, b in zip(new_table["col3"], cc3))
def test_set_selected():
    """Check set_selected() copying columns by name (tuple or flex
    string array) and copying rows at selected indices."""
    # The columns as lists
    c1 = list(range(10))
    c2 = list(range(10))
    c3 = ["a", "b", "c", "d", "e", "f", "g", "i", "j", "k"]
    # Create a table with some elements
    table1 = flex.reflection_table()
    table2 = flex.reflection_table()
    table1["col1"] = flex.int(c1)
    table2["col2"] = flex.double(c2)
    table2["col3"] = flex.std_string(c3)
    # Set selected columns (by tuple of names)
    table1.set_selected(("col3", "col2"), table2)
    assert table1.nrows() == 10
    assert table1.ncols() == 3
    assert all(a == b for a, b in zip(table1["col1"], c1))
    assert all(a == b for a, b in zip(table1["col2"], c2))
    assert all(a == b for a, b in zip(table1["col3"], c3))
    # Set selected columns (by flex string array of names)
    table1 = flex.reflection_table()
    table1["col1"] = flex.int(c1)
    table1.set_selected(flex.std_string(["col3", "col2"]), table2)
    assert table1.nrows() == 10
    assert table1.ncols() == 3
    assert all(a == b for a, b in zip(table1["col1"], c1))
    assert all(a == b for a, b in zip(table1["col2"], c2))
    assert all(a == b for a, b in zip(table1["col3"], c3))
    cc1 = list(range(10, 15))
    cc2 = list(range(10, 15))
    cc3 = ["l", "m", "n", "o", "p"]
    # Set selected rows: row j of table2 overwrites row index[j] of table1
    table2 = flex.reflection_table()
    table2["col1"] = flex.int(cc1)
    table2["col2"] = flex.double(cc2)
    table2["col3"] = flex.std_string(cc3)
    index = flex.size_t([0, 1, 5, 8, 9])
    ccc1 = copy.deepcopy(c1)
    ccc2 = copy.deepcopy(c2)
    ccc3 = copy.deepcopy(c3)
    for j, i in enumerate(index):
        ccc1[i] = cc1[j]
        ccc2[i] = cc2[j]
        ccc3[i] = cc3[j]
    table1.set_selected(index, table2)
    assert all(a == b for a, b in zip(table1["col1"], ccc1))
    assert all(a == b for a, b in zip(table1["col2"], ccc2))
    assert all(a == b for a, b in zip(table1["col3"], ccc3))
    # Set selected rows (repeat with a fresh source table; idempotent)
    table2 = flex.reflection_table()
    table2["col1"] = flex.int(cc1)
    table2["col2"] = flex.double(cc2)
    table2["col3"] = flex.std_string(cc3)
    table1.set_selected(index, table2)
    assert all(a == b for a, b in zip(table1["col1"], ccc1))
    assert all(a == b for a, b in zip(table1["col2"], ccc2))
    assert all(a == b for a, b in zip(table1["col3"], ccc3))
def test_del_selected():
    """Check del_selected() removing columns by name (tuple or flex
    string array) and removing rows at selected indices."""
    # The columns as lists
    c1 = list(range(10))
    c2 = list(range(10))
    c3 = ["a", "b", "c", "d", "e", "f", "g", "i", "j", "k"]
    # Create a table with some elements
    table1 = flex.reflection_table()
    table1["col1"] = flex.int(c1)
    table1["col2"] = flex.double(c2)
    table1["col3"] = flex.std_string(c3)
    # Del selected columns (by tuple of names)
    table1.del_selected(("col3", "col2"))
    assert table1.nrows() == 10
    assert table1.ncols() == 1
    assert "col1" in table1
    assert "col2" not in table1
    assert "col3" not in table1
    assert all(a == b for a, b in zip(table1["col1"], c1))
    # Del selected columns (by flex string array of names)
    table1 = flex.reflection_table()
    table1["col1"] = flex.int(c1)
    table1["col2"] = flex.double(c2)
    table1["col3"] = flex.std_string(c3)
    table1.del_selected(flex.std_string(["col3", "col2"]))
    assert table1.nrows() == 10
    assert table1.ncols() == 1
    assert "col1" in table1
    assert "col2" not in table1
    assert "col3" not in table1
    assert all(a == b for a, b in zip(table1["col1"], c1))
    # Del selected rows: expected result keeps only the unselected indices
    table1 = flex.reflection_table()
    table1["col1"] = flex.int(c1)
    table1["col2"] = flex.double(c2)
    table1["col3"] = flex.std_string(c3)
    index = flex.size_t([0, 1, 5, 8, 9])
    index2 = list(range(10))
    for i in index:
        index2.remove(i)
    ccc1 = [c1[i] for i in index2]
    ccc2 = [c2[i] for i in index2]
    ccc3 = [c3[i] for i in index2]
    table1.del_selected(index)
    assert table1.nrows() == len(ccc1)
    assert all(a == b for a, b in zip(table1["col1"], ccc1))
    assert all(a == b for a, b in zip(table1["col2"], ccc2))
    assert all(a == b for a, b in zip(table1["col3"], ccc3))
    # Del selected rows (repeat on a freshly built table)
    table1 = flex.reflection_table()
    table1["col1"] = flex.int(c1)
    table1["col2"] = flex.double(c2)
    table1["col3"] = flex.std_string(c3)
    table1.del_selected(index)
    assert table1.nrows() == len(ccc1)
    assert all(a == b for a, b in zip(table1["col1"], ccc1))
    assert all(a == b for a, b in zip(table1["col2"], ccc2))
    assert all(a == b for a, b in zip(table1["col3"], ccc3))
def test_sort():
    """Verify sorting by int, vec2_double and miller_index columns,
    including a custom element ordering for miller indices."""
    table = flex.reflection_table()
    table["a"] = flex.int([2, 4, 3, 1, 5, 6])
    table["b"] = flex.vec2_double([(3, 2), (3, 1), (1, 3), (4, 5), (4, 3), (2, 0)])
    table["c"] = flex.miller_index(
        [(3, 2, 1), (3, 1, 1), (2, 4, 2), (2, 1, 1), (1, 1, 1), (1, 1, 2)]
    )
    # int column: plain ascending order
    table.sort("a")
    assert list(table["a"]) == sorted([2, 4, 3, 1, 5, 6])
    # vec2 column: lexicographic over the pair elements
    table.sort("b")
    assert list(table["b"]) == sorted([(3, 2), (3, 1), (1, 3), (4, 5), (4, 3), (2, 0)])
    # miller index column: lexicographic over (h, k, l)
    table.sort("c")
    assert list(table["c"]) == sorted(
        [(3, 2, 1), (3, 1, 1), (2, 4, 2), (2, 1, 1), (1, 1, 1), (1, 1, 2)]
    )
    # custom order: compare elements in position order 1, 2, 0
    table.sort("c", order=(1, 2, 0))
    assert list(table["c"]) == [
        (1, 1, 1),
        (2, 1, 1),
        (3, 1, 1),
        (1, 1, 2),
        (3, 2, 1),
        (2, 4, 2),
    ]
def test_flags():
    """Check set_flags/unset_flags/get_flags on the bitmask 'flags'
    column, including combined flag values and the all=False (any-bit)
    query mode."""
    # Create a table with flags all 0
    table = flex.reflection_table()
    table["flags"] = flex.size_t(5, 0)
    # Get all the flags
    f1 = table.get_flags(table.flags.predicted)
    assert f1.count(True) == 0
    # Set some flags
    mask = flex.bool([True, True, False, False, True])
    table.set_flags(mask, table.flags.predicted)
    f1 = table.get_flags(table.flags.predicted)
    assert f1.count(True) == 3
    assert all(f11 == f22 for f11, f22 in zip(f1, mask))
    # Combined query defaults to "all bits set": observed was never set
    f2 = table.get_flags(table.flags.predicted | table.flags.observed)
    assert f2.count(True) == 0
    # Unset the flags (everywhere)
    mask = flex.bool(5, True)
    table.unset_flags(mask, table.flags.predicted | table.flags.observed)
    f1 = table.get_flags(table.flags.predicted)
    assert f1.count(True) == 0
    flags = table["flags"]
    assert all(f == 0 for f in flags)
    # Set multiple flags in a single call
    mask = flex.bool([True, True, False, False, True])
    table.set_flags(mask, table.flags.predicted | table.flags.observed)
    f1 = table.get_flags(table.flags.predicted)
    f2 = table.get_flags(table.flags.observed)
    assert f1.count(True) == 3
    assert f2.count(True) == 3
    mask = flex.bool([False, True, True, True, False])
    table.set_flags(mask, table.flags.integrated)
    f1 = table.get_flags(table.flags.predicted)
    f2 = table.get_flags(table.flags.observed)
    f3 = table.get_flags(table.flags.integrated)
    # all-bits query: only row 1 has both integrated and predicted
    f4 = table.get_flags(table.flags.integrated | table.flags.predicted)
    assert f1.count(True) == 3
    assert f2.count(True) == 3
    assert f3.count(True) == 3
    assert f4.count(True) == 1
    # Get where any are set (all=False): union instead of intersection
    f1 = table.get_flags(table.flags.predicted, all=False)
    f2 = table.get_flags(table.flags.observed, all=False)
    f3 = table.get_flags(table.flags.integrated, all=False)
    f4 = table.get_flags(table.flags.integrated | table.flags.predicted, all=False)
    assert f1.count(True) == 3
    assert f2.count(True) == 3
    assert f3.count(True) == 3
    assert f4.count(True) == 5
def test_serialize():
    """A pickle round trip must preserve table shape and column contents."""
    expected = {
        "col1": list(range(10)),
        "col2": list(range(10)),
        "col3": ["a", "b", "c", "d", "e", "f", "g", "i", "j", "k"],
    }
    table = flex.reflection_table()
    table["col1"] = flex.int(expected["col1"])
    table["col2"] = flex.double(expected["col2"])
    table["col3"] = flex.std_string(expected["col3"])
    # Round trip through pickle and compare every column
    restored = pickle.loads(pickle.dumps(table))
    assert restored.is_consistent()
    assert restored.nrows() == 10
    assert restored.ncols() == 3
    for key, values in expected.items():
        assert list(restored[key]) == values
def test_copy():
    """Check shallow copy shares column storage with the original while
    deepcopy and copy() produce independent tables."""
    # Create a table
    table = flex.reflection_table([("col1", flex.int(range(10)))])
    # Make a shallow copy of the table: adding a column through the
    # copy is visible in the original (shared column map)
    shallow = copy.copy(table)
    shallow["col2"] = flex.double(range(10))
    assert table.ncols() == 2
    assert table.is_consistent()
    # Make a deep copy of the table: new columns stay private
    deep = copy.deepcopy(table)
    deep["col3"] = flex.std_string(10)
    assert table.ncols() == 2
    assert deep.ncols() == 3
    assert table.is_consistent()
    assert deep.is_consistent()
    # table.copy() behaves like a deep copy here
    table2 = table.copy()
    table2["col3"] = flex.std_string(10)
    assert table.ncols() == 2
    assert table2.ncols() == 3
    assert table.is_consistent()
    assert table2.is_consistent()
def test_extract_shoeboxes():
    """Check extract_shoeboxes() fills shoebox data/mask from a fake
    two-panel imageset, including zero-fill outside the image bounds.

    Panel p of frame f contains pixel values data + f * (p + 1), which
    lets the per-pixel check below recompute the expected value.
    """
    from dials.algorithms.shoebox import MaskCode
    random.seed(0)  # deterministic bboxes across runs
    reflections = flex.reflection_table()
    reflections["panel"] = flex.size_t()
    reflections["bbox"] = flex.int6()
    npanels = 2
    width = 1000
    height = 1000
    frame0 = 10
    frame1 = 100
    nrefl = 1000
    # Generate random bounding boxes; x/y may extend past the detector
    # edge (negative or >= width/height) but z stays within the scan
    for i in range(nrefl):
        xs = random.randint(5, 10)
        ys = random.randint(5, 10)
        x0 = random.randint(-xs + 1, width - 1)
        y0 = random.randint(-ys + 1, height - 1)
        z0 = random.randint(frame0, frame1 - 1)
        x1 = x0 + xs
        y1 = y0 + ys
        z1 = min([z0 + random.randint(1, 10), frame1])
        assert x1 > x0
        assert y1 > y0
        assert z1 > z0
        assert z0 >= frame0 and z1 <= frame1
        bbox = (x0, x1, y0, y1, z0, z1)
        reflections.append({"panel": random.randint(0, 1), "bbox": bbox})
    reflections["shoebox"] = flex.shoebox(reflections["panel"], reflections["bbox"])
    reflections["shoebox"].allocate()
    # Minimal stand-in for an imageset: a single int image per panel,
    # offset per frame so each (panel, frame) pixel value is unique
    class FakeImageSet:
        def __init__(self):
            self.data = flex.int(range(height * width))
            self.data.reshape(flex.grid(height, width))
        def get_array_range(self):
            return (frame0, frame1)
        def get_detector(self):
            class FakeDetector:
                def __len__(self):
                    return npanels
                def __getitem__(self, index):
                    class FakePanel:
                        def get_trusted_range(self):
                            return (-1, 1000000)
                    return FakePanel()
            return FakeDetector()
        def __len__(self):
            return frame1 - frame0
        def __getitem__(self, index):
            f = frame0 + index
            return (self.data + f * 1, self.data + f * 2)
        def get_corrected_data(self, index):
            f = frame0 + index
            return (self.data + f * 1, self.data + f * 2)
        def get_mask(self, index):
            image = self.get_corrected_data(index)
            return tuple(im >= 0 for im in image)
    imageset = FakeImageSet()
    reflections.extract_shoeboxes(imageset)
    # Verify every shoebox pixel: in-bounds pixels must match the fake
    # image formula and be marked Valid; out-of-bounds pixels stay 0
    for i in range(len(reflections)):
        sbox = reflections[i]["shoebox"]
        assert sbox.is_consistent()
        mask = sbox.mask
        data = sbox.data
        bbox = sbox.bbox
        panel = sbox.panel
        x0, x1, y0, y1, z0, z1 = bbox
        for z in range(z1 - z0):
            for y in range(y1 - y0):
                for x in range(x1 - x0):
                    v1 = data[z, y, x]
                    m1 = mask[z, y, x]
                    if (
                        x0 + x >= 0
                        and y0 + y >= 0
                        and x0 + x < width
                        and y0 + y < height
                    ):
                        v2 = imageset.data[y + y0, x + x0] + (z + z0) * (panel + 1)
                        m2 = MaskCode.Valid
                        assert v1 == v2
                        assert m1 == m2
                    else:
                        assert v1 == 0
                        assert m1 == 0
def test_split_by_experiment_id():
    """split_by_experiment_id() must partition rows by 'id' value and,
    when set, carry experiment identifiers into each sub-table."""
    refl = flex.reflection_table()
    refl["id"] = flex.int()
    ids = [0, 1, 2, 3, 5]
    # 100 rows per experiment id, interleaved
    for _ in range(100):
        for eid in ids:
            refl.append({"id": eid})
    result = refl.split_by_experiment_id()
    assert len(result) == 5
    for sub, eid in zip(result, ids):
        assert len(sub) == 100
        assert sub["id"].count(eid) == 100
    # Repeat with experiment_identifiers() set - keep separate as the
    # function must work with and without the identifier map
    for eid in ids:
        refl.experiment_identifiers()[eid] = str(eid)
    result = refl.split_by_experiment_id()
    assert len(result) == 5
    for sub, eid in zip(result, ids):
        assert len(sub) == 100
        assert sub["id"].count(eid) == 100
        assert list(sub.experiment_identifiers().keys()) == [eid]
        assert list(sub.experiment_identifiers().values()) == [str(eid)]
def test_split_indices_by_experiment_id():
    """split_indices_by_experiment_id(n) returns one index array per
    experiment id in [0, n), empty where no rows carry that id."""
    refl = flex.reflection_table()
    refl["id"] = flex.int()
    # 100 rows per id; id 4 is deliberately absent
    for _ in range(100):
        for eid in (0, 1, 2, 3, 5):
            refl.append({"id": eid})
    index_list = refl.split_indices_by_experiment_id(6)
    assert len(index_list) == 6
    expected_counts = [100, 100, 100, 100, 0, 100]
    for eid, (indices, count) in enumerate(zip(index_list, expected_counts)):
        assert len(indices) == count
        assert refl.select(indices)["id"].count(eid) == count
def test_split_partials():
    """Check split_partials() expands each reflection into one row per
    frame of its bbox, copying the other columns and recording the
    originating row in 'partial_id'."""
    r = flex.reflection_table()
    r["value1"] = flex.double()
    r["value2"] = flex.int()
    r["value3"] = flex.double()
    r["bbox"] = flex.int6()
    expected = []
    # Build random reflections and the per-frame rows we expect back
    for i in range(100):
        x0 = random.randint(0, 100)
        x1 = x0 + random.randint(1, 10)
        y0 = random.randint(0, 100)
        y1 = y0 + random.randint(1, 10)
        z0 = random.randint(0, 100)
        z1 = z0 + random.randint(1, 10)
        v1 = random.uniform(0, 100)
        v2 = random.randint(0, 100)
        v3 = random.uniform(0, 100)
        r.append(
            {"value1": v1, "value2": v2, "value3": v3, "bbox": (x0, x1, y0, y1, z0, z1)}
        )
        for z in range(z0, z1):
            expected.append(
                {
                    "value1": v1,
                    "value2": v2,
                    "value3": v3,
                    "bbox": (x0, x1, y0, y1, z, z + 1),
                    "partial_id": i,
                }
            )
    r.split_partials()
    assert len(r) == len(expected)
    EPS = 1e-7  # tolerance for double-valued columns
    for r1, r2 in zip(r.rows(), expected):
        assert abs(r1["value1"] - r2["value1"]) < EPS
        assert r1["value2"] == r2["value2"]
        assert abs(r1["value3"] - r2["value3"]) < EPS
        assert r1["bbox"] == r2["bbox"]
        assert r1["partial_id"] == r2["partial_id"]
def test_split_partials_with_shoebox():
    """Check split_partials_with_shoebox() expands each reflection into
    one row per frame, slicing the shoebox so each new single-frame
    shoebox carries that frame's data."""
    from dials.model.data import Shoebox
    r = flex.reflection_table()
    r["value1"] = flex.double()
    r["value2"] = flex.int()
    r["value3"] = flex.double()
    r["bbox"] = flex.int6()
    r["panel"] = flex.size_t()
    r["shoebox"] = flex.shoebox()
    expected = []
    # Build random reflections with deterministic shoebox contents
    # (pixel value = x + y*w + z*w*h) plus the expected per-frame rows
    for i in range(100):
        x0 = random.randint(0, 100)
        x1 = x0 + random.randint(1, 10)
        y0 = random.randint(0, 100)
        y1 = y0 + random.randint(1, 10)
        z0 = random.randint(0, 100)
        z1 = z0 + random.randint(1, 10)
        v1 = random.uniform(0, 100)
        v2 = random.randint(0, 100)
        v3 = random.uniform(0, 100)
        sbox = Shoebox(0, (x0, x1, y0, y1, z0, z1))
        sbox.allocate()
        assert sbox.is_consistent()
        w = x1 - x0
        h = y1 - y0
        for z in range(z0, z1):
            for y in range(y0, y1):
                for x in range(x0, x1):
                    sbox.data[z - z0, y - y0, x - x0] = x + y * w + z * w * h
        r.append(
            {
                "value1": v1,
                "value2": v2,
                "value3": v3,
                "bbox": (x0, x1, y0, y1, z0, z1),
                "panel": 0,
                "shoebox": sbox,
            }
        )
        # Expected: one single-frame shoebox per z slice of the original
        for z in range(z0, z1):
            sbox = Shoebox(0, (x0, x1, y0, y1, z, z + 1))
            sbox.allocate()
            assert sbox.is_consistent()
            w = x1 - x0
            h = y1 - y0
            for y in range(y0, y1):
                for x in range(x0, x1):
                    sbox.data[0, y - y0, x - x0] = x + y * w + z * w * h
            expected.append(
                {
                    "value1": v1,
                    "value2": v2,
                    "value3": v3,
                    "bbox": (x0, x1, y0, y1, z, z + 1),
                    "partial_id": i,
                    "panel": 0,
                    "shoebox": sbox,
                }
            )
    r.split_partials_with_shoebox()
    assert len(r) == len(expected)
    EPS = 1e-7  # tolerance for double-valued columns
    for r1, r2 in zip(r.rows(), expected):
        assert abs(r1["value1"] - r2["value1"]) < EPS
        assert r1["value2"] == r2["value2"]
        assert abs(r1["value3"] - r2["value3"]) < EPS
        assert r1["bbox"] == r2["bbox"]
        assert r1["partial_id"] == r2["partial_id"]
        assert r1["panel"] == r2["panel"]
        assert (
            r1["shoebox"]
            .data.as_double()
            .as_1d()
            .all_approx_equal(r2["shoebox"].data.as_double().as_1d())
        )
def test_find_overlapping():
    """Check find_overlaps(): every reported edge must join two
    reflections on the same panel and imageset whose (optionally
    border-expanded) bboxes actually intersect."""
    N = 10000
    r = flex.reflection_table(N)
    r["bbox"] = flex.int6(N)
    r["panel"] = flex.size_t(N)
    r["id"] = flex.int(N)
    r["imageset_id"] = flex.int(N)
    # Random bboxes spread over 3 panels and 3 imageset ids
    for i in range(N):
        x0 = random.randint(0, 100)
        x1 = random.randint(1, 10) + x0
        y0 = random.randint(0, 100)
        y1 = random.randint(1, 10) + y0
        z0 = random.randint(0, 100)
        z1 = random.randint(1, 10) + z0
        panel = random.randint(0, 2)
        pid = random.randint(0, 2)
        r["bbox"][i] = (x0, x1, y0, y1, z0, z1)
        r["panel"][i] = panel
        r["id"][i] = pid
        r["imageset_id"][i] = pid
    def is_overlap(b0, b1, border):
        # Grow both boxes by `border` in every direction, then test
        # 3D intersection via the standard separating-interval check
        b0 = (
            b0[0] - border,
            b0[1] + border,
            b0[2] - border,
            b0[3] + border,
            b0[4] - border,
            b0[5] + border,
        )
        b1 = (
            b1[0] - border,
            b1[1] + border,
            b1[2] - border,
            b1[3] + border,
            b1[4] - border,
            b1[5] + border,
        )
        if not (
            b1[0] > b0[1]
            or b1[1] < b0[0]
            or b1[2] > b0[3]
            or b1[3] < b0[2]
            or b1[4] > b0[5]
            or b1[5] < b0[4]
        ):
            return True
        return False
    # Check the overlap graph for several border sizes
    for i in [0, 2, 5]:
        overlaps = r.find_overlaps(border=i)
        for item in overlaps.edges():
            i0 = overlaps.source(item)
            i1 = overlaps.target(item)
            r0 = r[i0]
            r1 = r[i1]
            p0 = r0["panel"]
            p1 = r1["panel"]
            b0 = r0["bbox"]
            b1 = r1["bbox"]
            j0 = r0["imageset_id"]
            j1 = r1["imageset_id"]
            assert j0 == j1
            assert p0 == p1
            assert is_overlap(b0, b1, i)
def test_to_from_msgpack(tmpdir):
    """Check msgpack round trips (in-memory bytes and on-disk file)
    preserve all supported column types, including shoeboxes.

    Args:
        tmpdir: pytest-provided temporary directory fixture.
    """
    from dials.model.data import Shoebox
    def gen_shoebox():
        # A tiny 4x3x1 shoebox with deterministic data/mask/background
        shoebox = Shoebox(0, (0, 4, 0, 3, 0, 1))
        shoebox.allocate()
        for k in range(1):
            for j in range(3):
                for i in range(4):
                    shoebox.data[k, j, i] = i + j + k + 0.1
                    shoebox.mask[k, j, i] = i % 2
                    shoebox.background[k, j, i] = i * j + 0.2
        return shoebox
    def compare(a, b):
        # Shoebox equality: panel, bbox and all three pixel arrays
        # (data/background within 1e-9, mask exactly)
        assert a.is_consistent()
        assert b.is_consistent()
        assert a.panel == b.panel
        assert a.bbox == b.bbox
        for aa, bb in zip(a.data, b.data):
            if abs(aa - bb) > 1e-9:
                return False
        for aa, bb in zip(a.background, b.background):
            if abs(aa - bb) > 1e-9:
                return False
        for aa, bb in zip(a.mask, b.mask):
            if aa != bb:
                return False
        return True
    # The columns as lists - one per supported column type
    c1 = list(range(10))
    c2 = list(range(10))
    c3 = ["a", "b", "c", "d", "e", "f", "g", "i", "j", "k"]
    c4 = [True, False, True, False, True] * 2
    c5 = list(range(10))
    c6 = [(i + 1, i + 2) for i in range(10)]
    c7 = [(i + 1, i + 2, i + 3) for i in range(10)]
    c8 = [tuple(i + j for j in range(9)) for i in range(10)]
    c9 = [tuple(i + j for j in range(6)) for i in range(10)]
    c10 = [(i + 1, i + 2, i + 3) for i in range(10)]
    c11 = [gen_shoebox() for i in range(10)]
    # Create a table with some elements
    table = flex.reflection_table()
    table["col1"] = flex.int(c1)
    table["col2"] = flex.double(c2)
    table["col3"] = flex.std_string(c3)
    table["col4"] = flex.bool(c4)
    table["col5"] = flex.size_t(c5)
    table["col6"] = flex.vec2_double(c6)
    table["col7"] = flex.vec3_double(c7)
    table["col8"] = flex.mat3_double(c8)
    table["col9"] = flex.int6(c9)
    table["col10"] = flex.miller_index(c10)
    table["col11"] = flex.shoebox(c11)
    # Round trip via in-memory msgpack bytes
    obj = table.as_msgpack()
    new_table = flex.reflection_table.from_msgpack(obj)
    assert new_table.is_consistent()
    assert new_table.nrows() == 10
    assert new_table.ncols() == 11
    assert all(tuple(a == b for a, b in zip(new_table["col1"], c1)))
    assert all(tuple(a == b for a, b in zip(new_table["col2"], c2)))
    assert all(tuple(a == b for a, b in zip(new_table["col3"], c3)))
    assert all(tuple(a == b for a, b in zip(new_table["col4"], c4)))
    assert all(tuple(a == b for a, b in zip(new_table["col5"], c5)))
    assert all(tuple(a == b for a, b in zip(new_table["col6"], c6)))
    assert all(tuple(a == b for a, b in zip(new_table["col7"], c7)))
    assert all(tuple(a == b for a, b in zip(new_table["col8"], c8)))
    assert all(tuple(a == b for a, b in zip(new_table["col9"], c9)))
    assert all(tuple(a == b for a, b in zip(new_table["col10"], c10)))
    assert all(tuple(compare(a, b) for a, b in zip(new_table["col11"], c11)))
    # Round trip via a msgpack file on disk
    table.as_msgpack_file(tmpdir.join("reflections.mpack").strpath)
    new_table = flex.reflection_table.from_msgpack_file(
        tmpdir.join("reflections.mpack").strpath
    )
    assert new_table.is_consistent()
    assert new_table.nrows() == 10
    assert new_table.ncols() == 11
    assert all(tuple(a == b for a, b in zip(new_table["col1"], c1)))
    assert all(tuple(a == b for a, b in zip(new_table["col2"], c2)))
    assert all(tuple(a == b for a, b in zip(new_table["col3"], c3)))
    assert all(tuple(a == b for a, b in zip(new_table["col4"], c4)))
    assert all(tuple(a == b for a, b in zip(new_table["col5"], c5)))
    assert all(tuple(a == b for a, b in zip(new_table["col6"], c6)))
    assert all(tuple(a == b for a, b in zip(new_table["col7"], c7)))
    assert all(tuple(a == b for a, b in zip(new_table["col8"], c8)))
    assert all(tuple(a == b for a, b in zip(new_table["col9"], c9)))
    assert all(tuple(a == b for a, b in zip(new_table["col10"], c10)))
    assert all(tuple(compare(a, b) for a, b in zip(new_table["col11"], c11)))
def test_experiment_identifiers():
    """Check the experiment_identifiers() id->string map: access,
    iteration, consistency checks against an ExperimentList, pickle and
    msgpack round trips, and merging via extend()."""
    from dxtbx.model import Experiment, ExperimentList
    table = flex.reflection_table()
    table["id"] = flex.int([0, 1, 2, 3])
    # Consistent even before any identifiers are set
    table.assert_experiment_identifiers_are_consistent()
    identifiers = table.experiment_identifiers()
    identifiers[0] = "abcd"
    identifiers[1] = "efgh"
    identifiers[2] = "ijkl"
    identifiers[3] = "mnop"
    assert identifiers[0] == "abcd"
    assert identifiers[1] == "efgh"
    assert identifiers[2] == "ijkl"
    assert identifiers[3] == "mnop"
    # Iterating the map yields (id, identifier) pairs
    for k, v in identifiers:
        if k == 0:
            assert v == "abcd"
        if k == 1:
            assert v == "efgh"
        if k == 2:
            assert v == "ijkl"
        if k == 3:
            assert v == "mnop"
    assert tuple(identifiers.keys()) == (0, 1, 2, 3)
    assert tuple(identifiers.values()) == ("abcd", "efgh", "ijkl", "mnop")
    table.assert_experiment_identifiers_are_consistent()
    experiments = ExperimentList()
    experiments.append(Experiment(identifier="abcd"))
    experiments.append(Experiment(identifier="efgh"))
    experiments.append(Experiment(identifier="ijkl"))
    experiments.append(Experiment(identifier="mnop"))
    table.assert_experiment_identifiers_are_consistent()
    # A mismatching experiment list must fail the consistency check
    experiments = ExperimentList()
    experiments.append(Experiment(identifier="abcd"))
    experiments.append(Experiment(identifier="efgh"))
    experiments.append(Experiment(identifier="ijkl"))
    experiments.append(Experiment(identifier="mnop"))
    experiments[3].identifier = "ijkl"
    with pytest.raises(AssertionError):
        table.assert_experiment_identifiers_are_consistent(experiments)
    experiments[2].identifier = "mnop"
    table.assert_experiment_identifiers_are_consistent(experiments)
    # Duplicate identifiers in the table must also fail
    identifiers = table.experiment_identifiers()
    identifiers[0] = "abcd"
    identifiers[1] = "efgh"
    identifiers[2] = "ijkl"
    identifiers[3] = "ijkl"
    with pytest.raises(AssertionError):
        table.assert_experiment_identifiers_are_consistent()
    identifiers[3] = "mnop"
    # The identifier map must survive a pickle round trip
    pickled = pickle.dumps(table)
    table2 = pickle.loads(pickled)
    id1 = table.experiment_identifiers()
    id2 = table2.experiment_identifiers()
    for i in id1.keys():
        assert id1[i] == id2[i]
    other_table = flex.reflection_table()
    other_table["id"] = flex.int([3, 4])
    table.assert_experiment_identifiers_are_consistent()
    # ... and a msgpack round trip
    packed = table.as_msgpack()
    table2 = table.from_msgpack(packed)
    id1 = table.experiment_identifiers()
    id2 = table2.experiment_identifiers()
    for i in id1.keys():
        assert id1[i] == id2[i]
    other_table = flex.reflection_table()
    other_table["id"] = flex.int([3, 4])
    table.assert_experiment_identifiers_are_consistent()
    # extend() merges the other table's identifier map into this one
    identifiers = other_table.experiment_identifiers()
    identifiers[3] = "mnop"
    identifiers[4] = "qrst"
    table.extend(other_table)
    assert len(table.experiment_identifiers()) == 5
    assert table.experiment_identifiers()[0] == "abcd"
    assert table.experiment_identifiers()[1] == "efgh"
    assert table.experiment_identifiers()[2] == "ijkl"
    assert table.experiment_identifiers()[3] == "mnop"
    assert table.experiment_identifiers()[4] == "qrst"
    assert len(table.experiment_identifiers()) == 5
    assert table.experiment_identifiers()[0] == "abcd"
    assert table.experiment_identifiers()[1] == "efgh"
    assert table.experiment_identifiers()[2] == "ijkl"
    assert table.experiment_identifiers()[3] == "mnop"
    assert table.experiment_identifiers()[4] == "qrst"
def test_select_remove_on_experiment_identifiers():
    """Exercise select/remove_on_experiment_identifiers and reset_ids.

    Checks that the 'id' column and the identifier map stay consistent
    with the ExperimentList through removals/selections, and that bad
    identifiers (or a missing identifier map) raise KeyError.
    """
    table = flex.reflection_table()
    table["id"] = flex.int([0, 1, 2, 3])
    experiments = ExperimentList()
    experiments.append(Experiment(identifier="abcd"))
    experiments.append(Experiment(identifier="efgh"))
    experiments.append(Experiment(identifier="ijkl"))
    experiments.append(Experiment(identifier="mnop"))
    table.experiment_identifiers()[0] = "abcd"
    table.experiment_identifiers()[1] = "efgh"
    table.experiment_identifiers()[2] = "ijkl"
    table.experiment_identifiers()[3] = "mnop"
    table.assert_experiment_identifiers_are_consistent(experiments)
    # Removing "efgh" must drop both the rows and the map entry.
    table = table.remove_on_experiment_identifiers(["efgh"])
    del experiments[1]
    table.assert_experiment_identifiers_are_consistent(experiments)
    assert list(table.experiment_identifiers().keys()) == [0, 2, 3]
    assert list(table.experiment_identifiers().values()) == ["abcd", "ijkl", "mnop"]
    table = table.select_on_experiment_identifiers(["abcd", "mnop"])
    del experiments[1]  # now ijkl
    table.assert_experiment_identifiers_are_consistent(experiments)
    assert list(table.experiment_identifiers().keys()) == [0, 3]
    assert list(table.experiment_identifiers().values()) == ["abcd", "mnop"]
    # reset 'id' column such that they are numbered 0 .. n-1
    table.reset_ids()
    table.assert_experiment_identifiers_are_consistent(experiments)
    assert list(table.experiment_identifiers().keys()) == [0, 1]
    assert list(table.experiment_identifiers().values()) == ["abcd", "mnop"]
    # test that the function doesn't fail if no identifiers set
    table1 = copy.deepcopy(table)
    for k in table1.experiment_identifiers().keys():
        del table1.experiment_identifiers()[k]
    table1.reset_ids()
    assert list(table1.experiment_identifiers().keys()) == []
    # Test exception is raised if bad choice
    with pytest.raises(KeyError):
        table.remove_on_experiment_identifiers(["efgh"])
    with pytest.raises(KeyError):
        table.select_on_experiment_identifiers(["efgh"])
    table = flex.reflection_table()
    table["id"] = flex.int([0, 1, 2, 3])
    # Test exception is raised if identifiers map not set
    with pytest.raises(KeyError):
        table.remove_on_experiment_identifiers(["efgh"])
    with pytest.raises(KeyError):
        table.select_on_experiment_identifiers(["abcd", "mnop"])
def test_as_miller_array():
    """Check reflection_table.as_miller_array data and sigma export.

    Exported sigmas must be sqrt(variance); requesting an intensity
    choice that lacks its value/variance columns must raise KeyError.
    """
    table = flex.reflection_table()
    table["intensity.1.value"] = flex.double([1.0, 2.0, 3.0])
    table["intensity.1.variance"] = flex.double([0.25, 1.0, 4.0])
    table["miller_index"] = flex.miller_index([(1, 0, 0), (2, 0, 0), (3, 0, 0)])
    crystal = Crystal(
        real_space_a=(10, 0, 0),
        real_space_b=(0, 11, 0),
        real_space_c=(0, 0, 12),
        space_group=sgtbx.space_group_info("P 222").group(),
    )
    experiment = Experiment(crystal=crystal)
    iobs = table.as_miller_array(experiment, intensity="1")
    assert list(iobs.data()) == list(table["intensity.1.value"])
    assert list(iobs.sigmas()) == list(flex.sqrt(table["intensity.1.variance"]))
    with pytest.raises(KeyError):
        _ = table.as_miller_array(experiment, intensity="2")
    # Adding only the value column is not enough: the variance column is
    # still missing, so the export must keep raising KeyError.
    table["intensity.2.value"] = flex.double([1.0, 2.0, 3.0])
    with pytest.raises(KeyError):
        _ = table.as_miller_array(experiment, intensity="2")
def test_map_centroids_to_reciprocal_space(dials_regression):
    """Check centroid_px_to_mm and map_centroids_to_reciprocal_space.

    Runs on the i04 weak data regression set, first with the rotation
    geometry, then with the goniometer/scan removed (still-style data).
    The expected tuples are regression values for this data set.
    """
    data_dir = os.path.join(dials_regression, "indexing_test_data", "i04_weak_data")
    pickle_path = os.path.join(data_dir, "full.pickle")
    expts_path = os.path.join(data_dir, "experiments_import.json")
    refl = flex.reflection_table.from_file(pickle_path)
    expts = load.experiment_list(expts_path, check_format=False)
    # check mm values not in
    assert "xyzobs.mm.value" not in refl
    refl.centroid_px_to_mm(expts)
    for k in ("xyzobs.mm.value", "xyzobs.mm.variance"):
        assert k in refl
    assert refl["xyzobs.mm.value"][0] == pytest.approx(
        (199.43400000000003, 11.908133333333334, 1.4324789835743459)
    )
    assert refl["xyzobs.mm.variance"][0] == pytest.approx(
        (0.0035346345381526106, 0.0029881028112449803, 5.711576621000785e-07)
    )
    refl.map_centroids_to_reciprocal_space(expts)
    for k in ("s1", "rlp"):
        assert k in refl
    assert refl["s1"][0] == pytest.approx(
        (-0.035321308540942425, 0.6030297672949761, -0.8272574664632307)
    )
    assert refl["rlp"][0] == pytest.approx(
        (-0.035321308540942425, 0.27833194706770875, -0.5700990597173606)
    )
    # select only those centroids on the first image
    sel = refl["xyzobs.px.value"].parts()[2] < 1
    refl1 = refl.select(sel)
    del refl1["xyzobs.mm.value"], refl1["xyzobs.mm.variance"], refl1["s1"], refl1["rlp"]
    # pretend this is a still and hence no scan or goniometer
    expts[0].goniometer = None
    expts[0].scan = None
    refl1.centroid_px_to_mm(expts)
    refl1.map_centroids_to_reciprocal_space(expts)
    # s1 is geometry-independent, so it matches the rotation case above.
    assert refl1["s1"][0] == pytest.approx(
        (-0.035321308540942425, 0.6030297672949761, -0.8272574664632307)
    )
    # numbers for rlp are different to above since for the goniometer case the
    # starting angle of the first image is non-zero, so the rlps are rotated back
    # to zero degrees
    assert refl1["rlp"][0] == pytest.approx(
        (-0.035321308540942425, 0.6030297672949761, 0.19707031842793443)
    )
def test_calculate_entering_flags(dials_regression):
    """Check calculate_entering_flags on the i04 weak data regression set.

    The table must gain a boolean "entering" column whose True/False
    counts match the known regression values for this data set.
    """
    data_dir = os.path.join(dials_regression, "indexing_test_data", "i04_weak_data")
    pickle_path = os.path.join(data_dir, "full.pickle")
    experiments_path = os.path.join(data_dir, "experiments_import.json")
    # Use from_file for consistency with the other tests in this module;
    # from_pickle is the legacy spelling of the same loader.
    refl = flex.reflection_table.from_file(pickle_path)
    experiments = load.experiment_list(experiments_path, check_format=False)
    refl.centroid_px_to_mm(experiments)
    refl.map_centroids_to_reciprocal_space(experiments)
    refl.calculate_entering_flags(experiments)
    assert "entering" in refl
    flags = refl["entering"]
    assert flags.count(True) == 58283
    assert flags.count(False) == 57799
def test_random_split():
    """Test the reflection_table.random_split() method.

    A random seed is set, so the result is reproducible; the expected
    partitions below are the seeded regression values.
    """
    flex.set_random_seed(0)
    table = flex.reflection_table()
    table["id"] = flex.int(range(0, 10))
    # first test splitting into 2 or 3 tables.
    split_tables = table.random_split(2)
    expected = [[5, 7, 3, 0, 8], [2, 9, 6, 1, 4]]
    for t, e in zip(split_tables, expected):
        assert list(t["id"]) == e
    split_tables = table.random_split(3)
    expected = [[5, 7, 0], [4, 3, 8], [1, 9, 2, 6]]
    for t, e in zip(split_tables, expected):
        assert list(t["id"]) == e
    # test that a float is handled
    split_tables = table.random_split(1.0)
    assert split_tables[0] is table
    # values below 1 should just return the table
    split_tables = table.random_split(0)
    assert split_tables[0] is table
    # if n > len(table), the table should split into n=len(table) tables with
    # one entry.
    split_tables = table.random_split(20)
    assert len(split_tables) == 10
    for t in split_tables:
        assert len(t) == 1
def test_match_basic():
    """Match two tables holding the same 3D positions in shuffled order."""
    n = 100
    s = 10

    def rnd():
        return random.random()

    # Random positions; z is stretched x20 to mimic an image-number axis.
    coords = flex.vec3_double()
    for _ in range(n):
        coords.append((rnd() * s, rnd() * s, rnd() * s * 20))
    perm = list(range(n))
    random.shuffle(perm)
    shuffled = coords.select(flex.size_t(perm))
    left = flex.reflection_table()
    right = flex.reflection_table()
    left["xyz"] = coords
    right["xyz"] = shuffled
    idx_l, idx_r, _distance = left.match(right, key="xyz", scale=(1.0, 1.0, 0.05))
    matched_l = left.select(idx_l)
    matched_r = right.select(idx_r)
    # Every matched pair must point at the same physical position.
    for pos_l, pos_r in zip(matched_l["xyz"], matched_r["xyz"]):
        assert pos_l == pytest.approx(pos_r)
def test_match_mismatched_sizes():
    """Matching must work when one table is half the size of the other."""
    n = 100
    s = 10

    def rnd():
        return random.random()

    coords = flex.vec3_double()
    for _ in range(n):
        coords.append((rnd() * s, rnd() * s, rnd() * s * 20))
    perm = list(range(n))
    random.shuffle(perm)
    shuffled = coords.select(flex.size_t(perm))
    left = flex.reflection_table()
    right = flex.reflection_table()
    # Only keep the first half on one side to force a size mismatch.
    left["xyz"] = coords[: n // 2]
    right["xyz"] = shuffled
    idx_l, idx_r, _distance = left.match(right, key="xyz", scale=(1.0, 1.0, 0.05))
    matched_l = left.select(idx_l)
    matched_r = right.select(idx_r)
    for pos_l, pos_r in zip(matched_l["xyz"], matched_r["xyz"]):
        assert pos_l == pytest.approx(pos_r)
def test_match_by_hkle():
    """match_by_hkle must recover the inverse of a random permutation."""
    nn = 10
    h_col = flex.int([i % nn for i in range(nn)])
    k_col = flex.int([(i + 2) % nn for i in range(nn)])
    l_col = flex.int([(i + 4) % nn for i in range(nn)])
    entering = flex.int([i % 2 for i in range(nn)])
    hkl = flex.miller_index(h_col, k_col, l_col)
    t0 = flex.reflection_table()
    t0["miller_index"] = hkl
    t0["entering"] = entering
    perm = list(range(nn))
    random.shuffle(perm)
    t1 = t0.select(flex.size_t(perm))
    # because t0.match_by_hkle(t1) will give the _inverse_ to perm, match
    # the shuffled table against the original instead
    n0, n1 = t1.match_by_hkle(t0)
    assert list(n0) == list(range(nn))
    assert list(n1) == perm
def test_concat():
    """Check reflection_table.concat renumbers ids and merges identifier maps."""
    table1 = flex.reflection_table()
    table2 = flex.reflection_table()
    table1["id"] = flex.size_t([0, 0, 1, 1])
    table2["id"] = flex.size_t([0, 0, 1, 1])
    ids1 = table1.experiment_identifiers()
    ids2 = table2.experiment_identifiers()
    ids1[0] = "a"
    ids1[1] = "b"
    ids2[0] = "c"
    ids2[1] = "d"
    table1 = flex.reflection_table.concat([table1, table2])
    # Ids from the second table are offset past those of the first.
    assert list(table1["id"]) == [0, 0, 1, 1, 2, 2, 3, 3]
    # NOTE(review): ids1 still refers to the first input table's identifier
    # map; the assertions below imply concat updates that map in place —
    # confirm against the flex implementation.
    assert list(ids1.keys()) == [0, 1, 2, 3]
    assert list(ids1.values()) == ["a", "b", "c", "d"]
| dials/dials | tests/array_family/test_reflection_table.py | Python | bsd-3-clause | 50,591 | [
"CRYSTAL"
] | d6cbd2cb211a578a1489da2422a37e881fa6eedeb33e7fdf05b8eedb7b554841 |
import logging
import os
import subprocess
import sys
from galaxy import eggs
eggs.require( "decorator" )
eggs.require( "Tempita" )
eggs.require( "SQLAlchemy" )
eggs.require( "sqlalchemy_migrate" )
from migrate.versioning import repository
from migrate.versioning import schema
from sqlalchemy import create_engine
from sqlalchemy import MetaData
from sqlalchemy import Table
from galaxy.util.odict import odict
from galaxy.model.orm import dialect_to_egg
from tool_shed.util import common_util
# Module-level logger for the tool-migration check.
log = logging.getLogger( __name__ )

# Path relative to galaxy
# os.getcwd() is assumed to be the Galaxy root (TODO confirm); stripping it
# leaves the migrate repository path relative to that root, which is what
# sqlalchemy-migrate's Repository expects.
migrate_repository_directory = os.path.dirname( __file__ ).replace( os.getcwd() + os.path.sep, '', 1 )
migrate_repository = repository.Repository( migrate_repository_directory )
def verify_tools( app, url, galaxy_config_file, engine_options=None ):
    """Verify that the migrate_tools.version database value is in sync with
    the number of version scripts in ~/lib/galaxy/tools/migrate/versions.

    If they differ and the main tool shed is reachable, run the upgrade
    automatically; otherwise raise an exception describing the manual
    migration procedure.

    Fixes vs. previous revision: ``output`` is now initialised before use
    (it was referenced in the "tool shed down" message branch without ever
    being assigned there, raising NameError), ``engine_options`` no longer
    uses a shared mutable default, and the example config-file name typo
    ("uninverse_wsgi.ini") is corrected.
    """
    if engine_options is None:
        engine_options = {}
    dialect = ( url.split( ':', 1 ) )[0]
    try:
        egg = dialect_to_egg[ dialect ]
        try:
            eggs.require( egg )
            log.debug( "%s egg successfully loaded for %s dialect" % ( egg, dialect ) )
        except Exception:
            # If the module is in the path elsewhere (i.e. non-egg), it'll still load.
            log.warning( "%s egg not found, but an attempt will be made to use %s anyway" % ( egg, dialect ) )
    except KeyError:
        # Let this go, it could possibly work with db's we don't support
        log.error( "database_connection contains an unknown SQLAlchemy database dialect: %s" % dialect )
    # Create engine and metadata
    engine = create_engine( url, **engine_options )
    meta = MetaData( bind=engine )
    # The migrate_tools table was created in database version script 0092_add_migrate_tools_table.py.
    # Autoloading doubles as a check that the table actually exists.
    version_table = Table( "migrate_tools", meta, autoload=True )
    # Verify that the code and the database are in sync.
    db_schema = schema.ControlledSchema( engine, migrate_repository )
    latest_tool_migration_script_number = migrate_repository.versions.latest
    if latest_tool_migration_script_number != db_schema.version:
        # The default behavior is that the tool shed is down.
        tool_shed_accessible = False
        if app.new_installation:
            # New installations will not be missing tools, so we don't need to worry about them.
            missing_tool_configs_dict = odict()
        else:
            tool_panel_configs = common_util.get_non_shed_tool_panel_configs( app )
            if tool_panel_configs:
                # The missing_tool_configs_dict contents are something like:
                # {'emboss_antigenic.xml': [('emboss', '5.0.0', 'package', '\nreadme blah blah blah\n')]}
                tool_shed_accessible, missing_tool_configs_dict = common_util.check_for_missing_tools( app,
                                                                                                      tool_panel_configs,
                                                                                                      latest_tool_migration_script_number )
            else:
                # It doesn't matter if the tool shed is accessible since there are no migrated tools defined in the local Galaxy instance, but
                # we have to set the value of tool_shed_accessible to True so that the value of migrate_tools.version can be correctly set in
                # the database.
                tool_shed_accessible = True
                missing_tool_configs_dict = odict()
        have_tool_dependencies = False
        for k, v in missing_tool_configs_dict.items():
            if v:
                have_tool_dependencies = True
                break
        config_arg = ''
        if os.path.abspath( os.path.join( os.getcwd(), 'galaxy.ini' ) ) != galaxy_config_file:
            config_arg = ' -c %s' % galaxy_config_file.replace( os.path.abspath( os.getcwd() ), '.' )
        # Default so the manual-migration message below can always
        # interpolate it; it is only populated when the upgrade runs.
        output = ''
        if not app.config.running_functional_tests:
            if tool_shed_accessible:
                # Automatically update the value of the migrate_tools.version database table column.
                cmd = 'sh manage_tools.sh%s upgrade' % config_arg
                proc = subprocess.Popen( args=cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
                return_code = proc.wait()
                output = proc.stdout.read( 32768 )
                if return_code != 0:
                    raise Exception( "Error attempting to update the value of migrate_tools.version: %s" % output )
            elif missing_tool_configs_dict:
                if len( tool_panel_configs ) == 1:
                    plural = ''
                    tool_panel_config_file_names = tool_panel_configs[ 0 ]
                else:
                    plural = 's'
                    tool_panel_config_file_names = ', '.join( tool_panel_configs )
                msg = "\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
                msg += "\n\nThe list of files at the end of this message refers to tools that are configured to load into the tool panel for\n"
                msg += "this Galaxy instance, but have been removed from the Galaxy distribution. These tools and their dependencies can be\n"
                msg += "automatically installed from the Galaxy tool shed at http://toolshed.g2.bx.psu.edu.\n\n"
                msg += "To skip this process, attempt to start your Galaxy server again (e.g., sh run.sh or whatever you use). If you do this,\n"
                msg += "be aware that these tools will no longer be available in your Galaxy tool panel, and entries for each of them should\n"
                msg += "be removed from your file%s named %s.\n\n" % ( plural, tool_panel_config_file_names )
                msg += "CRITICAL NOTE IF YOU PLAN TO INSTALL\n"
                msg += "The location in which the tool repositories will be installed is the value of the 'tool_path' attribute in the <tool>\n"
                msg += 'tag of the file named ./migrated_tool_conf.xml (i.e., <toolbox tool_path="../shed_tools">). The default location\n'
                msg += "setting is '../shed_tools', which may be problematic for some cluster environments, so make sure to change it before\n"
                msg += "you execute the installation process if appropriate. The configured location must be outside of the Galaxy installation\n"
                msg += "directory or it must be in a sub-directory protected by a properly configured .hgignore file if the directory is within\n"
                msg += "the Galaxy installation directory hierarchy. This is because tool shed repositories will be installed using mercurial's\n"
                msg += "clone feature, which creates .hg directories and associated mercurial repository files. Not having .hgignore properly\n"
                msg += "configured could result in undesired behavior when modifying or updating your local Galaxy instance or the tool shed\n"
                msg += "repositories if they are in directories that pose conflicts. See mercurial's .hgignore documentation at the following\n"
                msg += "URL for details.\n\nhttp://mercurial.selenic.com/wiki/.hgignore\n\n"
                if have_tool_dependencies:
                    msg += "The following tool dependencies can also optionally be installed (see the option flag in the command below). If you\n"
                    msg += "choose to install them (recommended), they will be installed within the location specified by the 'tool_dependency_dir'\n"
                    msg += "setting in your main Galaxy configuration file (e.g., universe_wsgi.ini).\n"
                    processed_tool_dependencies = []
                    for missing_tool_config, tool_dependencies in missing_tool_configs_dict.items():
                        for tool_dependencies_tup in missing_tool_configs_dict[ missing_tool_config ][ 'tool_dependencies' ]:
                            if tool_dependencies_tup not in processed_tool_dependencies:
                                msg += "------------------------------------\n"
                                msg += "Tool Dependency\n"
                                msg += "------------------------------------\n"
                                msg += "Name: %s, Version: %s, Type: %s\n" % ( tool_dependencies_tup[ 0 ],
                                                                               tool_dependencies_tup[ 1 ],
                                                                               tool_dependencies_tup[ 2 ] )
                                if len( tool_dependencies_tup ) >= 4:
                                    msg += "Requirements and installation information:\n"
                                    msg += "%s\n" % tool_dependencies_tup[ 3 ]
                                else:
                                    msg += "\n"
                                msg += "------------------------------------\n"
                                processed_tool_dependencies.append( tool_dependencies_tup )
                    msg += "\n"
                msg += "%s" % output.replace( 'done', '' )
                msg += "vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n"
                msg += "sh ./scripts/migrate_tools/%04d_tools.sh\n" % latest_tool_migration_script_number
                msg += "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n"
                if have_tool_dependencies:
                    msg += "The tool dependencies listed above will be installed along with the repositories if you add the 'install_dependencies'\n"
                    msg += "option to the above command like this:\n\n"
                    msg += "vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n"
                    msg += "sh ./scripts/migrate_tools/%04d_tools.sh install_dependencies\n" % latest_tool_migration_script_number
                    msg += "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n"
                    msg += "Tool dependencies can be installed after the repositories have been installed as well.\n\n"
                msg += "After the installation process finishes, you can start your Galaxy server. As part of this installation process,\n"
                msg += "entries for each of the following tool config files will be added to the file named ./migrated_tool_conf.xml, so these\n"
                msg += "tools will continue to be loaded into your tool panel. Because of this, existing entries for these tools have been\n"
                msg += "removed from your file%s named %s.\n\n" % ( plural, tool_panel_config_file_names )
                for missing_tool_config, tool_dependencies in missing_tool_configs_dict.items():
                    msg += "%s\n" % missing_tool_config
                msg += "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n"
                raise Exception( msg )
            else:
                log.debug( "The main Galaxy tool shed is not currently available, so skipped tool migration %s until next server startup" % db_schema.version )
    else:
        log.info( "At migrate_tools version %d" % db_schema.version )
def migrate_to_current_version( engine, schema ):
    """Apply every outstanding tool-migration changeset, redirecting the
    chatter that sqlalchemy-migrate prints on stdout to the module logger."""
    # Changes to get to current version.
    changeset = schema.changeset( None )
    for version, change in changeset:
        next_version = version + changeset.step
        log.info( 'Installing tools from version %s -> %s... ' % ( version, next_version ) )
        saved_stdout = sys.stdout

        class _CapturingStdout( object ):
            """Minimal file-like object buffering everything written to it."""
            def __init__( self ):
                self.buffer = []
            def write( self, text ):
                self.buffer.append( text )
            def flush( self ):
                pass

        sys.stdout = _CapturingStdout()
        try:
            schema.runchange( version, change, changeset.step )
        finally:
            # Forward the captured output line-by-line, then restore stdout.
            for line in "".join( sys.stdout.buffer ).split( "\n" ):
                log.info( line )
            sys.stdout = saved_stdout
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/tool_shed/galaxy_install/migrate/check.py | Python | gpl-3.0 | 12,303 | [
"Galaxy"
] | 42a3d13fb7489866cba9a0b63028f1db4f49336ca611e8fd5c28c38a04c0a5ef |
import pytest
import re
from capybara.exceptions import ExpectationNotMet
class TestAssertCurrentPath:
    """Integration tests for session.assert_current_path against the
    built-in test server fixtures."""

    @pytest.fixture(autouse=True)
    def setup_session(self, session):
        # Every test starts on /with_js.
        session.visit("/with_js")

    def test_does_not_raise_if_the_page_has_the_given_current_path(self, session):
        session.assert_current_path("/with_js")

    def test_allows_regexp_matches(self, session):
        session.assert_current_path(re.compile(r"w[a-z]{3}_js"))

    def test_handles_non_escaped_query_options(self, session):
        session.click_link("Non-escaped query options")
        session.assert_current_path("/with_html?options[]=things")

    def test_handles_escaped_query_options(self, session):
        session.click_link("Escaped query options")
        session.assert_current_path("/with_html?options%5B%5D=things")

    @pytest.mark.requires("js")
    def test_waits_for_current_path(self, session):
        # The link triggers a JS navigation; the assertion should retry
        # until the new path appears.
        session.click_link("Change page")
        session.assert_current_path("/with_html")

    def test_raises_an_error_if_the_page_does_not_have_the_given_current_path(self, session):
        with pytest.raises(ExpectationNotMet) as excinfo:
            session.assert_current_path("/with_html")
        assert "expected '/with_js' to equal '/with_html'" in str(excinfo.value)

    def test_checks_query_options(self, session):
        session.visit("/with_js?test=test")
        session.assert_current_path("/with_js?test=test")

    def test_compares_the_full_url(self, session):
        session.assert_current_path(re.compile(r"\Ahttp://[^/]*/with_js\Z"), url=True)

    def test_ignores_the_query(self, session):
        session.visit("/with_js?test=test")
        session.assert_current_path("/with_js?test=test")
        # only_path=True drops the query string from the comparison.
        session.assert_current_path("/with_js", only_path=True)
class TestAssertNoCurrentPath:
    """Integration tests for session.assert_no_current_path (the negated
    counterpart of assert_current_path)."""

    @pytest.fixture(autouse=True)
    def setup_session(self, session):
        session.visit("/with_js")

    def test_raises_if_the_page_has_the_given_current_path(self, session):
        with pytest.raises(ExpectationNotMet):
            session.assert_no_current_path("/with_js")

    def test_allows_regex_matches(self, session):
        session.assert_no_current_path(re.compile(r"monkey"))

    @pytest.mark.requires("js")
    def test_waits_for_current_path_to_disappear(self, session):
        # The assertion should retry until the JS navigation completes.
        session.click_link("Change page")
        session.assert_no_current_path("/with_js")

    def test_does_not_raise_if_the_page_does_not_have_the_given_current_path(self, session):
        session.assert_no_current_path("/with_html")
| elliterate/capybara.py | capybara/tests/session/test_assert_current_path.py | Python | mit | 2,570 | [
"VisIt"
] | eb6a66f8e5776081b7bc62e620fd701a86ba72891edf382f07288fdaf797280a |
import IPython
# This integration relies on the rich-display hooks introduced in IPython 0.11.
if IPython.release.version<'0.11':
    raise ImportError,'this module requires at least v0.11 of IPython'
from rdkit.Chem import rdchem
from rdkit.Chem import Draw
from cStringIO import StringIO
import copy
import numpy
import Image
# Rendering defaults; users may tweak these module-level flags at runtime.
molSize=(450,150)
highlightSubstructs=True
kekulizeStructures=True
def _toPNG(mol):
    """Render a molecule to PNG bytes for IPython's rich-display hook.

    Atom indices recorded by a previous substructure search
    (mol.__sssAtoms) are highlighted in the drawing.
    """
    if hasattr(mol,'__sssAtoms'):
        highlightAtoms=mol.__sssAtoms
    else:
        highlightAtoms=[]
    try:
        mol.GetAtomWithIdx(0).GetExplicitValence()
    except RuntimeError:
        # Valence info is missing; compute it without strict checking.
        mol.UpdatePropertyCache(False)
    mc = copy.deepcopy(mol)
    try:
        img = Draw.MolToImage(mc,size=molSize,kekulize=kekulizeStructures,
                              highlightAtoms=highlightAtoms)
    except ValueError:  # <- can happen on a kekulization failure
        # Retry without kekulization on a fresh copy.
        mc = copy.deepcopy(mol)
        img = Draw.MolToImage(mc,size=molSize,kekulize=False,
                              highlightAtoms=highlightAtoms)
    sio = StringIO()
    img.save(sio,format='PNG')
    return sio.getvalue()
def _GetSubstructMatch(mol,query,**kwargs):
    """Wrapper around Mol.GetSubstructMatch that remembers the matched
    atom indices on the molecule so they are highlighted when drawn."""
    match = mol.__GetSubstructMatch(query,**kwargs)
    if highlightSubstructs:
        mol.__sssAtoms = list(match)
    else:
        mol.__sssAtoms = []
    return match
def _GetSubstructMatches(mol,query,**kwargs):
    """Wrapper around Mol.GetSubstructMatches that records every matched
    atom index on the molecule for highlighting in subsequent drawings."""
    matches = mol.__GetSubstructMatches(query,**kwargs)
    mol.__sssAtoms = []
    if highlightSubstructs:
        for match in matches:
            mol.__sssAtoms.extend(list(match))
    return matches
# code for displaying PIL images directly,
def display_pil_image(img):
    """displayhook function for PIL Images, rendered as PNG"""
    png_buffer = StringIO()
    img.save(png_buffer, format='PNG')
    return png_buffer.getvalue()
def InstallIPythonRenderer():
    """Monkey-patch rdkit Mol and PIL Image for rich display in IPython.

    The original substructure-match methods are stashed on the class
    (only once) so UninstallIPythonRenderer can restore them later.
    """
    rdchem.Mol._repr_png_=_toPNG
    if not hasattr(rdchem.Mol,'__GetSubstructMatch'):
        rdchem.Mol.__GetSubstructMatch=rdchem.Mol.GetSubstructMatch
    rdchem.Mol.GetSubstructMatch=_GetSubstructMatch
    if not hasattr(rdchem.Mol,'__GetSubstructMatches'):
        rdchem.Mol.__GetSubstructMatches=rdchem.Mol.GetSubstructMatches
    rdchem.Mol.GetSubstructMatches=_GetSubstructMatches
    Image.Image._repr_png_=display_pil_image

# Patching happens as a side effect of importing this module.
InstallIPythonRenderer()
def UninstallIPythonRenderer():
    """Undo InstallIPythonRenderer: remove the display hooks and restore
    the original substructure-match methods on rdkit's Mol class."""
    del rdchem.Mol._repr_png_
    if hasattr(rdchem.Mol,'__GetSubstructMatch'):
        rdchem.Mol.GetSubstructMatch=rdchem.Mol.__GetSubstructMatch
        del rdchem.Mol.__GetSubstructMatch
    if hasattr(rdchem.Mol,'__GetSubstructMatches'):
        rdchem.Mol.GetSubstructMatches=rdchem.Mol.__GetSubstructMatches
        del rdchem.Mol.__GetSubstructMatches
    del Image.Image._repr_png_
| rdkit/rdkit-orig | rdkit/Chem/Draw/IPythonConsole.py | Python | bsd-3-clause | 2,622 | [
"RDKit"
] | 7b0c594e9b939e794b4190c6b09bb987f10466494d0e13407542a85f3a387ad5 |
#! /usr/bin/env python
from ase import Atoms
from pyDFTutils.ase_utils import to_smallest_positive_pos,cut_lattice ,scaled_pos_to_pos
# Alias: wraps fractional coordinates back into the [0, 1) cell.
force_in_cell = to_smallest_positive_pos
from ase.lattice.cubic import SimpleCubicFactory
from ase.lattice.tetragonal import SimpleTetragonalFactory
from ase.lattice.triclinic import TriclinicFactory
from ase.geometry.cell import cellpar_to_cell
from numpy import array,sqrt
import numpy as np
from ase.lattice.spacegroup import crystal
import re
# Define cubic perovskite
class PerovskiteCubicFactory(SimpleCubicFactory):
    "A factory for creating perovskite (ABO3) lattices"
    xtal_name = 'cubic perovskite'
    # Five-atom ABO3 cubic cell: A at the corner, B at the body centre,
    # O at the three face centres.
    bravais_basis = [
        [0.,0.,0.],
        [0.5,0.5,0.5],
        [0.5,0.5,0.],[0.,0.5,0.5],[0.5,0.,0.5]]
    element_basis = (0,1,2,2,2)

PerovskiteCubic=PerovskiteCubicFactory()
PerovskiteCubic.__doc__="""
Usage:
eg. STO=PerovskiteCubic(['Sr','Ti','O'],latticeconstant=3.905)
"""
# Define double perovskite
class DoublePerovskiteFactory(SimpleTetragonalFactory):
    "A factory for creating double perovskite ((A1,A2)BO3) lattices"
    def __init__(self,oxygen=6.):
        # oxygen: O atoms per double cell; 5 drops the (0,0,0.25) oxygen,
        # i.e. an oxygen-vacancy variant of the oxygen=6 basis.
        # NOTE(review): the base factory __init__ is not invoked here —
        # confirm SimpleTetragonalFactory needs no initialisation.
        self.oxygen=oxygen
        self.xtal_name = 'double perovskite'
        if self.oxygen==6:
            self.bravais_basis = [[0.5,0.5,0.25],[0.5,0.5,0.75],
                                  [0.,0.,0.],[0.,0.,0.5],
                                  [0.5,0.,0.],[0.,0.5,0.],[0.,0.,0.25],
                                  [0.5,0.,0.5],[0.,0.5,0.5],[0.,0.,0.75]]
            self.element_basis = (0,1,2,2,3,3,3,3,3,3)
        elif self.oxygen==5:
            self.bravais_basis = [[0.5,0.5,0.25],[0.5,0.5,0.75],
                                  [0.,0.,0.],[0.,0.,0.5],
                                  [0.5,0.,0.],[0.,0.5,0.],
                                  [0.5,0.,0.5],[0.,0.5,0.5],[0.,0.,0.75]]
            self.element_basis = (0,1,2,2,3,3,3,3,3)
        else:
            raise ValueError("oxygen keyword only accepts values 5 or 6")

DoublePerovskite=DoublePerovskiteFactory()
class DoublePerovskiteFactory_3(SimpleTetragonalFactory):
    """
    gen (A2B)C3O9 type double perovskite lattice
    symbols: [A, B, C, O]
    cell: [a,b,c]
    """
    def __init__(self):
        # Three perovskite units stacked along c; A occupies layers 0 and
        # 1/3, B the layer at 2/3 (1:2 cation ordering on the A site).
        # NOTE(review): base __init__ is not called — verify this matches
        # how the other factories in this module are constructed.
        self.xtal_name=r'double perovksite (0.33)'
        bravais_basis_A=[(0,0,1.0/3*i) for i in [0,1]]
        bravais_basis_B=[(0,0,1.0/3*i) for i in [2]]
        bravais_basis_C=[(0.5,0.5,1.0/3*i+1.0/6) for i in [0,1,2]]
        bravais_basis_O=[(0.5,0.5,1.0/3*i) for i in [0,1,2]]+[(0.5,0,1.0/3*i+1.0/6) for i in [0,1,2]]+[(0,0.5,1.0/3*i+1.0/6) for i in [0,1,2]]
        self.bravais_basis=bravais_basis_A+bravais_basis_B+bravais_basis_C+bravais_basis_O
        self.element_basis=(0,0,1,2,2,2,3,3,3,3,3,3,3,3,3)

DoublePerovskite_3=DoublePerovskiteFactory_3()
class R3c_factory(TriclinicFactory):
    """
    R3c factory eg. BFO ### NOT IMPLEMENTED
    """
    def __init__(self,latticeconstant=(1,60),e_base=0.25,**kwargs):
        # NOTE(review): broken stub — TriclinicFactory.__init__ is called
        # without ``self`` (would raise TypeError), and an Exception is
        # raised unconditionally afterwards. Kept as a deliberate guard.
        a=latticeconstant[0]
        alpha=latticeconstant[1]
        TriclinicFactory.__init__(latticeconstant={'a':a,'b':a,'c':a ,'alpha':alpha,'beta':alpha,'gamma':alpha})
        raise Exception("Not implemented yet, don't want to do that")
def gen_pnma():
    """Build the 20-atom Pnma LaMnO3 cell with alternating Mn moments."""
    lat_a, lat_b, lat_c = 5.742, 7.668, 5.532
    atoms = crystal(
        ['La', 'Mn', 'O', 'O'],
        [(0.549, 0.25, 0.01),
         (0, 0, 0),
         (-0.014, 0.25, -0.07),
         (0.309, 0.039, 0.244)],
        spacegroup='pnma',
        cellpar=[lat_a, lat_b, lat_c, 90, 90, 90])
    atoms.set_pbc([True, True, True])
    # Mn sites occupy indices 4-7 of the generated cell; give them
    # alternating +/- initial moments.
    moments = [0] * 4 + [1, -1, 1, -1] + [0] * 12
    atoms.set_initial_magnetic_moments(moments)
    return atoms
def gen_pbnm():
    """Build the 20-atom Pbnm LaMnO3 cell with alternating Mn moments."""
    lat_a, lat_b, lat_c = 5.742, 7.668, 5.532
    atoms = crystal(
        ['La', 'Mn', 'O', 'O'],
        [(0.549, 0.25, 0.01),
         (0, 0, 0),
         (-0.014, 0.25, -0.07),
         (0.309, 0.039, 0.244)],
        spacegroup='pbnm',
        cellpar=[lat_a, lat_b, lat_c, 90, 90, 90])
    atoms.set_pbc([True, True, True])
    # Mn sites sit at indices 4-7 in the generated cell.
    moments = [0] * 4 + [1, -1, 1, -1] + [0] * 12
    atoms.set_initial_magnetic_moments(moments)
    return atoms
def R3c_builder(a,alpha,symbol_list,basis):
    """
    Build a 10-atom rhombohedral R3c perovskite cell (e.g. BiFeO3).

    a, alpha: rhombohedral lattice constant and angle (as they are)
    symbol_list: a list. eg. ['Bi','Fe','O']
    basis: fractional reference positions for the A site, the B site and
        one oxygen; the remaining atoms are generated by the symmetry
        operations below (cyclic permutation, and swapped permutation
        combined with the (1/2,1/2,1/2) translation).
    """
    symbols=symbol_list[0]*2+symbol_list[1]*2+symbol_list[2]*6
    x0=basis[0][0]
    scaled_positions=[(x0,x0,x0)]
    scaled_positions.append(force_in_cell((0.5+x0,0.5+x0,0.5+x0)))
    x1 =basis[1][0]
    scaled_positions.append(force_in_cell((x1,x1,x1)))
    scaled_positions.append(force_in_cell((0.5+x1,0.5+x1,0.5+x1)))
    x,y,z=basis[2]
    scaled_positions.append(force_in_cell(array([x,y,z])))
    scaled_positions.append(force_in_cell(array([z,x,y])))
    scaled_positions.append(force_in_cell(array([y,z,x])))
    ####NOTE: not just the above +0.5 but with a rotation.
    scaled_positions.append(force_in_cell(array([y,x,z])+0.5))
    scaled_positions.append(force_in_cell(array([x,z,y])+0.5))
    scaled_positions.append(force_in_cell(array([z,y,x])+0.5))
    atoms=Atoms(symbols=symbols,scaled_positions=scaled_positions,cell=cellpar_to_cell([a,a,a,alpha,alpha,alpha]))
    return atoms
def cut_R3c_222(atoms):
    """Cut the rhombohedral cell into a doubled pseudo-cubic supercell.

    The new lattice vectors run from the cell centre (1/2,1/2,1/2) to the
    three lattice points (1,0,0), (0,1,0), (0,0,1), scaled by two.
    """
    cell = atoms.get_cell()
    centre = np.dot(np.array([0.5, 0.5, 0.5]), cell)
    axes = [np.dot(np.array(corner), cell) - centre
            for corner in ([1, 0, 0], [0, 1, 0], [0, 0, 1])]
    new_cell = np.asarray(axes) * 2
    return cut_lattice(atoms, new_cell, nrepeat=4)
def cut_cubic_s2s22(atoms):
    """
    cut a cubic -> sqrt(2)*sqrt(2)*2
    """
    # NOTE(review): the trailing *2 doubles the sqrt2 x sqrt2 x 2 vectors
    # (same convention as cut_R3c_222). If a plain sqrt2 x sqrt2 x 2 cell
    # is intended, the *2 looks spurious — confirm against callers.
    cell=atoms.get_cell()
    vec0=np.dot(np.array([0,0,0]),cell)
    vec1=np.dot(np.array([1,-1,0]),cell)
    vec2=np.dot(np.array([1,1,0]),cell)
    vec3=np.dot(np.array([0,0,2]),cell)
    new_cell=array([vec1-vec0,vec2-vec0,vec3-vec0])*2
    #print cell_to_cellpar(new_cell)
    #print new_cell
    new_atoms=cut_lattice(atoms,new_cell,nrepeat=4)
    return new_atoms
def cut_R3c_22(atoms):
    """
    This cut 2*2*2->sqrt(2)*sqrt(2)*2 lattice
    """
    cell=atoms.get_cell()
    # Corners of the smaller pseudo-tetragonal cell, expressed in the
    # fractional coordinates of the 2x2x2 supercell.
    vec0=scaled_pos_to_pos([0,0.5,1],cell)
    vec1=scaled_pos_to_pos([0.5,0,1],cell)
    vec2=scaled_pos_to_pos([0.5,1,1],cell)
    vec3=scaled_pos_to_pos([0,0.5,0],cell)
    new_cell=np.asarray([vec1-vec0,vec2-vec0,vec3-vec0])
    new_atoms=cut_lattice(atoms,new_cell,nrepeat=2)
    cell=new_atoms.get_cell()
    #vec=scaled_pos_to_pos([0,0.5,0],cell)
    #new_atoms=translation(new_atoms,vec)
    return new_atoms
def R_3c_builder(a,alpha,symbol_list,basis=[(0,0,0),(0.227,0.227,0.227),(0.542,0.943,0.397)]):
    """
    Intended R-3c variant of R3c_builder — currently disabled: a
    NotImplementedError is raised before returning because the symmetry
    operations are wrong (see the raise below).

    a, alpha: as they are
    symbol_list: a list. eg. ['Bi','Fe','O']
    basis: fractional A, B and O reference positions. (The mutable
        default is never modified here, so the shared-default pitfall
        is benign.)
    """
    symbols=symbol_list[0]*2+symbol_list[1]*2+symbol_list[2]*6
    x0=basis[0][0]
    scaled_positions=[(x0,x0,x0)]
    scaled_positions.append(force_in_cell((0.5+x0,0.5+x0,0.5+x0)))
    x1 =basis[1][0]
    scaled_positions.append(force_in_cell((x1,x1,x1)))
    scaled_positions.append(force_in_cell((0.5+x1,0.5+x1,0.5+x1)))
    x,y,z=basis[2]
    scaled_positions.append(force_in_cell(array([x,y,z])))
    scaled_positions.append(force_in_cell(array([z,x,y])))
    scaled_positions.append(force_in_cell(array([y,z,x])))
    ####NOTE: not just the above +0.5 but with a rotation.
    scaled_positions.append(force_in_cell(array([x,y,z])+0.5))
    scaled_positions.append(force_in_cell(array([z,x,y])+0.5))
    scaled_positions.append(force_in_cell(array([y,z,x])+0.5))
    atoms=Atoms(symbols=symbols,scaled_positions=scaled_positions,cell=cellpar_to_cell([a,a,a,alpha,alpha,alpha]))
    # Deliberate guard: the generated positions are known to be wrong.
    raise NotImplementedError('Implementation Wrong, try to use sth instead , e.g. LaAlO3')
    return atoms
class NaClPrimFactory(TriclinicFactory):
    # Two-atom rhombohedral primitive basis of the rocksalt structure.
    bravais_basis = [[0, 0, 0], [0.5, 0.5, 0.5]]
    element_basis = (0, 1)

# Rocksalts
#-----------------
rocksalt_prim = NaClPrimFactory()
def gen_rocksalt_prim(name, latticecostant):
    """
    Generate the primitive (rhombohedral, 60-degree) cell of a rocksalt
    structure.

    name: chemical formula, e.g. 'NaCl' (split into elements by capitals).
    latticecostant: primitive-cell edge length.
        NOTE(review): the parameter name has a typo ("latticecostant");
        kept for backward compatibility with keyword callers.

    e.g.
    gen_rocksalt_prim('NaCl', 2.8)
    """
    elems = re.findall('[A-Z][a-z]*', name)
    a = latticecostant
    atoms = rocksalt_prim(elems, latticeconstant=(a, a, a, 60, 60, 60))
    return atoms
#-----------------
def test_r3c():
    """Smoke test: build a BiFeO3 R3c cell and run the cutting helpers."""
    atoms=R3c_builder(5.52,59.84,symbol_list=['Bi','Fe','O'],basis=[(0,0,0),(0.227,0.227,0.227),(0.542,0.943,0.397)])
    print(atoms.positions)
    atoms=cut_R3c_222(atoms)
    atoms=cut_R3c_22(atoms)
    print(atoms.get_chemical_symbols())
    return atoms
# Script entry point: run the smoke test when executed directly.
if __name__=='__main__':
    test_r3c()
| mailhexu/pyDFTutils | pyDFTutils/perovskite/lattice_factory.py | Python | lgpl-3.0 | 8,373 | [
"ASE",
"CRYSTAL"
] | 9fc39257199a3b178aa92db2a1c9ca48598cb799f6b732c3585e754bd9ac2b95 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
from enum import Enum
import urllib
import galaxy
import general
import datetime
import traceback
from scraper import Scraper
from datetime import timedelta
class Messages(Scraper):
    def __init__(self, browser, config):
        # Helper used to read the in-game clock (get_game_datetime)
        # before delegating to the base Scraper initialisation.
        self.general_client = general.General(browser, config)
        super(Messages, self).__init__(browser, config)
    def get_messages(self):
        # Abstract: generic message retrieval is not implemented; only
        # spy reports are supported (see get_spy_reports).
        raise NotImplementedError
    def get_spy_reports(self):
        """Fetch espionage reports from every page of the messages tab.

        Stops early (self.done is set by parse_spy_reports) once reports
        older than config.spy_report_life are reached — messages appear
        to be ordered newest-first (TODO confirm).
        """
        self.logger.info('Getting messages')
        url = self.url_provider.get_page_url('messages')
        # tab 20 is the espionage tab of the messages page.
        data = urllib.urlencode({'tab': 20, 'ajax': 1})
        self.logger.info("Getting messages for first page")
        res = self.open_url(url, data)
        soup = BeautifulSoup(res.read(), "lxml")
        spy_reports = []
        self.done = False
        # add messages from the first page
        spy_reports.extend(self.parse_spy_reports(soup))
        # Pagination indicator has the form "current/total".
        pagination_info = soup.find('li', {"class": "curPage"}).text
        page_count = int(pagination_info.split('/')[1])
        # add messages from the other pages
        for page in range(1, page_count):
            if self.done:
                break
            try:
                page_number = page + 1
                self.logger.info("Getting messages for page %d" % page_number)
                data = urllib.urlencode({'messageId': -1, 'tabid': 20, 'action': 107, 'pagination': page_number, 'ajax': 1})
                res = self.open_url(url, data)
                soup = BeautifulSoup(res.read(), "lxml")
                page_reports = self.parse_spy_reports(soup)
                spy_reports.extend(page_reports)
            except Exception as e:
                # Best-effort: a failing page is logged and skipped.
                exception_message = traceback.format_exc()
                self.logger.error(exception_message)
        return spy_reports
def clear_spy_reports(self):
# No need to clear
return True
url = self.url_provider.get_page_url('messages')
data = urllib.urlencode({'tab': 20, 'messageId': -1, 'action': 103, 'ajax': 1})
self.open_url(url, data)
self.logger.info("Clearing spy reports")
def parse_spy_reports(self, soup):
"""parse spy reports for an individual page"""
message_boxes = soup.findAll("li", {"class": "msg "})
message_boxes += soup.findAll("li", {"class": "msg msg_new"})
spy_reports = []
game_date = self.general_client.get_game_datetime()
for message_box in message_boxes:
try:
# We already are in the espionage tab, there is only spy reports and reports from other
# players spying on us here. If the report is from other player spying on us the message div
# should contain an span with the class espionageDefText
is_spy_report = True if message_box.find("span", {"class": "espionageDefText"}) is None else False
msg_date_node = message_box.find("span", {"class": "msg_date fright"})
message_datetime = parse_report_datetime(
msg_date_node.text if msg_date_node is not None else "1.1.2016 00:00:00")
if message_datetime < (game_date - timedelta(minutes=self.config.spy_report_life)):
self.done = True
return spy_reports
if is_spy_report:
planet_info = message_box.find("a", {"class": "txt_link"}).text
planet_name = planet_info.split('[')[0].strip()
coordinates_data = planet_info.split('[')
# If there is nothing after an '[' character it means the planet has been destroyed
if len(coordinates_data) <= 1:
self.logger.info("Hmm, I found a destroyed planet")
continue
coordinates = coordinates_data[1].replace(']', '').strip()
# find inactive player name
player_node = message_box.find("span", {"class": "status_abbr_longinactive"})
# If not long inactive, check for simply inactive
if player_node is None:
player_node = message_box.find("span", {"class": "status_abbr_inactive"})
if player_node is not None:
player_name = player_node.text.strip()
player_state = galaxy.PlayerState.Inactive
else:
# if the player isn't inactive I don't care about the name
player_name = 'unknown'
player_state = galaxy.PlayerState.Active
message_content = message_box.findAll("div", {"class": "compacting"})
if len(message_content) > 0:
resources_row = message_content[1]
resources_data = resources_row.findAll("span", {"class": "resspan"})
resources = None
if resources_data is not None:
metal = parse_resource(resources_data[0].text)
crystal = parse_resource(resources_data[1].text)
deuterium = parse_resource(resources_data[2].text)
resources = general.Resources(metal, crystal, deuterium)
loot_row = message_content[2]
loot_data = loot_row.find("span", {"class": "ctn ctn4"})
loot = parse_loot_percentage(loot_data.text)
defense_row = message_content[3]
fleet_data = defense_row.find("span", {"class": "ctn ctn4 tooltipLeft"})
defenses_data = defense_row.find("span", {"class": "ctn ctn4 fright tooltipRight"})
if fleet_data is not None and defenses_data is not None:
fleet = parse_resource(fleet_data.text)
defenses = parse_resource(defenses_data.text)
else:
fleet = None
defenses = None
else:
fleet = None
defenses = None
resources = None
loot = None
report = SpyReport(planet_name.encode('utf-8'), player_name, player_state, coordinates, resources, fleet,
defenses, loot, message_datetime)
spy_reports.append(report)
except Exception as e:
exception_message = traceback.format_exc()
self.logger.error(exception_message)
return spy_reports
def parse_resource(text):
    """Use to parse resources values to int, ex: metal: 2.492M becomes 2492000"""
    try:
        raw = text.split(':')[1].strip()
        # Strip thousands separators, then expand the M/Mn million suffixes.
        for old, new in ((".", ""), (",", ""), ("Mn", "000"), ("M", "000")):
            raw = raw.replace(old, new)
        value = int(raw)
    except Exception:
        print('Failed to parse resources string "' + text + '"')
        exception_message = traceback.format_exc()
        print(exception_message)
        # Default to 1
        value = 1
    return value
def parse_loot_percentage(text):
    """Use to parse loot percentage string, ie: Roubo: 50% becomes 0.5"""
    try:
        number_part = text.split(':')[1].strip("%")
        percentage = float(number_part) / 100
    except Exception:
        print('Failed to parse loot string "' + text + '"')
        exception_message = traceback.format_exc()
        print(exception_message)
        # Default to 50 %
        percentage = 0.5
    return percentage
def parse_report_datetime(text):
    """Parse an in-game report timestamp such as '31.12.2007 23:59:59'."""
    return datetime.datetime.strptime(text.strip(), "%d.%m.%Y %H:%M:%S")
class MessageType(Enum):
    """Kinds of in-game message; only spy reports are handled so far."""
    SpyReport = 1
class SpyReport(object):
    """Value object holding the data scraped from one espionage report.

    Several attributes may be None when the report page omitted them
    (see Messages.parse_spy_reports, which builds these).
    """

    def __init__(self, planet_name, player_name, player_state, coordinates, resources, fleet, defenses, loot,
                 report_datetime):
        self.planet_name = planet_name          # utf-8 encoded planet name
        self.player_name = player_name          # owner name, or 'unknown' for active players
        self.player_state = player_state        # galaxy.PlayerState value
        self.coordinates = coordinates          # e.g. '1:259:5'
        self.resources = resources              # general.Resources instance or None
        self.defenses = defenses                # defense points or None
        self.fleet = fleet                      # fleet points or None
        self.loot = loot                        # lootable fraction in [0, 1], or None
        self.report_datetime = report_datetime  # in-game datetime of the report

    def __str__(self):
        return "Planet %s,Player %s, State: %s, coordinates %s, Resouces = %s, Fleet: %s, Defenses: %s, Loot: %s " % (
            self.planet_name, self.player_name, self.player_state, self.coordinates, self.resources, self.fleet,
            self.defenses, self.loot)

    def get_loot(self):
        """Return the expected loot: total resources times the loot fraction.

        Reports parsed without resource details have resources/loot set to
        None; treat those as zero loot instead of raising AttributeError.
        """
        if self.resources is None or self.loot is None:
            return 0
        return self.resources.total() * self.loot
| yosh778/OG-Bot | ogbot/scraping/messages.py | Python | mit | 8,789 | [
"CRYSTAL",
"Galaxy"
] | 98648ea3cfae1b47152bc8aad55cc2fd18a30b92069c0acd69625ef3e16da34e |
# NeHe Tutorial Lesson: 44 - Lense Flare
#
# Ported to PyOpenGL 2.0 by Brian Leair 2004
#
# This code was created by Jeff Molofee 2000
#
# The port was based on the PyOpenGL tutorials and from
# PyOpenGLContext (tests/glprint.py)
#
# If you've found this code useful, feel free to let me know
# at (Brian Leair telcom_sage@yahoo.com).
#
# See original source and C based tutorial at http://nehe.gamedev.net
#
# Note:
# -----
# This code is not an ideal example of Pythonic coding or use of OO
# techniques. It is a simple and direct exposition of how to use the
# Open GL API in Python via the PyOpenGL package. It also uses GLUT,
# a high quality platform independent library. Due to using these APIs,
# this code is more like a C program using procedural programming.
#
# To run this example you will need:
# Python - www.python.org (v 2.3 as of 1/2004)
# PyOpenGL - pyopengl.sourceforge.net (v 2.0.1.07 as of 1/2004)
# Numeric Python - (v.22 of "numpy" as of 1/2004) numpy.sourceforge.net
# Python Image Library - http://www.pythonware.com/products/pil/
#
# #########################################################
# Please note, don't use PyOpenGL older than 2.0.1.07.
# Older PyOpenGL had a bug glGetFloat () that prevents this
# tutorial from working.
#
#
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import Image # PIL
try:
import win32api # GetTickCount ()
gHaveWin32 = 1
except:
gHaveWin32 = 0
import sys
import time # clock ()
import os
from glCamera import *
from glFont import *
# *********************** Globals ***********************
# Python 2.2 defines these directly
try:
    True
except NameError:
    True = 1==1
    False = 1==0

# Some api in the chain is translating the keystrokes to this octal string
# so instead of saying:  ESCAPE = 27, we use the following.
ESCAPE = '\033'

# Number of the glut window.
window = 0
base = None      # NOTE(review): apparently unused in this lesson -- leftover from the font tutorials? confirm

gInfoOn = False       # whether the diagnostic text overlay is drawn (toggled with keys 1/2)
gFrames = 0           # frames rendered since the last FPS recalculation
gStartTime = -1       # time.clock() value at the last FPS recalculation
gCurrentTime = -1     # scratch: current time during an FPS recalculation
gFPS = -1             # most recently computed frames-per-second value
gCamera = None        # the glCamera instance created in InitGL

# //################## NEW STUFF ##################################
qobj = None # //the quadric for our cylinder
gcylList = None       # display list id of the occluder cylinder built in InitGL
def LoadTexture (path):
    """ // Load Image And Convert To A Texture
    path can be a relative path, or a fully qualified path.

    returns tuple of status and ID:
        returns False if the requested image couldn't loaded as a texture
        returns True and the texture ID if image was loaded
    """
    # Catch exception here if image file couldn't be loaded.
    # NOTE(review): the bare except maps ANY failure (bad path, unsupported
    # format, even a typo'd name error) to (False, 0) -- consider IOError.
    try:
        # Note, NYI, path specified as URL's could be accessed using python url lib
        # OleLoadPicturePath () supports url paths, but that capability isn't critical to this tutorial.
        Picture = Image.open (path)
    except:
        return False, 0

    glMaxTexDim = glGetIntegerv (GL_MAX_TEXTURE_SIZE)
    WidthPixels = Picture.size [0]
    HeightPixels = Picture.size [1]

    if ((WidthPixels > glMaxTexDim) or (HeightPixels > glMaxTexDim)):
        # The image file is too large. Shrink it to fit within the texture dimensions
        # supported by our rendering context for a GL texture.
        # Note, feel free to experiment and force a resize by placing a small val into
        # glMaxTexDim (e.g. 32,64,128). (Despite the comment, the code raises
        # rather than resizing.)
        raise RuntimeError, "Texture image (%d by %d) is larger than supported by GL %d." % (WidthPixels, HeightPixels, glMaxTexDim)

    # Create a raw string from the image data - data will be unsigned bytes
    # RGBpad, no stride (0), and first line is top of image (-1)
    pBits = Picture.tostring("raw", "RGBX", 0, -1)

    # // Typical Texture Generation Using Data From The Bitmap
    texid = glGenTextures(1);                                    # // Create The Texture
    glBindTexture(GL_TEXTURE_2D, texid);                         # // Bind To The Texture ID
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR); # // (Modify This For The Type Of Filtering You Want)
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR); # // (Modify This For The Type Of Filtering You Want)
    # // (Modify This If You Want Mipmaps)
    glTexImage2D(GL_TEXTURE_2D, 0, 3, WidthPixels, HeightPixels, 0, GL_RGBA, GL_UNSIGNED_BYTE, pBits);

    # Cleanup (this would all happen automatically upon return... just spelling it out)
    # // Decrements IPicture Reference Count
    Picture = None
    return True, texid        # // Return True (All Good)
# A general OpenGL initialization function.  Sets all of the initial parameters.
def InitGL(Width, Height):        # We call this right after our OpenGL window is created.
    """One-time GL setup: shading/depth state, font and lens-flare textures,
    the camera, and the occluder-cylinder display list. Raises RuntimeError
    if any of the textures under Art/ cannot be loaded. Returns True."""
    global gFont, gCamera, gStartTime, gcylList, qobj

    glShadeModel(GL_SMOOTH)             # Enables Smooth Color Shading
    glClearColor(0.0, 0.0, 0.0, 0.5)    # This Will Clear The Background Color To Black
    glClearDepth(1.0)                   # Enables Clearing Of The Depth Buffer
    glEnable(GL_DEPTH_TEST)             # Enables Depth Testing
    glDepthFunc(GL_LEQUAL)              # The Type Of Depth Test To Do
    glHint (GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)  # Really Nice Perspective Calculations

    # Build the bitmap font from its texture. Note the window size passed to
    # the font (1024x768) does not match the 640x480 GLUT window -- the font
    # positions in DrawGLInfo are laid out in this 1024x768 space.
    status, tex = LoadTexture (os.path.join("Art","Font.bmp"))
    if (status):
        gFont = glFont ()
        gFont.SetFontTexture (tex)
        gFont.SetWindowSize (1024, 768)
        gFont.BuildFont (1.0)
    else:
        raise RuntimeError, "Failed to build font 'Art\\Font.bmp'"

    gCamera = glCamera ()
    gCamera.m_MaxHeadingRate = 1.0;       # // Set our Maximum rates for the camera
    gCamera.m_MaxPitchRate = 1.0;         # // Set our Maximum rates for the camera
    gCamera.m_HeadingDegrees = 0.0;       # // Initial heading

    # // Try and load the HardGlow texture tell the user if we can't find it then quit
    status, gCamera.m_GlowTexture = LoadTexture(os.path.join("Art","HardGlow2.bmp"));
    if (not status):
        raise RuntimeError, "Failed to load Hard Glow texture."
    # // Try and load the BigGlow texture tell the user if we can't find it then quit
    status, gCamera.m_BigGlowTexture = LoadTexture(os.path.join("Art","BigGlow3.bmp"))
    if (not status):
        raise RuntimeError, "Failed to load Big Glow texture."
    # // Try and load the Halo texture tell the user if we can't find it then quit
    status, gCamera.m_HaloTexture = LoadTexture(os.path.join("Art","Halo3.bmp"))
    if (not status):
        raise RuntimeError, "Failed to load Halo texture."
    # // Try and load the Streaks texture tell the user if we can't find it then quit
    status, gCamera.m_StreakTexture = LoadTexture(os.path.join("Art","Streaks4.bmp"))
    if (not status):
        raise RuntimeError, "Failed to load Streaks texture."

    # //################## NEW STUFF ##################################
    # // Just create a cylinder that will be used as occluder object
    gcylList = glGenLists(1);
    qobj = gluNewQuadric();
    gluQuadricDrawStyle(qobj, GLU_FILL);
    gluQuadricNormals(qobj, GLU_SMOOTH);
    glNewList(gcylList, GL_COMPILE);
    # List Start
    glEnable(GL_COLOR_MATERIAL);
    glColor3f(0.0, 0.0, 1.0);
    glEnable(GL_LIGHT0);
    glEnable(GL_LIGHTING);
    glTranslatef(0.0,0.0,-2.0);
    gluCylinder(qobj, 0.5, 0.5, 4.0, 15, 5);
    glDisable(GL_LIGHTING);
    glDisable(GL_LIGHT0);
    glDisable(GL_COLOR_MATERIAL);
    glEndList();
    # List End

    # Timing uses time.clock(); the original win32api.GetTickCount() calls
    # are kept below as reference.
    # if (gHaveWin32):
    #     gStartTime = win32api.GetTickCount ()        # // Get the time the app started
    gStartTime = time.clock ();        # // Get the time the app started
    return True                        # // Initialization Went OK
# The function called when our window is resized (which shouldn't happen if you enable fullscreen, below)
def ReSizeGLScene(Width, Height):
    """GLUT reshape callback: reset the viewport and projection matrix for
    the new window dimensions (45 degree FOV, near=1, far=1000)."""
    if Height == 0:      # Prevent A Divide By Zero If The Window Is Too Small
        Height = 1

    glViewport(0, 0, Width, Height)      # Reset The Current Viewport And Perspective Transformation
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    # // field of view, aspect ratio, near and far
    # This will squash and stretch our objects as the window is resized.
    # Note that the near clip plane is 1 (hither) and the far plane is 1000 (yon)
    gluPerspective(45.0, float(Width)/float(Height), 1, 1000.0)

    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
def DrawGLInfo ():
    """Draw the diagnostic text overlay: camera state, the GL modelview and
    projection matrices, the six frustum clipping planes, and an FPS counter
    recomputed every 100 frames."""
    # NOTE(review): gCurrentTime appears twice in this global statement;
    # harmless but redundant.
    global gCamera, gFont, gFrames, gCurrentTime, gCurrentTime, gStartTime, gFPS

    projMatrix = glGetFloatv(GL_PROJECTION_MATRIX);   # // Grab the projection matrix
    modelMatrix = glGetFloatv(GL_MODELVIEW_MATRIX);   # // Grab the modelview matrix

    # // Print out the cameras position
    glColor4f(1.0, 1.0, 1.0, 1.0);
    String = "m_Position............. = %.02f, %.02f, %.02f" % (gCamera.m_Position.x, gCamera.m_Position.y, gCamera.m_Position.z)
    gFont.glPrintf(10, 720, 1, String);
    # // Print out the cameras direction
    String = "m_DirectionVector...... = %.02f, %.02f, %.02f" % (gCamera.m_DirectionVector.i, gCamera.m_DirectionVector.j, gCamera.m_DirectionVector.k);
    gFont.glPrintf(10, 700, 1, String);
    # // Print out the light sources position
    String = "m_LightSourcePos....... = %.02f, %.02f, %.02f" % (gCamera.m_LightSourcePos.x, gCamera.m_LightSourcePos.y, gCamera.m_LightSourcePos.z);
    gFont.glPrintf(10, 680, 1, String);
    # // Print out the intersection point
    String = "ptIntersect............ = %.02f, %.02f, %.02f" % (gCamera.m_ptIntersect.x, gCamera.m_ptIntersect.y, gCamera.m_ptIntersect.z);
    gFont.glPrintf(10, 660, 1, String);
    # // Print out the vector that points from the light source to the camera
    String = "vLightSourceToCamera... = %.02f, %.02f, %.02f" % (gCamera.vLightSourceToCamera.i, gCamera.vLightSourceToCamera.j, gCamera.vLightSourceToCamera.k);
    gFont.glPrintf(10, 640, 1, String);
    # // Print out the vector that points from the light source to the intersection point.
    String = "vLightSourceToIntersect = %.02f, %.02f, %.02f" % (gCamera.vLightSourceToIntersect.i, gCamera.vLightSourceToIntersect.j, gCamera.vLightSourceToIntersect.k);
    gFont.glPrintf(10, 620, 1, String);

    # // Let everyone know the below matrix is the model view matrix
    String = "GL_MODELVIEW_MATRIX";
    gFont.glPrintf(10, 580, 1, String);
    # // Print out row 1 of the model view matrix
    String = "%.02f, %.02f, %.02f, %.02f" % (modelMatrix[0][0], modelMatrix[0][1], modelMatrix[0][2], modelMatrix[0][3]);
    gFont.glPrintf(10, 560, 1, String);
    # // Print out row 2 of the model view matrix
    String = "%.02f, %.02f, %.02f, %.02f" % (modelMatrix[1][0], modelMatrix[1][1], modelMatrix[1][2], modelMatrix[1][3]);
    gFont.glPrintf(10, 540, 1, String);
    # // Print out row 3 of the model view matrix
    String = "%.02f, %.02f, %.02f, %.02f" % (modelMatrix[2][0], modelMatrix[2][1], modelMatrix[2][2], modelMatrix[2][3]);
    gFont.glPrintf(10, 520, 1, String);
    # // Print out row 4 of the model view matrix
    String = "%.02f, %.02f, %.02f, %.02f" % (modelMatrix[3][0], modelMatrix[3][1], modelMatrix[3][2], modelMatrix[3][3]);
    gFont.glPrintf(10, 500, 1, String);

    # // Let everyone know the below matrix is the projection matrix
    String = "GL_PROJECTION_MATRIX";
    gFont.glPrintf(10, 460, 1, String);
    # // Print out row 1 of the projection view matrix
    String = "%.02f, %.02f, %.02f, %.02f" % (projMatrix[0][0], projMatrix[0][1], projMatrix[0][2], projMatrix[0][3]);
    gFont.glPrintf(10, 440, 1, String);
    # // Print out row 2 of the projection view matrix
    String = "%.02f, %.02f, %.02f, %.02f" % (projMatrix[1][0], projMatrix[1][1], projMatrix[1][2], projMatrix[1][3]);
    gFont.glPrintf(10, 420, 1, String);
    # // Print out row 3 of the projection view matrix
    # (rows 3 and 4 use 3 decimal places for the last two entries)
    String = "%.02f, %.02f, %.03f, %.03f" % (projMatrix[2][0], projMatrix[2][1], projMatrix[2][2], projMatrix[2][3]);
    gFont.glPrintf(10, 400, 1, String);
    # // Print out row 4 of the projection view matrix
    String = "%.02f, %.02f, %.03f, %.03f" % (projMatrix[3][0], projMatrix[3][1], projMatrix[3][2], projMatrix[3][3]);
    gFont.glPrintf(10, 380, 1, String);

    # // Let everyone know the below values are the Frustum clipping planes
    gFont.glPrintf(10, 320, 1, "FRUSTUM CLIPPING PLANES");
    # // Print out the right clipping plane
    String = "%.02f, %.02f, %.02f, %.02f" % (gCamera.m_Frustum[0][0], gCamera.m_Frustum[0][1], gCamera.m_Frustum[0][2], gCamera.m_Frustum[0][3]);
    gFont.glPrintf(10, 300, 1, String);
    # // Print out the left clipping plane
    String = "%.02f, %.02f, %.02f, %.02f" % (gCamera.m_Frustum[1][0], gCamera.m_Frustum[1][1], gCamera.m_Frustum[1][2], gCamera.m_Frustum[1][3]);
    gFont.glPrintf(10, 280, 1, String);
    # // Print out the bottom clipping plane
    String = "%.02f, %.02f, %.02f, %.02f" % (gCamera.m_Frustum[2][0], gCamera.m_Frustum[2][1], gCamera.m_Frustum[2][2], gCamera.m_Frustum[2][3]);
    gFont.glPrintf(10, 260, 1, String);
    # // Print out the top clipping plane
    String = "%.02f, %.02f, %.02f, %.02f" % (gCamera.m_Frustum[3][0], gCamera.m_Frustum[3][1], gCamera.m_Frustum[3][2], gCamera.m_Frustum[3][3]);
    gFont.glPrintf(10, 240, 1, String);
    # // Print out the far clipping plane
    String = "%.02f, %.02f, %.02f, %.02f" % (gCamera.m_Frustum[4][0], gCamera.m_Frustum[4][1], gCamera.m_Frustum[4][2], gCamera.m_Frustum[4][3]);
    gFont.glPrintf(10, 220, 1, String);
    # // Print out the near clipping plane
    String = "%.02f, %.02f, %.02f, %.02f" % (gCamera.m_Frustum[5][0], gCamera.m_Frustum[5][1], gCamera.m_Frustum[5][2], gCamera.m_Frustum[5][3]);
    gFont.glPrintf(10, 200, 1, String);

    if(gFrames >= 100):        # // if we are due for another FPS update
        # gCurrentTime = win32api.GetTickCount ();     # // Get the current time
        gCurrentTime = time.clock ();                  # // Get the current time
        DiffTime = gCurrentTime - gStartTime;          # // Find the difference between the start and end times
        # gFPS = (gFrames / float (DiffTime)) * 1000.0;    # // Compute the FPS
        gFPS = (gFrames / float (DiffTime));           # // Compute the FPS
        gStartTime = gCurrentTime;                     # // Set the current start time to the current time
        gFrames = 1;                                   # // Set the number of frames to 1
    else:
        gFrames += 1;        # // We are not due to for another update so add one to the frame count
    # // Print out the FPS
    String = "FPS %.02f" % (gFPS);
    gFont.glPrintf(10, 160, 1, String);
    return
def DrawGLScene ():
    """ // Here's Where We Do All The Drawing

    Renders the spinning occluder cylinder first (so the depth buffer is
    populated for the flare occlusion test), then the lens flare, then the
    optional diagnostic overlay, and swaps buffers."""
    # NOTE(review): 'ginfoOn' here is a typo for 'gInfoOn'. It is harmless
    # only because this function merely READS the flag (no assignment), so
    # the global declaration is not actually needed.
    global gCamera, gcylList, ginfoOn

    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);  # // Clear Screen And Depth Buffer
    glLoadIdentity();                                    # // Reset The Current Modelview Matrix

    # // We want our light source to be 50 units if front
    # // of the camera all the time to make it look like
    # // it is infinately far away from the camera. We only
    # // do this to the z coordinate because we want to see
    # // the flares adjust if we fly in a straight line.
    gCamera.m_LightSourcePos.z = gCamera.m_Position.z - 50.0;

    # //##################### NEW STUFF ##########################
    # // Draw our cylinder and make it "do something"
    # // Of course we do that BEFORE testing for occlusion
    # // We need our depth buffer to be filled to check against occluder objects
    glPushMatrix();
    glLoadIdentity();
    glTranslatef(0.0, 0.0, -20.0);
    # The original win32api.GetTickCount() timing is kept as reference;
    # time.clock()*1000 approximates the same millisecond count.
    # glRotatef(win32api.GetTickCount () / 50.0, 0.3, 0.0, 0.0);
    # glRotatef(win32api.GetTickCount () / 50.0, 0.0, 0.5, 0.0);
    glRotatef((time.clock () * 1000.0) / 50.0, 0.3, 0.0, 0.0);
    glRotatef((time.clock () * 1000.0) / 50.0, 0.0, 0.5, 0.0);
    glCallList(gcylList);
    glPopMatrix();

    gCamera.SetPrespective();       # // Set our perspective/oriention on the world
    gCamera.RenderLensFlare();      # // Render the lens flare
    gCamera.UpdateFrustumFaster();  # // Update the frustum as fast as possible.

    # // Check to see if info has been toggled by 1,2
    if (gInfoOn):
        DrawGLInfo();               # // Info is on so draw the GL information.

    glutSwapBuffers()
    return True
# The function called whenever a key is pressed. Note the use of Python tuples to pass in: (key, x, y)
def keyPressed(*args):
    """GLUT keyboard callback.

    W/S pitch and A/D yaw the camera, Z/C/X set forward velocity, keys 1/2
    toggle the diagnostic overlay, and ESC frees GL resources and exits."""
    global window, gCamera, gInfoOn, gFont, gcylList, qobj
    # If escape is pressed, kill everything.
    key = args [0]
    if key == ESCAPE:
        # Release font/camera resources and the GLU quadric + display list
        # before exiting the process.
        gFont.release ()
        gCamera.release ()
        gluDeleteQuadric (qobj)
        glDeleteLists (gcylList, 1)
        sys.exit ()
    if key == 'W' or key == 'w':
        gCamera.ChangePitch(-0.2);             # // Pitch the camera up 0.2 degrees
    if key == 'S' or key == 's':
        gCamera.ChangePitch(0.2);              # // Pitch the camera down 0.2 degrees
    if key == 'D' or key == 'd':
        gCamera.ChangeHeading(0.2);            # // Yaw the camera to the left
    if key == 'A' or key == 'a':
        gCamera.ChangeHeading(-0.2);           # // Yaw the camera to the right
    if key == 'Z' or key == 'z':
        gCamera.m_ForwardVelocity = 0.01;      # // Start moving the camera forward 0.01 units every frame
    if key == 'C' or key == 'c':
        gCamera.m_ForwardVelocity = -0.01;     # // Start moving the camera backwards 0.01 units every frame
    if key == 'X' or key == 'x':
        gCamera.m_ForwardVelocity = 0.0;       # // Stop the camera from moving.
    if args[0] == '1':
        gInfoOn = True;                        # // Toggle info on
    if args[0] == '2':
        gInfoOn = False;                       # // Toggle info off
def main():
    """Create the GLUT window, register all callbacks, run InitGL, and enter
    the (never-returning) GLUT main loop."""
    global window
    # pass arguments to init
    glutInit(sys.argv)

    # Select type of Display mode:
    #  Double buffer
    #  RGBA color
    #  Alpha components supported
    #  Depth buffer
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)

    # get a 640 x 480 window
    glutInitWindowSize(640, 480)

    # the window starts at the upper left corner of the screen
    glutInitWindowPosition(0, 0)

    # Okay, like the C version we retain the window id to use when closing, but for those of you new
    # to Python, remember this assignment would make the variable local and not global
    # if it weren't for the global declaration at the start of main.
    window = glutCreateWindow("Lens Flare Tutorial")

    # Register the drawing function with glut, BUT in Python land, at least using PyOpenGL, we need to
    # set the function pointer and invoke a function to actually register the callback, otherwise it
    # would be very much like the C version of the code.
    glutDisplayFunc(DrawGLScene)

    # Uncomment this line to get full screen.
    #glutFullScreen()

    # When we are doing nothing, redraw the scene.
    glutIdleFunc(DrawGLScene)

    # Register the function called when our window is resized.
    glutReshapeFunc(ReSizeGLScene)

    # Register the function called when the keyboard is pressed.
    glutKeyboardFunc(keyPressed)

    # We've told Glut the type of window we want, and we've told glut about
    # various functions that we want invoked (idle, resizing, keyboard events).
    # Glut has done the hard work of building up thw windows DC context and
    # tying in a rendering context, so we are ready to start making immediate mode
    # GL calls.
    # Call to perform inital GL setup (the clear colors, enabling modes, and most releveant -
    # consturct the displays lists for the bitmap font.
    InitGL(640, 480)

    # Start Event Processing Engine
    glutMainLoop()
# Print message to console, and kick off the main to get it rolling.
# NOTE: this file uses Python 2 syntax (print statements, old-style raise).
if __name__ == "__main__":
    print "Hit ESC key to quit."
    main()
| mgood7123/UPM | Sources/PyOpenGL-Demo-3.0.1b1/PyOpenGL-Demo/NeHe/lesson44/lesson44.py | Python | gpl-3.0 | 18,610 | [
"Brian"
] | 3b4f6fe7889b3580a48f308e153b0e0d2a63e6bb8048c473e77160bf27d1f635 |
# -*- mode: python; coding: utf-8 -*-
# Copyright 2018 Peter Williams <peter@newton.cx> and collaborators.
# Licensed under the MIT License.
"""This module helps you run the synchrotron code described in Fleischman &
Kuznetsov (2010; hereafter FK10) [`DOI:10.1088/0004-637X/721/2/1127
<https://doi.org/10.1088/0004-637X/721/2/1127>`_]. The code is provided as a
precompiled binary module. It’s meant to be called from IDL, but we won’t let
that stop us!
The main interface to the code is the :class:`Calculator` class. But before
you can use it, you must install the code, as described below.
Installing the code
-------------------
To do anything useful with this module, you must first obtain the precompiled
module. This isn't the sort of module you’d want to install into your system
shared libraries, so for applications you’ll probably be storing it in some
random directory. Therefore, all actions in this module start by specifying
the path to the library.
The module can be downloaded as part of a Supplementary Data archive
attached to the journal paper. At the moment, the direct link is `here
<http://iopscience.iop.org/0004-637X/721/2/1127/suppdata/apj351391_sourcecode.tar.gz>`_,
but that might change over time. The `journal’s website for the paper
<https://doi.org/10.1088/0004-637X/721/2/1127>`_ should always have a link.
The archive contains compiled versions of the code for Windows, 32-bit Linux,
and 64-bit Linux. It is quite worrisome that maybe one day these files will
stop working, but that’s what we’ve got right now.
Anyway, you should download and unpack the archive and copy the desired file
to wherever makes the most sense for your software environment and
application. On 64-bit Linux, the file name is ``libGS_Std_HomSrc_CEH.so.64``.
Any variable named *shlib_path* that comes up in the API should be a path to
this file. Note that relative paths should include a directory component (e.g.
``./libGS_Std_HomSrc_CEH.so.64``); the :mod:`ctypes` module treats bare
filenames specially.
"""
from __future__ import absolute_import, division, print_function
__all__ = '''
Calculator
'''
import ctypes
import numpy as np
from . import cgs
# Here we have the very low-level interface to the compiled code. These aren't
# documented in the main documentation since, hopefully, regular users will
# never need to use it.
# ctypes type for GET_MW's argv: a pair of (input, output) void pointers.
pointer_pair = ctypes.c_void_p * 2

# Indices into the 29-element float32 input array handed to GET_MW:
IN_VAL_AREA = 0
IN_VAL_DEPTH = 1
IN_VAL_T0 = 2
IN_VAL_EPSILON = 3
IN_VAL_KAPPA = 4
IN_VAL_INTEG_METH = 5
IN_VAL_EMIN = 6
IN_VAL_EMAX = 7
IN_VAL_EBREAK = 8
IN_VAL_DELTA1 = 9
IN_VAL_DELTA2 = 10
IN_VAL_N0 = 11
IN_VAL_NB = 12
IN_VAL_B = 13
IN_VAL_THETA = 14
IN_VAL_FREQ0 = 15
IN_VAL_LOGDFREQ = 16
IN_VAL_EDIST = 17
IN_VAL_NFREQ = 18
IN_VAL_PADIST = 19
IN_VAL_LCBDY = 20
IN_VAL_BEAMDIR = 21
IN_VAL_DELTAMU = 22
IN_VAL_A4 = 23
# 24 is unused

# "CEH" library only:
IN_VAL_FCCR = 25
IN_VAL_FWHCR = 26
IN_VAL_RENORMFLAG = 27

# all libraries again:
IN_VAL_QFLAG = 28

# Electron energy distribution codes (values for IN_VAL_EDIST):
EDIST_THM = 2
EDIST_PLW = 3
EDIST_DPL = 4
EDIST_TNT = 5
EDIST_KAP = 6
EDIST_PLP = 7
EDIST_PLG = 8
EDIST_TNP = 9
EDIST_TNG = 10

# Pitch-angle distribution codes (values for IN_VAL_PADIST):
PADIST_ISO = 1
PADIST_ELC = 2
PADIST_GLC = 3
PADIST_GAU = 4
PADIST_SGA = 5

# Column indices of the (nfreq, 5) output array returned by GET_MW:
OUT_VAL_FREQ = 0
OUT_VAL_OINT = 1
OUT_VAL_ODAMP = 2
OUT_VAL_XINT = 3
OUT_VAL_XDAMP = 4

E0_MEV = cgs.me * cgs.c**2 * cgs.evpererg * 1e-6 # helpful: electron rest-mass-energy in MeV (~= 0.511)
class FK10Invoker(object):
    """The lowest-level interface to the FK10 code.

    Loads the shared library at *shlib_path* and exposes the GET_MW entry
    point as a callable: ``invoker(in_values[, out_values]) -> out_values``.
    """

    def __init__(self, shlib_path):
        self.shlib_path = shlib_path
        self.shlib = ctypes.CDLL(self.shlib_path)
        # GET_MW(argc, argv), where argv is a pair of (input, output) pointers.
        self.get_mw = self.shlib.GET_MW
        self.get_mw.restype = ctypes.c_int
        self.get_mw.argtypes = (ctypes.c_int, ctypes.POINTER(pointer_pair))

    def __call__(self, in_values, out_values=None):
        # The input must be a C-contiguous float32 vector of exactly 29 values.
        if not isinstance(in_values, np.ndarray):
            raise ValueError('in_values must be an ndarray')
        if not in_values.flags.c_contiguous:
            raise ValueError('in_values must be C-contiguous')
        if in_values.dtype != np.float32:
            raise ValueError('in_values must have the C-float dtype')
        if in_values.shape != (29,):
            raise ValueError('in_values must have a shape of (29,)')

        # The number of output rows is encoded in the input array itself.
        num_freqs = int(in_values[IN_VAL_NFREQ])

        if out_values is None:
            # Allocate a NaN-initialized result buffer of shape (nfreq, 5).
            out_values = np.full((num_freqs, 5), np.nan, dtype=np.float32)
        else:
            # A caller-supplied buffer must be writable and correctly shaped.
            if not isinstance(out_values, np.ndarray):
                raise ValueError('out_values must be an ndarray')
            if not out_values.flags.c_contiguous:
                raise ValueError('out_values must be C-contiguous')
            if not out_values.flags.writeable:
                raise ValueError('out_values must be writeable')
            if out_values.dtype != np.float32:
                raise ValueError('out_values must have the C-float dtype')
            if out_values.shape != (num_freqs, 5):
                raise ValueError('out_values must have a shape of ({},5), where the first '
                                 'dimension comes from in_values'.format(num_freqs))

        arg_ptrs = pointer_pair(
            in_values.ctypes.data_as(ctypes.c_void_p),
            out_values.ctypes.data_as(ctypes.c_void_p),
        )
        retcode = self.get_mw(2, arg_ptrs)
        if retcode != 0:
            raise Exception('bad inputs to GET_MW function; return code was {}'.format(retcode))
        return out_values
def make_in_vals_array():
    """Allocate a zeroed 29-element float32 array suitable as GET_MW input."""
    return np.zeros((29,), dtype=np.float32)
# Some diagnostics of the low-level code.
def do_figure9_calc_lowlevel(shlib_path, set_unused=True):
    """Reproduce the calculation used to produce Figure 9 of the Fleischman &
    Kuznetsov (2010) paper, using our low-level interfaces.

    Input parameters, etc., come from the file ``Flare071231a.pro`` that is
    distributed with the paper’s Supplementary Data archive. When
    *set_unused* is true, parameters that should not affect the result are
    also filled in, as a sanity check.

    Invoke with something like::

        from pwkit import fk10
        arr = fk10.do_figure9_calc('path/to/libGS_Std_HomSrc_CEH.so.64')

    """
    fk10func = FK10Invoker(shlib_path)

    in_vals = make_in_vals_array()
    in_vals[IN_VAL_AREA] = 1.33e18       # source area, cm^2
    in_vals[IN_VAL_DEPTH] = 6e8          # source depth, cm
    in_vals[IN_VAL_T0] = 2.1e7           # plasma temperature, K
    # EPSILON (these markers are to aid counting indices)
    # KAPPA
    in_vals[IN_VAL_INTEG_METH] = 16
    in_vals[IN_VAL_EMIN] = 0.016         # minimum electron energy, MeV
    in_vals[IN_VAL_EMAX] = 4.0           # maximum electron energy, MeV
    # EBREAK
    in_vals[IN_VAL_DELTA1] = 3.7         # power-law index
    # DELTA2
    in_vals[IN_VAL_N0] = 3e9             # thermal electron density, cm^-3
    in_vals[IN_VAL_NB] = 5e9 / 3         # nonthermal electron density, cm^-3
    in_vals[IN_VAL_B] = 48               # magnetic field strength, G
    in_vals[IN_VAL_THETA] = 50           # observer angle, degrees
    in_vals[IN_VAL_FREQ0] = 5e8          # first frequency, Hz
    in_vals[IN_VAL_LOGDFREQ] = 0.02      # log10 frequency step
    in_vals[IN_VAL_EDIST] = EDIST_PLW    # power-law energy distribution
    in_vals[IN_VAL_NFREQ] = 100
    in_vals[IN_VAL_PADIST] = PADIST_GLC  # Gaussian loss-cone pitch-angle dist.
    in_vals[IN_VAL_LCBDY] = 90
    # BEAMDIR
    in_vals[IN_VAL_DELTAMU] = 0.4
    # A4
    # (slot 24 unused)
    in_vals[IN_VAL_FCCR] = 12
    in_vals[IN_VAL_FWHCR] = in_vals[IN_VAL_FCCR]
    in_vals[IN_VAL_RENORMFLAG] = 1
    in_vals[IN_VAL_QFLAG] = 2

    if set_unused:
        # Sanity-checking: these parameters shouldn't affect the calculated
        # result.
        in_vals[IN_VAL_EPSILON] = 0.05
        in_vals[IN_VAL_KAPPA] = 4.0
        in_vals[IN_VAL_EBREAK] = 1.0
        in_vals[IN_VAL_DELTA2] = 6.0
        in_vals[IN_VAL_BEAMDIR] = 90
        in_vals[IN_VAL_A4] = 1

    return fk10func(in_vals)
def do_figure9_calc_highlevel(shlib_path):
    """Reproduce the calculation behind Figure 9 of Fleischman & Kuznetsov
    (2010), using the high-level :class:`Calculator` interface.

    The parameter setup lives in :meth:`Calculator.new_for_fk10_fig9`, which
    doubles as a handy way to get an instance with reasonable settings for
    all of its parameters.
    """
    calc = Calculator.new_for_fk10_fig9(shlib_path)
    return calc.compute_lowlevel()
def make_figure9_plot(shlib_path, use_lowlevel=True, **kwargs):
    """Reproduce Figure 9 of the Fleischman & Kuznetsov (2010) paper, using our
    low-level interfaces. Uses OmegaPlot, of course.

    Input parameters, etc., come from the file ``Flare071231a.pro`` that is
    distributed with the paper’s Supplementary Data archive.

    NOTE(review): ``**kwargs`` is forwarded to either calc function, but
    :func:`do_figure9_calc_highlevel` accepts no keyword arguments, so
    passing e.g. ``set_unused=...`` with ``use_lowlevel=False`` raises
    TypeError.

    Invoke with something like::

        from pwkit import fk10
        fk10.make_figure9_plot('path/to/libGS_Std_HomSrc_CEH.so.64').show()

    """
    import omega as om

    if use_lowlevel:
        out_vals = do_figure9_calc_lowlevel(shlib_path, **kwargs)
    else:
        out_vals = do_figure9_calc_highlevel(shlib_path, **kwargs)

    # Total intensity is the sum of the O- and X-mode intensities; only
    # positive values can appear on the log-log plot.
    freqs = out_vals[:,OUT_VAL_FREQ]
    tot_ints = out_vals[:,OUT_VAL_OINT] + out_vals[:,OUT_VAL_XINT]
    pos = (tot_ints > 0)

    p = om.quickXY(freqs[pos], tot_ints[pos], 'Calculation', xlog=1, ylog=1)

    # Observed flux points overplotted in the paper's figure -- presumably
    # from the 2007 Dec 31 flare; confirm against the paper.
    nu_obs = np.array([1.0, 2.0, 3.75, 9.4, 17.0, 34.0])
    int_obs = np.array([12.0, 43.0, 29.0, 6.3, 1.7, 0.5])
    p.addXY(nu_obs, int_obs, 'Observations', lines=False)

    p.defaultKeyOverlay.hAlign = 0.93
    p.setBounds(0.5, 47, 0.1, 60)
    p.setLabels('Emission frequency, GHz', 'Total intensity, sfu')
    return p
# The high-level interface that someone might actually want to use.
class Calculator(object):
    """An interface to the FK10 synchrotron routines.

    This class maintains state about the input parameters that can be passed
    to the routines, and can invoke them for you.

    **Constructor arguments**

    *shlib_path*
      The path to the compiled FK10 code, as described in the module-level
      documentation.

    Newly-constructed objects are initialized with::

        self.set_hybrid_parameters(12, 12)
        self.set_ignore_q_terms(False)
        self.set_trapezoidal_integration(15)

    """
    def __init__(self, shlib_path):
        self.func = FK10Invoker(shlib_path)
        self.in_vals = make_in_vals_array()

        # Some desirable defaults:
        self.set_hybrid_parameters(12, 12)
        self.set_ignore_q_terms(False)
        self.set_trapezoidal_integration(15)

        # The "intensity" results out of the FK10 code just scale linearly
        # with this parameter, so we can basically set it arbitrarily.
        self.in_vals[IN_VAL_AREA] = 1.0

        # The choice of this parameter actually affects the outputs from the
        # code, because it annoyingly tries to do the RT integral for us.
        # Fortunately find_rt_coefficients() can get something useful unless
        # the input depth, or problem parameters, are really bonkers.
        self.in_vals[IN_VAL_DEPTH] = 1e9

    @classmethod
    def new_for_fk10_fig9(cls, shlib_path):
        """Create a calculator initialized to reproduce Figure 9 from FK10.

        This is mostly to provide a handy way to create a new
        :class:`Calculator` instance that is initialized with reasonable
        values for all of its parameters.

        """
        inst = (cls(shlib_path)
                .set_thermal_background(2.1e7, 3e9)
                .set_bfield(48)
                .set_edist_powerlaw(0.016, 4.0, 3.7, 5e9/3)
                .set_freqs(100, 0.5, 50)
                .set_hybrid_parameters(12, 12)
                .set_ignore_q_terms(False)
                .set_obs_angle(50 * np.pi / 180)
                .set_padist_gaussian_loss_cone(0.5 * np.pi, 0.4)
                .set_trapezoidal_integration(15))

        # Haven't yet figured out how to deal with this part; these raw
        # slots are set directly rather than through a setter.
        inst.in_vals[0] = 1.33e18
        inst.in_vals[1] = 6e8
        return inst

    def compute_lowlevel(self, **kwargs):
        """Return the raw array computed by the FK10 code.

        **Calling sequence**

        ``**kwargs``
          Passed on to :meth:`FK10Invoker.__call__`; you can specify
          a keyword *out_vals* to reuse an existing output array.

        Returns
          An array of shape ``(N, 5)``, where *N* has been specified
          by your frequency configuration. Layout as described in the
          main module documentation.

        """
        return self.func(self.in_vals, **kwargs)

    def set_bfield(self, B_G):
        """Set the strength of the local magnetic field.

        **Call signature**

        *B_G*
          The magnetic field strength, in Gauss
        Returns
          *self* for convenience in chaining.

        """
        if not (B_G > 0):
            raise ValueError('must have B_G > 0; got %r' % (B_G,))

        self.in_vals[IN_VAL_B] = B_G
        return self

    def set_bfield_for_s0(self, s0):
        """Set B to probe a certain harmonic number.

        **Call signature**

        *s0*
          The harmonic number to probe at the lowest frequency
        Returns
          *self* for convenience in chaining.

        This just proceeds from the relation ``nu = s nu_c = s e B / 2 pi m_e
        c``. Since *s* and *nu* scale with each other, if multiple frequencies
        are being probed, the harmonic numbers being probed will scale in the
        same way.

        """
        if not (s0 > 0):
            raise ValueError('must have s0 > 0; got %r' % (s0,))

        B0 = 2 * np.pi * cgs.me * cgs.c * self.in_vals[IN_VAL_FREQ0] / (cgs.e * s0)
        self.in_vals[IN_VAL_B] = B0
        return self

    def set_edist_powerlaw(self, emin_mev, emax_mev, delta, ne_cc):
        """Set the energy distribution function to a power law.

        **Call signature**

        *emin_mev*
          The minimum energy of the distribution, in MeV
        *emax_mev*
          The maximum energy of the distribution, in MeV
        *delta*
          The power-law index of the distribution
        *ne_cc*
          The number density of energetic electrons, in cm^-3.
        Returns
          *self* for convenience in chaining.

        """
        if not (emin_mev >= 0):
            raise ValueError('must have emin_mev >= 0; got %r' % (emin_mev,))
        if not (emax_mev >= emin_mev):
            raise ValueError('must have emax_mev >= emin_mev; got %r, %r' % (emax_mev, emin_mev))
        # Bug fix: these messages used "%r, %r" with a one-element tuple,
        # which raised TypeError instead of the intended ValueError.
        if not (delta >= 0):
            raise ValueError('must have delta >= 0; got %r' % (delta,))
        if not (ne_cc >= 0):
            raise ValueError('must have ne_cc >= 0; got %r' % (ne_cc,))

        self.in_vals[IN_VAL_EDIST] = EDIST_PLW
        self.in_vals[IN_VAL_EMIN] = emin_mev
        self.in_vals[IN_VAL_EMAX] = emax_mev
        self.in_vals[IN_VAL_DELTA1] = delta
        self.in_vals[IN_VAL_NB] = ne_cc
        return self

    def set_edist_powerlaw_gamma(self, gmin, gmax, delta, ne_cc):
        """Set the energy distribution function to a power law in the Lorentz factor.

        **Call signature**

        *gmin*
          The minimum Lorentz factor of the distribution
        *gmax*
          The maximum Lorentz factor of the distribution
        *delta*
          The power-law index of the distribution
        *ne_cc*
          The number density of energetic electrons, in cm^-3.
        Returns
          *self* for convenience in chaining.

        """
        if not (gmin >= 1):
            raise ValueError('must have gmin >= 1; got %r' % (gmin,))
        if not (gmax >= gmin):
            raise ValueError('must have gmax >= gmin; got %r, %r' % (gmax, gmin))
        # Bug fix: format strings previously had an extra "%r" with no
        # matching argument, raising TypeError when the check fired.
        if not (delta >= 0):
            raise ValueError('must have delta >= 0; got %r' % (delta,))
        if not (ne_cc >= 0):
            raise ValueError('must have ne_cc >= 0; got %r' % (ne_cc,))

        self.in_vals[IN_VAL_EDIST] = EDIST_PLG
        # The code works in kinetic energies (MeV): E_kin = (gamma - 1) m_e c^2.
        self.in_vals[IN_VAL_EMIN] = (gmin - 1) * E0_MEV
        self.in_vals[IN_VAL_EMAX] = (gmax - 1) * E0_MEV
        self.in_vals[IN_VAL_DELTA1] = delta
        self.in_vals[IN_VAL_NB] = ne_cc
        return self

    def set_freqs(self, n, f_lo_ghz, f_hi_ghz):
        """Set the frequency grid on which to perform the calculations.

        **Call signature**

        *n*
          The number of frequency points to sample.
        *f_lo_ghz*
          The lowest frequency to sample, in GHz.
        *f_hi_ghz*
          The highest frequency to sample, in GHz.
        Returns
          *self* for convenience in chaining.

        """
        if not (f_lo_ghz >= 0):
            raise ValueError('must have f_lo_ghz >= 0; got %r' % (f_lo_ghz,))
        if not (f_hi_ghz >= f_lo_ghz):
            raise ValueError('must have f_hi_ghz >= f_lo_ghz; got %r, %r' % (f_hi_ghz, f_lo_ghz))
        if not (n >= 1):
            raise ValueError('must have n >= 1; got %r' % (n,))

        self.in_vals[IN_VAL_NFREQ] = n
        self.in_vals[IN_VAL_FREQ0] = f_lo_ghz * 1e9 # GHz => Hz
        # NOTE(review): dividing by n (not n - 1) means the last sampled
        # point falls just short of f_hi_ghz; presumably this matches the
        # FK10 code's grid convention — confirm before changing.
        self.in_vals[IN_VAL_LOGDFREQ] = np.log10(f_hi_ghz / f_lo_ghz) / n
        return self

    def set_hybrid_parameters(self, s_C, s_WH, do_renorm=True):
        """Set the hybrid/renormalization control parameters.

        **Call signature**

        *s_C*
          The harmonic number above which the continuous approximation
          is used (with special behavior; see below).
        *s_WH*
          The harmonic number above which the Wild-Hill Bessel function
          approximations are used.
        *do_renorm* (default True)
          Whether to do any renormalization at all.
        Returns
          *self* for convenience in chaining.

        FK10 uses frequency parameters f^C_cr and f^WH_cr to control some of
        its optimizations. This function sets these parameters as multiples of
        the electron cyclotron frequency (f_Be in FK10 notation): e.g.,
        ``f^C_cr = s_C * f_Be``.

        At frequencies above f^C_cr, the "continuum" approximation is
        introduced, replacing the "exact" sum with an integral. At frequencies
        above f^WH_cr, the Wild-Hill approximations to the Bessel functions
        are used. In both cases, the activation of the optimizations can
        result in normalization shifts in the calculations. "Renormalization"
        computes these shifts (by doing both kinds of calculations at the
        transition frequencies) and attempts to correct them. (Some of the
        FK10 documentation seems to refer to renormalization as
        "R-optimization".)

        If f^C_cr is below the lowest frequency integrated, all calculations
        will be done in continuum mode. In this case, the sign of *s_C* sets
        whether Wild-Hill renormalization is applied. If *s_C* is negative and
        f^WH_cr is above the lowest frequency integration, renormalization is
        done. Otherwise, it is not.

        The documentation regarding f^WH_cr is confusing. It states that
        f^WH_cr only matters if (1) s_WH < s_C or (2) s_C < 0 and f^WH_cr >
        f_0. It is not obvious to me why s_WH > s_C should only matter if s_C
        < 0, but that's what's implied.

        In most examples in FK10, both of these parameters are set to 12.

        """
        self.in_vals[IN_VAL_FCCR] = s_C
        self.in_vals[IN_VAL_FWHCR] = s_WH
        self.in_vals[IN_VAL_RENORMFLAG] = 1 if do_renorm else 0
        return self

    def set_obs_angle(self, theta_rad):
        """Set the observer angle relative to the field.

        **Call signature**

        *theta_rad*
          The angle between the ray path and the local magnetic field,
          in radians.
        Returns
          *self* for convenience in chaining.

        """
        self.in_vals[IN_VAL_THETA] = theta_rad * 180 / np.pi # rad => deg
        return self

    def set_one_freq(self, f_ghz):
        """Set the code to calculate results at just one frequency.

        **Call signature**

        *f_ghz*
          The frequency to sample, in GHz.
        Returns
          *self* for convenience in chaining.

        """
        # Bug fix: the error message previously referenced the undefined
        # name "f_lo_ghz", raising NameError instead of ValueError.
        if not (f_ghz >= 0):
            raise ValueError('must have f_ghz >= 0; got %r' % (f_ghz,))

        self.in_vals[IN_VAL_NFREQ] = 1
        self.in_vals[IN_VAL_FREQ0] = f_ghz * 1e9 # GHz -> Hz
        self.in_vals[IN_VAL_LOGDFREQ] = 1.0
        return self

    def set_padist_gaussian_loss_cone(self, boundary_rad, expwidth):
        """Set the pitch-angle distribution to a Gaussian loss cone.

        **Call signature**

        *boundary_rad*
          The angle inside which there are no losses, in radians.
        *expwidth*
          The characteristic width of the Gaussian loss profile
          *in direction-cosine units*.
        Returns
          *self* for convenience in chaining.

        See ``OnlineI.pdf`` in the Supplementary Data for a precise
        definition. (And note the distinction between α_c and μ_c since not
        everything is direction cosines.)

        """
        self.in_vals[IN_VAL_PADIST] = PADIST_GLC
        self.in_vals[IN_VAL_LCBDY] = boundary_rad * 180 / np.pi # rad => deg
        self.in_vals[IN_VAL_DELTAMU] = expwidth
        return self

    def set_padist_isotropic(self):
        """Set the pitch-angle distribution to be isotropic.

        **Returns**
          *self* for convenience in chaining.

        """
        self.in_vals[IN_VAL_PADIST] = PADIST_ISO
        return self

    def set_ignore_q_terms(self, ignore_q_terms):
        """Set whether "Q" terms are ignored.

        **Call signature**

        *ignore_q_terms*
          If true, ignore "Q" terms in the integrations.
        Returns
          *self* for convenience in chaining.

        See Section 4.3 of FK10 and ``OnlineII.pdf`` in the Supplementary Data
        for a precise explanation. The default is to *not* ignore the terms.

        """
        # Note that we are ignoring the magic 1 value that only applies to the
        # "HomSrc_C" version of the library.
        self.in_vals[IN_VAL_QFLAG] = 0 if ignore_q_terms else 2
        return self

    def set_thermal_background(self, T_K, nth_cc):
        """Set the properties of the background thermal plasma.

        **Call signature**

        *T_K*
          The temperature of the background plasma, in Kelvin.
        *nth_cc*
          The number density of thermal electrons, in cm^-3.
        Returns
          *self* for convenience in chaining.

        Note that the parameters set here are the same as the ones that
        describe the thermal electron distribution, if you choose one of the
        electron energy distributions that explicitly models a thermal
        component ("thm", "tnt", "tnp", "tng", "kappa" in the code's
        terminology). For the power-law-y electron distributions, these
        parameters are used to calculate dispersion parameters (e.g.
        refractive indices) and a free-free contribution, but their
        synchrotron contribution is ignored.

        """
        if not (T_K >= 0):
            raise ValueError('must have T_K >= 0; got %r' % (T_K,))
        # Bug fix: this message used "%r, %r" with a one-element tuple,
        # raising TypeError instead of the intended ValueError.
        if not (nth_cc >= 0):
            raise ValueError('must have nth_cc >= 0; got %r' % (nth_cc,))

        self.in_vals[IN_VAL_T0] = T_K
        self.in_vals[IN_VAL_N0] = nth_cc
        return self

    def set_trapezoidal_integration(self, n):
        """Set the code to use trapezoidal integration.

        **Call signature**

        *n*
          Use this many nodes
        Returns
          *self* for convenience in chaining.

        """
        if not (n >= 2):
            raise ValueError('must have n >= 2; got %r' % (n,))

        self.in_vals[IN_VAL_INTEG_METH] = n + 1
        return self

    def find_rt_coefficients(self, depth0=None):
        """Figure out emission and absorption coefficients for the current parameters.

        **Argument**

        *depth0* (default None)
          A first guess to use for a good integration depth, in cm. If None,
          the most recent value is used.

        **Return value**

        A tuple ``(j_O, alpha_O, j_X, alpha_X)``, where:

        *j_O*
          The O-mode emission coefficient, in erg/s/cm^3/Hz/sr.
        *alpha_O*
          The O-mode absorption coefficient, in cm^-1.
        *j_X*
          The X-mode emission coefficient, in erg/s/cm^3/Hz/sr.
        *alpha_X*
          The X-mode absorption coefficient, in cm^-1.

        The main outputs of the FK10 code are intensities and "damping
        factors" describing a radiative transfer integration of the emission
        from a homogeneous source. But there are times when we'd rather just
        know what the actual emission and absorption coefficients are. These
        can be backed out from the FK10 outputs, but only if the "damping
        factor" takes on an intermediate value not extremely close to either 0
        or 1. Unfortunately, there's no way for us to know a priori what
        choice of the "depth" parameter will yield a nice value for the
        damping factor. This routine automatically figures one out, by
        repeatedly running the calculation.

        To keep things simple, this routine requires that you only be solving
        for coefficients for one frequency at a time (e.g.,
        :meth:`set_one_freq`).

        """
        if self.in_vals[IN_VAL_NFREQ] != 1:
            raise Exception('must have nfreq=1 to run Calculator.find_rt_coefficients()')

        if depth0 is not None:
            depth = depth0
            self.in_vals[IN_VAL_DEPTH] = depth0
        else:
            depth = self.in_vals[IN_VAL_DEPTH]

        scale_factor = 100
        buf = np.empty((1, 5), dtype=np.float32)

        def classify(damping_factor):
            # Damping ~1 implies alpha ~0 (see the -log(damp)/depth inversion
            # below); ~0 implies the source is fully self-absorbed. 0 means
            # the value is usable for backing out the coefficients.
            if damping_factor >= 0.99:
                return 1
            if damping_factor <= 0.01:
                return -1
            return 0

        DONE, SHRINK, GROW, ABORT = 0, 1, 2, 3

        # Map the joint (O-mode, X-mode) classification to the action to
        # take on the trial depth.
        actions = {
            (-1, -1): SHRINK,
            (-1,  0): SHRINK,
            (-1,  1): ABORT,
            ( 0, -1): SHRINK,
            ( 0,  0): DONE,
            ( 0,  1): GROW,
            ( 1, -1): ABORT,
            ( 1,  0): GROW,
            ( 1,  1): GROW,
        }

        last_change = DONE # our first change will be treated as a change in direction

        for attempt_number in range(20):
            # NOTE(review): compute_lowlevel()'s docs name the output-reuse
            # keyword *out_vals*; confirm that *out_values* is the spelling
            # FK10Invoker.__call__ actually accepts.
            self.compute_lowlevel(out_values=buf)
            co = classify(buf[0,OUT_VAL_ODAMP])
            cx = classify(buf[0,OUT_VAL_XDAMP])
            action = actions[co, cx]

            if action == DONE:
                break
            elif action == ABORT:
                raise Exception('depths of X and O modes are seriously incompatible')
            elif action == GROW:
                if last_change != GROW:
                    # Direction flip: damp the step size so we home in.
                    scale_factor *= 0.3
                depth *= scale_factor
                last_change = GROW
            elif action == SHRINK:
                if last_change != SHRINK:
                    scale_factor *= 0.3
                depth /= scale_factor
                last_change = SHRINK

            self.in_vals[IN_VAL_DEPTH] = depth
        else:
            # If we get here, we never explicitly quit the loop
            raise Exception('depth-finding algorithm did not converge!')

        # OK, we found some good depths! Now calculate the RT coefficients. I believe that
        # I'm doing this right ...

        sfu_to_specintens = 1e4 * cgs.cgsperjy * cgs.cmperau**2 / self.in_vals[IN_VAL_AREA]

        damp_X = buf[0,OUT_VAL_XDAMP]
        alpha_X = -np.log(damp_X) / depth
        si_X = buf[0,OUT_VAL_XINT] * sfu_to_specintens
        j_X = si_X * alpha_X / (1 - damp_X)

        damp_O = buf[0,OUT_VAL_ODAMP]
        alpha_O = -np.log(damp_O) / depth
        si_O = buf[0,OUT_VAL_OINT] * sfu_to_specintens
        j_O = si_O * alpha_O / (1 - damp_O)

        return (j_O, alpha_O, j_X, alpha_X)

    def find_rt_coefficients_tot_intens(self, depth0=None):
        """Figure out total-intensity emission and absorption coefficients for the
        current parameters.

        **Argument**

        *depth0* (default None)
          A first guess to use for a good integration depth, in cm. If None,
          the most recent value is used.

        **Return value**

        A tuple ``(j_I, alpha_I)``, where:

        *j_I*
          The total intensity emission coefficient, in erg/s/cm^3/Hz/sr.
        *alpha_I*
          The total intensity absorption coefficient, in cm^-1.

        See :meth:`find_rt_coefficients` for an explanation how this routine
        works. This version merely postprocesses the results from that method
        to convert the coefficients to refer to total intensity.

        """
        j_O, alpha_O, j_X, alpha_X = self.find_rt_coefficients(depth0=depth0)
        j_I = j_O + j_X
        # NOTE(review): a simple average of the two mode coefficients, as in
        # the original ("uhh... right?"); confirm this combination.
        alpha_I = 0.5 * (alpha_O + alpha_X)
        return (j_I, alpha_I)
| pkgw/pwkit | pwkit/fk10.py | Python | mit | 28,406 | [
"Gaussian"
] | 8808590f3d494a0f853250c61578ac8bfa3b4d8bfb6a06dd926d5c975449ccb7 |
from core.datacontainer import DataContainer
from bio.neuron import Neuron
from PyQt5 import QtGui, QtWidgets, QtCore
class NeuronGUI(QtWidgets.QGroupBox):
    """Qt group box that displays the name and threshold of one selected neuron."""

    def __init__(self, data_container):
        if not isinstance(data_container, DataContainer):
            raise TypeError("the data container has the wrong type")
        super().__init__("NEURON")
        # Register yourself as an observer so selection changes reach us
        self.__data_container = data_container
        self.__data_container.add_observer(self)
        # Static caption labels and the dynamic value labels they describe
        self.__name_label = QtWidgets.QLabel("name:")
        self.__name_value = QtWidgets.QLabel("")
        self.__threshold_label = QtWidgets.QLabel("threshold:")
        self.__threshold_value = QtWidgets.QLabel("")
        # Lay the four labels out on a 2x2 grid: captions left, values right
        grid = QtWidgets.QGridLayout()
        placements = (
            (self.__name_label, 0, 0),
            (self.__name_value, 0, 1),
            (self.__threshold_label, 1, 0),
            (self.__threshold_value, 1, 1),
        )
        for widget, row, col in placements:
            grid.addWidget(widget, row, col, 1, 1, QtCore.Qt.AlignLeft)
        self.setLayout(grid)
        self.hide()

    def observable_changed(self, change, data):
        # Only selection changes are relevant for this panel
        if change == DataContainer.change_is_new_selection:
            self.__update(data)

    def __update(self, data):
        # Keep only the neurons among the selected items
        selected_neurons = [item for item in data if isinstance(item, Neuron)]
        # This panel can only display exactly one neuron at a time
        if len(selected_neurons) != 1:
            self.hide()
            return
        neuron = selected_neurons[0]
        self.__name_value.setText(neuron.name)
        self.__threshold_value.setText(str(round(neuron.threshold, 3)))
        # Show this GUI element to the user
        self.show()
| zibneuro/brainvispy | gui/neurongui.py | Python | bsd-3-clause | 1,786 | [
"NEURON"
] | 4077cb60d68571e7b9b79f3fbddaf4e2202bd41c9fbf442cb7c2c825e71897c6 |
from scipy import integrate
import numpy as np

# Numerically evaluate the Gaussian integral and compare it against the
# exact analytic value, sqrt(pi). quad() returns a (value, abserr) pair.
# (Original used the Python-2-only "print" statement, which is a syntax
# error under Python 3; modernized to the print() function.)
result, abserr = integrate.quad(lambda x: np.exp(-x ** 2), -np.inf, np.inf)
print("Gaussian integral", np.sqrt(np.pi), (result, abserr))
| moonbury/notebooks | github/Numpy/Chapter10/gaussquad.py | Python | gpl-3.0 | 148 | [
"Gaussian"
] | 7463c870608a25b1c206f2d580f3f899b1e92acb212b6fa8612bb2e603f14ad3 |
import numpy as np
from pysisyphus.calculators.Calculator import Calculator
class LennardJones(Calculator):
    """Pairwise 12-6 Lennard-Jones potential with a hard distance cutoff.

    Pairs beyond the cutoff radius ``rc`` contribute nothing; pairs inside
    it are shifted by ``e0`` so the potential is continuous (zero) at ``rc``.
    """

    # Default sigma corresponds to σ = 1 Å, as the default value in ASE,
    # but pysisyphus uses au/Bohr.
    def __init__(self, sigma=1.8897261251, epsilon=1, rc=None):
        """
        Parameters
        ----------
        sigma : float
            LJ length parameter in Bohr.
        epsilon : float
            LJ well depth.
        rc : float or None
            Cutoff distance; defaults to 3*sigma when None.
        """
        super().__init__()
        self.sigma = sigma
        self.epsilon = epsilon
        # Cutoff distance
        if rc is None:
            rc = 3 * self.sigma
        self.rc = rc
        # Shift energy so that the potential vanishes at the cutoff radius
        self.e0 = (4 * self.epsilon *
                   ((self.sigma/self.rc)**12 - (self.sigma/self.rc)**6)
        )

    def calculate(self, coords3d):
        """Return ``(energy, forces)`` for Cartesian coordinates of shape (N, 3)."""
        # Upper-triangular index pairs: each atom pair counted exactly once
        a, b = np.triu_indices(len(coords3d), 1)
        # Pairwise difference vectors and distances
        diffs = coords3d[a] - coords3d[b]   # shape (npairs, 3)
        rs = np.linalg.norm(diffs, axis=1)
        # (sigma/r)**6, zeroed for pairs beyond the cutoff
        c6 = np.where(rs <= self.rc, (self.sigma/rs)**6, np.zeros_like(rs))
        # Apply the e0 shift once per in-cutoff pair so E(rc) == 0
        energy = -self.e0 * (c6 != 0.0).sum()
        c12 = c6**2
        energy += np.sum(4*self.epsilon * (c12 - c6))
        # Gradient of the pair term w.r.t. atom a's coordinates:
        #   dLJ/dx_1 = 24*eps*[(sigma/r)**6 - 2*(sigma/r)**12] * (x1-x2)/r**2
        # and the derivative w.r.t. atom b's coordinates is its negative.
        prefactors = 24*self.epsilon * (c6 - 2*c12) / rs**2
        products = prefactors[:,None] * diffs
        gradient = np.zeros_like(coords3d)
        # Every pair (a, b) contributes to the gradients of atoms a and b.
        # np.add.at / np.subtract.at accumulate correctly for repeated
        # indices (unbuffered), replacing the original per-pair Python loop.
        np.add.at(gradient, a, products)
        np.subtract.at(gradient, b, products)
        return energy, -gradient

    def get_energy(self, atoms, coords):
        """Energy-only entry point; *atoms* is unused but kept for the API."""
        energy, _ = self.calculate(coords.reshape(-1, 3))
        return {"energy": energy}

    def get_forces(self, atoms, coords):
        """Energy-and-forces entry point; forces are returned flattened."""
        energy, forces = self.calculate(coords.reshape(-1, 3))
        return {"energy": energy,
                "forces": forces.flatten(),
        }
| eljost/pysisyphus | pysisyphus/calculators/LennardJones.py | Python | gpl-3.0 | 2,628 | [
"ASE"
] | 1fd5f05844f44d93aa71eda30fde894210a8d3afdf73cb472d670f653a51f8e0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of OpenDrift.
#
# OpenDrift is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2
#
# OpenDrift is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenDrift. If not, see <https://www.gnu.org/licenses/>.
#
# Copyright 2015, Knut-Frode Dagestad, MET Norway
import os
import unittest
from datetime import datetime, timedelta
import numpy as np
import xarray as xr
from opendrift.models.oceandrift import OceanDrift
from opendrift.readers import reader_netCDF_CF_generic
from opendrift.readers import reader_ROMS_native
from opendrift.readers.interpolation import \
expand_numpy_array, \
ReaderBlock, LinearND2DInterpolator, \
NDImage2DInterpolator, Nearest2DInterpolator, \
Nearest1DInterpolator, Linear1DInterpolator
o = OceanDrift()
class TestInterpolation(unittest.TestCase):
"""Tests spatial interpolation"""
    def test_dateline(self):
        """Check interpolation consistency across lon=0 and the dateline.

        Builds synthetic NetCDF files with currents on a 0..360 grid and
        winds on -180..180 and 160..280 grids, then verifies that seeded
        elements drift identically regardless of longitude convention.
        """
        # Make synthetic netCDF file with currents from 0 to 360 deg longitude
        fc = 'opendrift_test_current_0_360.nc'
        lon = np.arange(0, 360)
        lat = np.arange(-88, 89)
        start_time = datetime(2021, 1, 1)
        time = [start_time + i*timedelta(hours=24) for i in range(3)]
        t, xcurr, ycurr = np.meshgrid(time, np.zeros(lat.shape), np.zeros(lon.shape), indexing='ij')
        xcurr[:, :, 0:180] = 1 # eastward
        xcurr[:, :, 180:] = -1 # westward, i.e. current divergence at lon=0
        ds = xr.Dataset(
            {"xcurr": (("time", "lat", "lon"), xcurr, {'standard_name': 'x_sea_water_velocity'}),
             "ycurr": (("time", "lat", "lon"), ycurr, {'standard_name': 'y_sea_water_velocity'})},
            coords={"lon": lon, "lat": lat, "time": time})
        ds.to_netcdf(fc)
        # Make synthetic netCDF file with winds from -180 to 180 deg longitude
        fw = 'opendrift_test_winds_180_180.nc'
        lon = np.arange(-180, 180)
        t, xwind, ywind = np.meshgrid(time, np.zeros(lat.shape), np.zeros(lon.shape), indexing='ij')
        ywind[:, :, 0:180] = 1 # northward
        ywind[:, :, 180:] = -1 # southward, i.e. wind divergence at lon=180
        ds = xr.Dataset(
            {"xwind": (("time", "lat", "lon"), xwind, {'standard_name': 'x_wind'}),
             "ywind": (("time", "lat", "lon"), ywind, {'standard_name': 'y_wind'})},
            coords={"lon": lon, "lat": lat, "time": time})
        ds.to_netcdf(fw)
        # Make synthetic netCDF file with winds from 160 to 280 deg longitude (Pacific)
        fw2 = 'opendrift_test_winds_160_280.nc'
        lon = np.arange(160, 280)
        t, xwind, ywind = np.meshgrid(time, np.zeros(lat.shape), np.zeros(lon.shape), indexing='ij')
        ywind[:, :, 0:20] = -1 # southhward
        ywind[:, :, 20:] = 1 # northward, i.e. wind divergence at lon=180
        ds = xr.Dataset(
            {"xwind": (("time", "lat", "lon"), xwind, {'standard_name': 'x_wind'}),
             "ywind": (("time", "lat", "lon"), ywind, {'standard_name': 'y_wind'})},
            coords={"lon": lon, "lat": lat, "time": time})
        ds.to_netcdf(fw2)
        reader_current = reader_netCDF_CF_generic.Reader(fc)
        reader_wind = reader_netCDF_CF_generic.Reader(fw)
        reader_wind2 = reader_netCDF_CF_generic.Reader(fw2)
        # Positions straddling both the dateline and the 0 meridian
        lons = np.array([-175, 0, 175])
        lats = np.array([60, 60, 60])
        np.testing.assert_array_almost_equal(reader_wind.covers_positions(lons, lats)[0], [0, 1, 2], decimal=1)
        np.testing.assert_array_almost_equal(reader_wind2.covers_positions(lons, lats)[0], [0, 2], decimal=1)
        # Simulation across 0 meridian
        o = OceanDrift(loglevel=30)
        o.add_readers_from_list([fc, fw])
        o.seed_elements(lon=[-2, 2], lat=[60, 60], time=start_time, wind_drift_factor=.1)
        o.run(steps=2)
        # Check that current give divergence, and that
        # wind is northwards east of 0 and southwards to the east
        np.testing.assert_array_almost_equal(o.elements.lon, [-2.129, 2.129], decimal=3)
        np.testing.assert_array_almost_equal(o.elements.lat, [60.006, 59.994], decimal=3)
        # Simulation across dateline (180 E/W)
        o = OceanDrift(loglevel=30)
        o.add_readers_from_list([fc, fw])
        o.seed_elements(lon=[-175, 175], lat=[60, 60], time=start_time, wind_drift_factor=.1)
        o.run(steps=2)
        #o.plot(fast=True)
        # Check that current give convergence, and that
        # wind is northwards east of 180 and southwards to the west
        np.testing.assert_array_almost_equal(o.elements.lon, [-175.129, 175.129], decimal=3)
        np.testing.assert_array_almost_equal(o.elements.lat, [60.006, 59.994], decimal=3)
        # Same as above, but with wind reader from 160 to 280 deg
        o = OceanDrift(loglevel=30)
        o.add_readers_from_list([fc, fw2])
        o.seed_elements(lon=[-175, 175], lat=[60, 60], time=start_time, wind_drift_factor=.1)
        o.run(steps=2)
        #o.plot(fast=True)
        # Check that current give convergence, and that
        # wind is northwards east of 180 and southwards to the west
        np.testing.assert_array_almost_equal(o.elements.lon, [-175.129, 175.129], decimal=3)
        np.testing.assert_array_almost_equal(o.elements.lat, [60.006, 59.994], decimal=3)
        # Cleaning up
        os.remove(fw)
        os.remove(fw2)
        os.remove(fc)
    def get_synthetic_data_dict(self):
        """Build a synthetic reader-data dict plus query points.

        Returns ``(data_dict, x, y, z)`` where *data_dict* has grid axes,
        a masked 2D field and a 5-level 3D field (with NaN holes), and
        x/y/z are 100 query positions spanning the grid.
        """
        data_dict = {}
        data_dict['x'] = np.linspace(-70, 470, 200)
        data_dict['y'] = np.linspace(10, 340, 100)
        data_dict['z'] = np.array([-0, -3, -10, -25, -100])
        # Make a horizontal slice
        xg, yg = np.meshgrid(data_dict['x'], data_dict['y'])
        slice1 = np.ma.array(np.cos(np.radians(xg)) +
                             np.sin(np.radians(yg)))
        # Add some holes
        slice1[0:40, 50:60] = np.nan
        slice1[40:60, 100:120] = np.nan
        slice1[20:22, 30:32] = np.nan
        slice1 = np.ma.masked_invalid(slice1)
        # Make another horizontal slice ("below") with more holes
        slice2 = slice1*1.1
        slice2[70:80, 20:28] = np.nan
        # Add a 2D and a 3D variable to dictionary
        data_dict['var2d'] = slice1
        # NOTE(review): "1*3" below looks like it may have been meant as
        # "1.3", but downstream assertions are tuned to the current values,
        # so it is left unchanged.
        data_dict['var3d'] = np.ma.array([slice1, slice2, 1.2*slice1,
                                          1*3*slice1, 10*slice1])
        data_dict['time'] = datetime.now()
        # Generate some points
        x = np.linspace(data_dict['x'].min(), data_dict['x'].max(), 100)
        y = np.linspace(data_dict['y'].min(), data_dict['y'].max(), 100)
        z = np.linspace(data_dict['z'].min(), data_dict['z'].max(), 100)
        return data_dict, x, y, z
    def test_covers_positions(self):
        """ReaderBlock.covers_positions: inside vs. outside the block extent."""
        data_dict, x, y, z = self.get_synthetic_data_dict()
        # Make block from dictionary, and apply tests
        b = ReaderBlock(data_dict)
        # Fully inside the x/y extent of the block
        xn = np.linspace(-70, 470, 100)
        yn = np.linspace(10, 340, 100)
        self.assertTrue(b.covers_positions(xn, yn))
        # Entirely outside in both x and y
        xn = np.linspace(500, 600, 100)
        yn = np.linspace(400, 500, 100)
        self.assertFalse(b.covers_positions(xn, yn))
        # Outside in y only
        xn = np.linspace(400, 500, 100)
        yn = np.linspace(0, 30, 100)
        self.assertFalse(b.covers_positions(xn, yn))
    def test_interpolation_horizontal(self):
        """2D ndimage interpolation on a masked field with holes."""
        data_dict, x, y, z = self.get_synthetic_data_dict()
        # Make block from dictionary, and apply tests
        b = ReaderBlock(data_dict, interpolation_horizontal='ndimage')
        # Block arrays must be shaped (ny, nx) and (nz, ny, nx)
        self.assertEqual(b.data_dict['var2d'].shape,
                         (len(b.y), len(b.x)))
        self.assertEqual(b.data_dict['var3d'].shape,
                         (len(b.z), len(b.y), len(b.x)))
        # Make some element positions
        interpolator2d = b.Interpolator2DClass(b.x, b.y, x, y)
        values = interpolator2d(data_dict['var2d'])
        # Checking output is as expected
        self.assertAlmostEqual(values[10], 1.6487979858538129)
        # 15 query points fall inside the NaN holes and stay masked
        self.assertEqual(sum(values.mask), 15)
    def test_flipped(self):
        """Interpolation must give the same result when the x axis is reversed."""
        x = np.arange(10).astype(np.float32)
        y = np.arange(20).astype(np.float32)
        X, Y = np.meshgrid(x, y)
        d = X*5  # field depends on x only, so the expected value is 5*x0
        b = ReaderBlock({'x': x, 'y': y, 'v': d, 'time': datetime.now()})
        # Same data with x axis (and data columns) flipped
        b_flipped = ReaderBlock({'x': np.flip(x), 'y': y, 'v': np.flip(d, axis=1), 'time': datetime.now()})
        x0 = np.array([0, 7, 7.3, 7.41, 9]) # Some random points
        y0 = np.array([5, 5, 8, 8.2, 5])
        bi = b.Interpolator2DClass(b.x, b.y, x0, y0)
        bi_flipped = b_flipped.Interpolator2DClass(b_flipped.x, b_flipped.y, x0, y0)
        np.testing.assert_array_almost_equal(bi(d), bi_flipped(np.flip(d, axis=1)))
        np.testing.assert_array_almost_equal(bi(d), 5*x0)
    def test_interpolation_ensemble(self):
        """Ensemble variables (lists of fields) are distributed over elements.

        Element i receives member i % n_members, so with 3 members the
        4th element (index 3) gets member 0 again.
        """
        data_dict, x, y, z = self.get_synthetic_data_dict()
        x = x[0:15]
        y = y[0:15]
        z = z[0:15]
        # Constant fields so the expected interpolated values are exact
        data_dict['var2d'] = np.ones(data_dict['var2d'].shape)
        data_dict['var3d'] = np.ones(data_dict['var3d'].shape)
        data_dict['var2de'] = [data_dict['var2d']*1,
                               data_dict['var2d']*2,
                               data_dict['var2d']*3]
        data_dict['var3de'] = [data_dict['var3d']*31,
                               data_dict['var3d']*32,
                               data_dict['var3d']*33]
        b = ReaderBlock(data_dict)
        interp = b.interpolate(x, y, z)[0]  # 1 is profiles
        v2 = interp['var2d']
        v2e = interp['var2de']
        v3 = interp['var3d']
        v3e = interp['var3de']
        self.assertEqual(v2[0], 1)
        self.assertEqual(v2e[0], 1)
        self.assertEqual(v2e[1], 2)
        self.assertEqual(v2e[3], 1)
        self.assertEqual(v3[0], 1)
        self.assertEqual(v3e[0], 31)
        self.assertEqual(v3e[1], 32)
        self.assertAlmostEqual(v3e[3], 31)
    def test_interpolation_vertical(self):
        """Nearest and linear 1D (vertical) interpolation, incl. extrapolation."""
        # 3 elements, 4 depths
        zgrid = np.array([0, 1, 3, 10])
        z = np.array([.5, 3, 9])
        data = np.array([[0, 0, 0],
                         [1, 1, 1],
                         [2, 2, 2],
                         [3, 3, 3]])
        interpolator = Nearest1DInterpolator(zgrid, z)
        self.assertTrue(np.allclose(interpolator(data), [0, 2, 3]))
        interpolator = Linear1DInterpolator(zgrid, z)
        self.assertTrue(np.allclose(interpolator(data),
                                    [0.5, 2, 2.85714286]))
        # And with exatrapolation (~to surface and bottom)
        zgrid = np.array([1, 3, 5, 10])
        z = np.array([.5, 6, 12])
        interpolator = Nearest1DInterpolator(zgrid, z)
        self.assertTrue(np.allclose(interpolator(data), [0, 2, 3]))
        # Linear interpolator clamps outside the grid (0.0 at top, 3 at bottom)
        interpolator = Linear1DInterpolator(zgrid, z)
        self.assertTrue(np.allclose(interpolator(data),
                                    [0.0, 2.2, 3]))
    def test_compare_interpolators(self):
        """linearND, nearest and ndimage interpolators agree on a smooth field."""
        data_dict, x, y, z = self.get_synthetic_data_dict()
        arr = data_dict['var2d']  # (unused local, kept as-is)
        # Make block from dictionary, and apply tests
        linearData = LinearND2DInterpolator(data_dict['x'], data_dict['y'],
                                            x, y)(data_dict['var2d'])
        nearestData = Nearest2DInterpolator(data_dict['x'], data_dict['y'],
                                            x, y)(data_dict['var2d'])
        ndimageData = NDImage2DInterpolator(data_dict['x'], data_dict['y'],
                                            x, y)(data_dict['var2d'])
        # Check that all interpolator give nearly equal values
        # for a given position
        i = 10
        self.assertAlmostEqual(linearData[i], nearestData[i], places=2)
        self.assertAlmostEqual(linearData[i], ndimageData[i], places=2)
    def test_interpolation_3dArrays(self):
        """Test interpolation of 3D reader data, including vertical profiles.

        Uses the bundled NorKyst test file; asserts spot values against
        previously-recorded reference numbers.
        """
        reader = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
            '14Jan2016_NorKyst_z_3d/NorKyst-800m_ZDEPTHS_his_00_3Dsubset.nc')
        # 100000 points within 50x50 pixels over sea (corner of domain)
        # NOTE(review): num_points below is actually 1000, not 100000
        num_points = 1000
        np.random.seed(0)  # To get the same random numbers each time
        x = np.random.uniform(reader.xmin, reader.xmin+800*50, num_points)
        y = np.random.uniform(reader.ymax-800*50, reader.ymax, num_points)
        z = np.random.uniform(-200, 0, num_points)
        variables = ['x_sea_water_velocity', 'y_sea_water_velocity',
                     'sea_water_temperature']
        # Read a block of data covering the points
        data = reader.get_variables(variables, time=reader.start_time,
                                    x=x, y=y, z=z)
        b = ReaderBlock(data, interpolation_horizontal='nearest')
        env, prof = b.interpolate(x, y, z, variables,
                                  profiles=['sea_water_temperature'],
                                  profiles_depth=[-30, 0])
        self.assertAlmostEqual(env['x_sea_water_velocity'][100],
                               0.075019, 3)
        self.assertAlmostEqual(prof['sea_water_temperature'][0,11],
                               7.549999, 3)
        self.assertAlmostEqual(prof['sea_water_temperature'][-1,11],
                               8.389999, 3)
        self.assertEqual(prof['z'][-1], b.z[-1])
    def test_zNone(self):
        """Interpolation must also work when z is None (2D-only block)."""
        d = {}
        d['x'] = np.arange(5)
        d['y'] = np.arange(7)
        d['z'] = 0
        d['time'] = None
        d['var'] = np.random.rand(5,6)
        rb = ReaderBlock(d)
        z = None
        i,p = rb.interpolate(np.array([1, 2]), np.array([2, 3]), z, 'var')
        # Random field in (0, 1); interpolated value must be positive
        self.assertTrue(i['var'][0] > 0)
    def test_repeated(self):
        """Check that block can be used for interpolation to several sets of positions"""
        reader = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
            '14Jan2016_NorKyst_z_3d/NorKyst-800m_ZDEPTHS_his_00_3Dsubset.nc')
        # 100 points within 50x50 pixels over sea (corner of domain)
        num_points = 100
        np.random.seed(0)  # To get the same random numbers each time
        x = np.random.uniform(reader.xmin, reader.xmin+800*50, num_points)
        y = np.random.uniform(reader.ymax-800*50, reader.ymax, num_points)
        z = np.random.uniform(-200, 0, num_points)
        variables = ['x_sea_water_velocity', 'y_sea_water_velocity',
                     'sea_water_temperature']
        # Read a block of data covering the points
        data = reader.get_variables(variables, time=reader.start_time,
                                    x=x, y=y, z=z)
        b = ReaderBlock(data, interpolation_horizontal='nearest')
        env, prof = b.interpolate(x, y, z, 'sea_water_temperature')
        # Interpolate the same block to a subset of the positions ...
        x2 = x[20:30]
        y2 = y[20:30]
        z2 = z[20:30]
        env2, prof2 = b.interpolate(x2, y2, z2, 'sea_water_temperature')
        # ... and then again to the full set; results must be unchanged
        env3, prof3 = b.interpolate(x, y, z, 'sea_water_temperature')
        self.assertEqual(env['sea_water_temperature'][0],
                         env3['sea_water_temperature'][0])
        self.assertEqual(env['sea_water_temperature'][20],
                         env2['sea_water_temperature'][0])
    def test_interpolation_missing(self):
        """Test interpolation."""
        reader = reader_ROMS_native.Reader(o.test_data_folder() +
            '2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
        num_points = 50
        np.random.seed(0)  # To get the same random numbers each time
        lons = np.random.uniform(10, 11, num_points)
        lats = np.random.uniform(66, 67.0, num_points)
        z = np.random.uniform(-200, 0, num_points)
        x, y = reader.lonlat2xy(lons, lats)
        variables = ['x_sea_water_velocity', 'y_sea_water_velocity',
                     'sea_water_temperature']
        # Read a block of data covering the points
        data = reader.get_variables(variables, time=reader.start_time,
                                    x=x, y=y, z=z)
        # Introduce missing values by masking part of one variable, so that
        # the linearND interpolator has to work around holes in the data.
        data['x_sea_water_velocity'] = np.ma.masked_where(
            data['x_sea_water_velocity']>.08,
            data['x_sea_water_velocity'])
        b = ReaderBlock(data, interpolation_horizontal='linearND')
        env, prof = b.interpolate(x, y, z, variables,
                                  profiles=['x_sea_water_velocity'],
                                  profiles_depth=[-30, 0])
        # Reference values come from an earlier trusted run (regression check).
        self.assertAlmostEqual(env['x_sea_water_velocity'][10],
                               0.074, 2)
        self.assertAlmostEqual(prof['x_sea_water_velocity'][5,48],
                               -0.090, 2)
    def test_linearNDFast(self):
        """Test interpolation."""
        reader = reader_ROMS_native.Reader(o.test_data_folder() +
            '2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
        reader.buffer=3
        num_points = 50
        np.random.seed(0)  # To get the same random numbers each time
        lons = np.random.uniform(14, 15, num_points)
        lats = np.random.uniform(68, 68.4, num_points)
        z = np.random.uniform(-20, 0, num_points)
        x, y = reader.lonlat2xy(lons, lats)
        #z=None
        variables = ['x_sea_water_velocity']
        # Read a block of data covering the points
        data = reader.get_variables(variables, time=reader.start_time,
                                    x=x, y=y, z=z)
        b = ReaderBlock(data.copy(),
                        interpolation_horizontal='linearNDFast')
        # A second, independent set of positions inside the same block.
        x2 = np.random.uniform(x.min(), x.max(), num_points)
        y2 = np.random.uniform(y.min(), y.max(), num_points)
        z2 = np.random.uniform(-20, 0, num_points)
        self.assertTrue(b.covers_positions(x, y, z))
        self.assertTrue(b.covers_positions(x2, y2, z2))
        # Check that there are holes in the arrays of the ReaderBlock
        self.assertEqual(
            np.sum(~np.isfinite(b.data_dict['x_sea_water_velocity'])), 1001)
        # Check that LinearNDFast interpolation gives a real value
        env, prof = b.interpolate(x2, y2, z2,
                                  variables,
                                  profiles=variables,
                                  profiles_depth=[-30, 0])
        self.assertEqual(
            np.sum(~np.isfinite(env['x_sea_water_velocity'])), 0)
        # Check that the arrays of the ReaderBlock have been filled in
        # (linearNDFast fills holes in-place as a side effect).
        self.assertEqual(
            np.sum(~np.isfinite(b.data_dict['x_sea_water_velocity'])), 0)
        # Check that nearest interpolation contains some NaN values
        b2 = ReaderBlock(data.copy(), interpolation_horizontal='nearest')
        env, prof = b2.interpolate(x2, y2, z2,
                                   variables,
                                   profiles=variables,
                                   profiles_depth=[-30, 0])
        self.assertEqual(
            np.sum(~np.isfinite(env['x_sea_water_velocity'])), 31)
    def test_expand_array(self):
        """Each expand_numpy_array pass fills NaN cells adjacent to finite ones."""
        reader = reader_ROMS_native.Reader(o.test_data_folder() +
            '2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')
        reader.buffer=1
        num_points = 50
        np.random.seed(0)  # To get the same random numbers each time
        lons = np.random.uniform(14, 15, num_points)
        lats = np.random.uniform(68, 68.4, num_points)
        x, y = reader.lonlat2xy(lons, lats)
        variables = ['x_sea_water_velocity']
        # Read a block of data covering the points
        data = reader.get_variables(variables, time=reader.start_time,
                                    x=x, y=y, z=0)
        # Convert masked cells (land) to NaN so expansion can be tracked.
        data = np.ma.filled(data['x_sea_water_velocity'],
                            fill_value=np.nan)
        self.assertTrue(np.isnan(data.max()))
        # Each call shrinks the NaN region from 80 -> 40 -> 9 -> 0 cells
        # (expansion happens in place).
        self.assertEqual(sum(~np.isfinite(data.ravel())), 80)
        expand_numpy_array(data)
        self.assertEqual(sum(~np.isfinite(data.ravel())), 40)
        expand_numpy_array(data)
        self.assertEqual(sum(~np.isfinite(data.ravel())), 9)
        expand_numpy_array(data)
        self.assertEqual(sum(~np.isfinite(data.ravel())), 0)
        self.assertFalse(np.isnan(data.max()))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| OpenDrift/opendrift | tests/readers/test_interpolation.py | Python | gpl-2.0 | 20,549 | [
"NetCDF"
] | 2d543f719f2192c4efc023a3e7dd260bc2f7f71e16d9fa9b3a5d549b3e98802d |
########################################################################
# File : InProcessComputingElement.py
# Author : Stuart Paterson
########################################################################
""" The simplest Computing Element instance that submits jobs locally.
This is also the standard "CE" invoked from the JobAgent
"""
__RCSID__ = "$Id$"
import os
import stat
from DIRAC import S_OK, S_ERROR
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.Core.Utilities.Subprocess import systemCall
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
class InProcessComputingElement( ComputingElement ):
  """Computing Element that executes the payload synchronously in a forked
  subprocess of the pilot itself, with no batch system in between.
  """

  #############################################################################
  def __init__( self, ceUniqueID ):
    """ Standard constructor.
    """
    ComputingElement.__init__( self, ceUniqueID )
    # Number of payloads this CE instance has executed so far
    self.submittedJobs = 0

  #############################################################################
  def _addCEConfigDefaults( self ):
    """ Method to make sure all necessary Configuration Parameters are defined
    """
    # First assure that any global parameters are loaded
    ComputingElement._addCEConfigDefaults( self )
    # Now InProcess specific ones

  #############################################################################
  def submitJob( self, executableFile, proxy, **kwargs ):
    """ Method to submit job (overriding base method).

    :param executableFile: file to execute via systemCall. Normally the JobWrapperTemplate when invoked by the JobAgent.
    :type executableFile: string
    :param proxy: the proxy used for running the job (the payload). It will be dumped to a file.
    :type proxy: string
    """
    # Locate the pilot proxy (None if it cannot be determined)
    ret = getProxyInfo()
    if not ret['OK']:
      pilotProxy = None
    else:
      pilotProxy = ret['Value']['path']
    self.log.notice( 'Pilot Proxy:', pilotProxy )
    payloadEnv = dict( os.environ )
    payloadProxy = ''
    renewTask = None
    if proxy:
      # Dump the payload proxy to a file and point X509_USER_PROXY at it,
      # then schedule a periodic task to keep it renewed from the pilot proxy.
      self.log.verbose( 'Setting up proxy for payload' )
      result = self.writeProxyToFile( proxy )
      if not result['OK']:
        return result
      payloadProxy = result['Value'] # proxy file location
      # pilotProxy = os.environ['X509_USER_PROXY']
      payloadEnv[ 'X509_USER_PROXY' ] = payloadProxy
      self.log.verbose( 'Starting process for monitoring payload proxy' )
      result = gThreadScheduler.addPeriodicTask( self.proxyCheckPeriod, self.monitorProxy,
                                                 taskArgs = ( pilotProxy, payloadProxy ),
                                                 executions = 0, elapsedTime = 0 )
      if result[ 'OK' ]:
        renewTask = result[ 'Value' ]
    # Make sure the payload is readable and executable (5 == R_OK | X_OK)
    if not os.access( executableFile, 5 ):
      os.chmod( executableFile, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH )
    cmd = os.path.abspath( executableFile )
    self.log.verbose( 'CE submission command: %s' % ( cmd ) )
    # Blocking call: the job runs to completion here
    result = systemCall( 0, cmd, callbackFunction = self.sendOutput, env = payloadEnv )
    # Clean up the proxy file and the renewal task regardless of outcome
    if payloadProxy:
      os.unlink( payloadProxy )
    if renewTask:
      gThreadScheduler.removeTask( renewTask )
    ret = S_OK()
    if not result['OK']:
      self.log.error( 'Fail to run InProcess', result['Message'] )
    elif result['Value'][0] > 128:
      # negative exit values are returned as 256 - exit
      self.log.warn( 'InProcess Job Execution Failed' )
      self.log.info( 'Exit status:', result['Value'][0] - 256 )
      # -2 / -1 are reserved JobWrapper failure codes; anything else is generic
      if result['Value'][0] - 256 == -2:
        error = 'JobWrapper initialization error'
      elif result['Value'][0] - 256 == -1:
        error = 'JobWrapper execution error'
      else:
        error = 'InProcess Job Execution Failed'
      res = S_ERROR( error )
      res['Value'] = result['Value'][0] - 256
      return res
    elif result['Value'][0] > 0:
      # Payload ran but exited non-zero; report it without failing the CE call
      self.log.warn( 'Fail in payload execution' )
      self.log.info( 'Exit status:', result['Value'][0] )
      ret['PayloadFailed'] = result['Value'][0]
    else:
      self.log.debug( 'InProcess CE result OK' )
    self.submittedJobs += 1
    return ret

  #############################################################################
  def getCEStatus( self ):
    """ Method to return information on running and pending jobs.
    """
    # Jobs run synchronously in-process, so nothing is ever queued or
    # running when this is queried.
    result = S_OK()
    result['SubmittedJobs'] = 0
    result['RunningJobs'] = 0
    result['WaitingJobs'] = 0
    return result

  #############################################################################
  def monitorProxy( self, pilotProxy, payloadProxy ):
    """ Monitor the payload proxy and renew as necessary.
    """
    return self._monitorProxy( pilotProxy, payloadProxy )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| Andrew-McNab-UK/DIRAC | Resources/Computing/InProcessComputingElement.py | Python | gpl-3.0 | 4,942 | [
"DIRAC"
] | 84fe396d7357ce5fe35a7fb69a52185b2887d02f5c224304a6e7fcfefc53bf82 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
#
from spack import *
class RBiobase(RPackage):
    """Functions that are needed by many other packages
    or which replace R functions."""

    homepage = "https://www.bioconductor.org/packages/Biobase/"
    url = "https://git.bioconductor.org/packages/Biobase"
    list_url = homepage

    # Bioconductor releases are fetched from git and pinned to exact commits.
    version('2.38.0', git='https://git.bioconductor.org/packages/Biobase', commit='83f89829e0278ac014b0bc6664e621ac147ba424')
    version('2.36.2', git='https://git.bioconductor.org/packages/Biobase', commit='15f50912f3fa08ccb15c33b7baebe6b8a59ce075')

    depends_on('r-biocgenerics@0.16.1:', type=('build', 'run'))
    # The R interpreter constraint only applies from the 2.36.2 line onwards.
    depends_on('r@3.4.0:3.4.9', when='@2.36.2:')
| EmreAtes/spack | var/spack/repos/builtin/packages/r-biobase/package.py | Python | lgpl-2.1 | 1,894 | [
"Bioconductor"
] | 43eae646b0fed80c6531cbb930d4027834b5fefa5409cbc2f986d7add2820fcc |
import numpy as np
import FittingUtilities
import HelperFunctions
import matplotlib.pyplot as plt
import sys
import os
from astropy import units
import DataStructures
from scipy.interpolate import InterpolatedUnivariateSpline as interp
from scipy.interpolate import UnivariateSpline as smooth
import MakeModel
import HelperFunctions
from collections import Counter
from sklearn.gaussian_process import GaussianProcess
from sklearn import cross_validation
from scipy.stats import gmean
from astropy.io import fits, ascii
def SmoothData(order, windowsize=91, smoothorder=5, lowreject=3, highreject=3, numiters=10, expand=0, normalize=True):
    """Denoise a spectral order and smooth it with an iterative
    Savitzky-Golay filter.

    The input xypoint is not modified; a smoothed copy is returned.
    If normalize is True, the smoothed flux is scaled to a peak of 1.
    """
    result = HelperFunctions.Denoise(order.copy())
    result.y = FittingUtilities.Iterative_SV(result.y,
                                             windowsize,
                                             smoothorder,
                                             lowreject=lowreject,
                                             highreject=highreject,
                                             numiters=numiters,
                                             expand=expand)
    if normalize:
        result.y /= result.y.max()
    return result
def roundodd(num):
    """Round *num* to the nearest odd integer.

    The value is first rounded to the nearest integer; if that is even,
    we step one unit toward *num* so the result is the closer odd number.
    """
    nearest = round(num)
    if nearest % 2 != 0:
        return nearest
    # nearest is even: the closer odd neighbour lies on num's side of it
    return nearest - 1 if nearest > num else nearest + 1
def cost(data, prediction, scale=1, dx=1):
    """Return the reduced chi-squared of *prediction* against *data*.

    *scale* is the per-point uncertainty (scalar or array); *dx* is kept
    for interface compatibility and is not used by the current metric.
    """
    squared_error = (prediction - data) ** 2 / scale ** 2
    return np.sum(squared_error) / float(prediction.size)
def OptimalSmooth(order, normalize=True):
    """
    Determine the best window size with cross-validation
    """
    #Flatten the spectrum
    order.y /= order.cont/order.cont.mean()
    order.err /= order.cont/order.cont.mean()

    #Remove outliers (telluric residuals)
    smoothed = SmoothData(order, windowsize=41, normalize=False)
    temp = smoothed.copy()
    temp.y = order.y/smoothed.y
    temp.cont = FittingUtilities.Continuum(temp.x, temp.y, lowreject=2, highreject=2, fitorder=3)
    outliers = HelperFunctions.FindOutliers(temp, numsiglow=6, numsighigh=6, expand=10)
    data = order.copy()
    if len(outliers) > 0:
        # Replace outliers with the smoothed value and de-weight them
        #order.y[outliers] = order.cont[outliers]
        order.y[outliers] = smoothed.y[outliers]
        order.err[outliers] = 9e9

    #Make cross-validation sets (80/20 split, re-sorted in wavelength)
    inp = np.transpose((order.x, order.err, order.cont))
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(inp, order.y, test_size=0.2)
    X_train = X_train.transpose()
    X_test = X_test.transpose()
    sorter_train = np.argsort(X_train[0])
    sorter_test = np.argsort(X_test[0])
    training = DataStructures.xypoint(x=X_train[0][sorter_train], y=y_train[sorter_train], err=X_train[1][sorter_train], cont=X_train[2][sorter_train])
    validation = DataStructures.xypoint(x=X_test[0][sorter_test], y=y_test[sorter_test], err=X_test[1][sorter_test], cont=X_test[2][sorter_test])

    # NOTE(review): the grid search below is disabled; a fixed heuristic
    # smoothing factor (s = 0.9 * N) is used instead, so the train/test
    # split above is currently unused.
    """
    #Try each smoothing parameter
    s_array = np.logspace(-3, 1, 100)
    chisq = []
    for s in s_array:
        fcn = smooth(training.x, training.y, w=1.0/training.err, s=s)
        prediction = fcn(validation.x)
        chisq.append(cost(validation.y, prediction, validation.err))
        print s, chisq[-1]
    idx = np.argmin(np.array(chisq) - 1.0)
    s = s_array[idx]
    """
    s = 0.9*order.size()
    smoothed = order.copy()
    fcn = smooth(smoothed.x, smoothed.y, w=1.0/smoothed.err, s=s)
    smoothed.y = fcn(smoothed.x)
    # Diagnostic plot of the fit (blocks until the window is closed)
    plt.plot(order.x, order.y)
    plt.plot(smoothed.x, smoothed.y)
    plt.show()
    return smoothed, s
def CrossValidation(order, smoothorder=5, lowreject=3, highreject=3, numiters=10, normalize=True):
    """
    Determine the best window size with cross-validation
    """
    #order = HelperFunctions.Denoise(order.copy())
    order.y /= order.cont/order.cont.mean()
    #plt.plot(order.x, order.y)

    # First, find outliers by doing a guess smooth
    smoothed = SmoothData(order, windowsize=41, normalize=False)
    temp = smoothed.copy()
    temp.y = order.y/smoothed.y
    temp.cont = FittingUtilities.Continuum(temp.x, temp.y, lowreject=2, highreject=2, fitorder=3)
    outliers = HelperFunctions.FindOutliers(temp, numsiglow=6, numsighigh=6, expand=10)
    data = order.copy()
    if len(outliers) > 0:
        # Replace the outliers with the guess-smoothed values
        #order.y[outliers] = order.cont[outliers]
        order.y[outliers] = smoothed.y[outliers]
    #plt.plot(order.x, order.y)
    #plt.plot(order.x, order.cont)
    #plt.show()
    #plt.plot(order.x, order.y)
    #plt.plot(denoised.x, denoised.y)
    #plt.show()

    # First, split the data into a training sample and validation sample
    # Use every 10th point for the validation sample
    cv_indices = range(6, order.size()-1, 6)
    training = DataStructures.xypoint(size=order.size()-len(cv_indices))
    validation = DataStructures.xypoint(size=len(cv_indices))
    cv_idx = 0
    tr_idx = 0
    for i in range(order.size()):
        if i in cv_indices:
            validation.x[cv_idx] = order.x[i]
            validation.y[cv_idx] = order.y[i]
            validation.cont[cv_idx] = order.cont[i]
            validation.err[cv_idx] = order.err[i]
            cv_idx += 1
        else:
            training.x[tr_idx] = order.x[i]
            training.y[tr_idx] = order.y[i]
            training.cont[tr_idx] = order.cont[i]
            training.err[tr_idx] = order.err[i]
            tr_idx += 1

    #Rebin the training set to constant wavelength spacing
    xgrid = np.linspace(training.x[0], training.x[-1], training.size())
    training = FittingUtilities.RebinData(training, xgrid)
    dx = training.x[1] - training.x[0]

    # Estimate the local noise from a 5th-order polynomial fit to the
    # central 80 pixels... then immediately override it with 0.8*err.
    size = 40
    left = xgrid.size/2 - size
    right = left + size*2
    func = np.poly1d(np.polyfit(training.x[left:right]-training.x[left+size], training.y[left:right], 5))
    sig = np.std(training.y[left:right] - func(training.x[left:right]-training.x[left+size]))
    sig = validation.err*0.8
    #print "New std = ", sig
    #plt.figure(3)
    #plt.plot(training.x[left:right], training.y[left:right])
    #plt.plot(training.x[left:right], func(training.x[left:right]))
    #plt.show()
    #plt.figure(1)

    #Find the rough location of the best window size
    windowsizes = np.logspace(-1.3, 0.5, num=20)
    chisq = []
    skip = 0
    for i, windowsize in enumerate(windowsizes):
        npixels = roundodd(windowsize/dx)
        if npixels < 6:
            skip += 1
            continue
        if npixels > training.size:
            windowsizes = windowsizes[:i]
            break
        # Smooth the training set and score against the held-out points
        smoothed = FittingUtilities.Iterative_SV(training.y.copy(), npixels, smoothorder, lowreject, highreject, numiters)
        smooth_fcn = interp(training.x, smoothed)
        predict = smooth_fcn(validation.x)
        #sig = validation.err
        #chisq.append(cost(training.y, smoothed, training.err))
        chisq.append(cost(validation.y, predict, sig, validation.x[1] - validation.x[0]))
        #chisq.append(np.sum((predict - validation.y)**2/sig**2)/float(predict.size))
        #sig = np.std(smoothed / training.y)
        #chisq.append(np.std(predict/validation.y) / sig)
        print "\t", windowsize, chisq[-1]

    #plt.loglog(windowsizes, chisq)
    #plt.show()
    # Bracket the window sizes whose chi-squared straddles 1.0
    windowsizes = windowsizes[skip:]
    chisq = np.array(chisq)
    idx = np.argmin(abs(chisq-1.0))
    sorter = np.argsort(chisq)
    chisq = chisq[sorter]
    windowsizes = windowsizes[sorter]
    left, right = HelperFunctions.GetSurrounding(chisq, 1, return_index=True)
    if left > right:
        temp = left
        left = right
        right = temp
    print windowsizes[left], windowsizes[right]

    #Refine the window size to get more accurate
    windowsizes = np.logspace(np.log10(windowsizes[left]), np.log10(windowsizes[right]), num=10)
    chisq = []
    for i, windowsize in enumerate(windowsizes):
        npixels = roundodd(windowsize/dx)
        if npixels > training.size:
            windowsizes = windowsizes[:i]
            break
        smoothed = FittingUtilities.Iterative_SV(training.y.copy(), npixels, smoothorder, lowreject, highreject, numiters)
        smooth_fcn = interp(training.x, smoothed)
        predict = smooth_fcn(validation.x)
        #sig = validation.err
        #chisq.append(cost(training.y, smoothed, training.err))
        chisq.append(cost(validation.y, predict, sig, validation.x[1] - validation.x[0]))
        #chisq.append(np.sum((predict - validation.y)**2/sig**2)/float(predict.size))
        #sig = np.std(smoothed / training.y)
        #chisq.append(np.std(predict/validation.y) / sig)
        print "\t", windowsize, chisq[-1]

    # Pick the refined window whose chi-squared is closest to 1.0 and
    # apply it to the full (unsplit) order
    chisq = np.array(chisq)
    idx = np.argmin(abs(chisq-1.0))
    windowsize = windowsizes[idx]
    npixels = roundodd(windowsize/dx)

    smoothed = order.copy()
    smoothed.y = FittingUtilities.Iterative_SV(order.y, npixels, smoothorder, lowreject, highreject, numiters)
    #plt.plot(data.x, data.y)
    #plt.plot(smoothed.x, smoothed.y)
    #plt.show()
    if normalize:
        smoothed.y /= smoothed.y.max()
    return smoothed, windowsize
def GPSmooth(data, low=0.1, high=10, debug=False):
    """
    This will smooth the data using Gaussian processes. It will find the best
    smoothing parameter via cross-validation to be between the low and high.

    The low and high keywords are reasonable bounds for A and B stars with
    vsini > 100 km/s.
    """
    smoothed = data.copy()

    # First, find outliers by doing a guess smooth
    smoothed = SmoothData(data, normalize=False)
    temp = smoothed.copy()
    temp.y = data.y/smoothed.y
    temp.cont = FittingUtilities.Continuum(temp.x, temp.y, lowreject=2, highreject=2, fitorder=3)
    outliers = HelperFunctions.FindOutliers(temp, numsiglow=3, expand=5)
    if len(outliers) > 0:
        data.y[outliers] = smoothed.y[outliers]

    # NOTE(review): this uses the legacy (pre-0.18) sklearn GaussianProcess
    # API; theta0/thetaL/thetaU bound the correlation length optimised by
    # maximum likelihood, and nugget acts as per-point regularisation.
    gp = GaussianProcess(corr='squared_exponential',
                         theta0 = np.sqrt(low*high),
                         thetaL = low,
                         thetaU = high,
                         normalize = False,
                         nugget = (data.err / data.y)**2,
                         random_start=1)
    try:
        gp.fit(data.x[:,None], data.y)
    except ValueError:
        #On some orders with large telluric residuals, this will fail.
        # Just fall back to the old smoothing method in that case.
        return SmoothData(data), 91
    if debug:
        print "\tSmoothing parameter theta = ", gp.theta_
    smoothed.y, smoothed.err = gp.predict(data.x[:,None], eval_MSE=True)
    return smoothed, gp.theta_[0][0]
# Command-line driver (Python 2): smooth each telluric-corrected spectrum,
# using a Savitzky-Golay window scaled by the star's vsini.
# Flags: -p (plot), -vsinifile=, -vsiniskip=, -vsiniidx=; other args are files.
if __name__ == "__main__":
    fileList = []
    plot = False
    vsini_file = "%s/School/Research/Useful_Datafiles/Vsini.csv" %(os.environ["HOME"])
    vsini_skip = 10
    vsini_idx = 1
    for arg in sys.argv[1:]:
        if "-p" in arg:
            plot = True
        elif "-vsinifile" in arg:
            vsini_file = arg.split("=")[-1]
        elif "-vsiniskip" in arg:
            vsini_skip = int(arg.split("=")[-1])
        elif "-vsiniidx" in arg:
            vsini_idx = int(arg.split("=")[-1])
        else:
            fileList.append(arg)

    #Read in the vsini table
    vsini_data = ascii.read(vsini_file)[vsini_skip:]

    # Default to all telluric-corrected spectra in the current directory
    if len(fileList) == 0:
        fileList = [f for f in os.listdir("./") if f.endswith("telluric_corrected.fits")]
    for fname in fileList:
        orders = HelperFunctions.ReadFits(fname, extensions=True, x="wavelength", y="flux", cont="continuum", errors="error")

        #Find the vsini of this star (exit if not present in the table)
        header = fits.getheader(fname)
        starname = header["object"]
        for data in vsini_data:
            if data[0] == starname:
                vsini = abs(float(data[vsini_idx]))
                break
        else:
            sys.exit("Cannot find %s in the vsini data: %s" %(starname, vsini_file))
        print starname, vsini

        #Begin looping over the orders
        column_list = []
        header_list = []
        for i, order in enumerate(orders):
            print "Smoothing order %i/%i" %(i+1, len(orders))
            #Fix errors: replace sentinel error values with Poisson estimate
            order.err[order.err > 1e8] = np.sqrt(order.y[order.err > 1e8])

            #Linearize: rebin onto a constant wavelength spacing
            xgrid = np.linspace(order.x[0], order.x[-1], order.x.size)
            order = FittingUtilities.RebinData(order, xgrid)
            dx = order.x[1] - order.x[0]

            # Window size: fraction of the rotational broadening width in
            # pixels (vsini/c * lambda / dx), at least 21 pixels and odd.
            smooth_factor = 0.8
            theta = max(21, roundodd(vsini/3e5 * order.x.mean()/dx * smooth_factor))
            denoised = SmoothData(order,
                                  windowsize=theta,
                                  smoothorder=3,
                                  lowreject=3,
                                  highreject=3,
                                  expand=10,
                                  numiters=10)
            #denoised, theta = GPSmooth(order.copy())
            #denoised, theta = CrossValidation(order.copy(), 5, 2, 2, 10)
            #denoised, theta = OptimalSmooth(order.copy())
            #denoised.y *= order.cont/order.cont.mean()
            print "Window size = %.4f nm" %theta

            # Output column: flux divided by the smoothed model
            column = {"wavelength": denoised.x,
                      "flux": order.y / denoised.y,
                      "continuum": denoised.cont,
                      "error": denoised.err}
            header_list.append((("Smoother", theta, "Smoothing Parameter"),))
            column_list.append(column)
            if plot:
                plt.figure(1)
                plt.plot(order.x, order.y/order.y.mean())
                plt.plot(denoised.x, denoised.y/denoised.y.mean())
                plt.title(starname)
                plt.figure(2)
                plt.plot(order.x, order.y/denoised.y)
                plt.title(starname)
                #plt.plot(order.x, (order.y-denoised.y)/np.median(order.y))
                #plt.show()
        if plot:
            plt.show()
        outfilename = "%s_smoothed.fits" %(fname.split(".fits")[0])
        print "Outputting to %s" %outfilename
        HelperFunctions.OutputFitsFileExtensions(column_list, fname, outfilename, mode='new', headers_info=header_list)
| kgullikson88/Chiron-Scripts | Smooth.py | Python | gpl-3.0 | 13,289 | [
"Gaussian"
] | 3ec891169805017832c3067f59892179a3bacdb0bd4f9d829820b5c6411c0c19 |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
**************************************
**espressopp.analysis.OrderParameter**
**************************************
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.AnalysisBase import *
from _espressopp import analysis_OrderParameter
class OrderParameterLocal(AnalysisBaseLocal, analysis_OrderParameter):
'The (local) compute of temperature.'
def __init__(self, system, cutoff, angular_momentum=6,
do_cluster_analysis=False, include_surface_particles=False,
ql_low=-1.0, ql_high=1.0):
if not pmi._PMIComm or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, analysis_OrderParameter, system, cutoff, angular_momentum,
do_cluster_analysis, include_surface_particles,
ql_low, ql_high)
if pmi.isController :
class OrderParameter(AnalysisBase):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.analysis.OrderParameterLocal',
pmiproperty = [ 'cutoff', 'l' ]
)
.. function:: espressopp.analysis.OrderParameter(system, cutoff, angular_momentum, do_cluster_analysis, include_surface_particles, ql_low, ql_high)
:param system:
:param cutoff:
:param angular_momentum: (default: 6)
:param do_cluster_analysis: (default: False)
:param include_surface_particles: (default: False)
:param ql_low: (default: -1.0)
:param ql_high: (default: 1.0)
:type system:
:type cutoff:
:type angular_momentum: int
:type do_cluster_analysis:
:type include_surface_particles:
:type ql_low:
:type ql_high: real
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.AnalysisBase import *
from _espressopp import analysis_OrderParameter
class OrderParameterLocal(AnalysisBaseLocal, analysis_OrderParameter):
    """Worker-side (local) part of the order-parameter analysis observable."""
    def __init__(self, system, cutoff, angular_momentum=6,
                 do_cluster_analysis=False, include_surface_particles=False,
                 ql_low=-1.0, ql_high=1.0):
        # Construct the C++ object only on ranks that belong to the active
        # PMI CPU group (or on all ranks when no PMI communicator is active).
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            #print "coupled cluster analysis is currently broken"
            cxxinit(self, analysis_OrderParameter, system, cutoff, angular_momentum,
                    do_cluster_analysis, include_surface_particles,
                    ql_low, ql_high)
# Controller-side proxy: forwards method calls to the Local objects via PMI.
if pmi.isController :
    class OrderParameter(AnalysisBase):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.analysis.OrderParameterLocal'
        )
| junghans/espressopp | src/analysis/OrderParameter.py | Python | gpl-3.0 | 3,528 | [
"ESPResSo"
] | 8d8757c8d17d6b4778665010b5a9f41cf52afab8c8d6128f5a69f06729fca612 |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import glance_store
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
import webob
from glance.api import policy
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
import glance.db
import glance.gateway
from glance import i18n
import glance.notifier
import glance.schema
LOG = logging.getLogger(__name__)
_ = i18n._
class ImageMembersController(object):
def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
store_api=None):
self.db_api = db_api or glance.db.get_api()
self.policy = policy_enforcer or policy.Enforcer()
self.notifier = notifier or glance.notifier.Notifier()
self.store_api = store_api or glance_store
self.gateway = glance.gateway.Gateway(self.db_api, self.store_api,
self.notifier, self.policy)
def _lookup_image(self, req, image_id):
image_repo = self.gateway.get_repo(req.context)
try:
return image_repo.get(image_id)
except (exception.NotFound):
msg = _("Image %s not found.") % image_id
LOG.warning(msg)
raise webob.exc.HTTPNotFound(explanation=msg)
except exception.Forbidden:
msg = _("You are not authorized to lookup image %s.") % image_id
LOG.warning(msg)
raise webob.exc.HTTPForbidden(explanation=msg)
@staticmethod
def _get_member_repo(image):
try:
# For public images, a forbidden exception with message
# "Public images do not have members" is thrown.
return image.get_member_repo()
except exception.Forbidden as e:
msg = (_("Error fetching members of image %(image_id)s: "
"%(inner_msg)s"), {"image_id": image.image_id,
"inner_msg": e.msg})
LOG.warning(msg)
raise webob.exc.HTTPForbidden(explanation=msg)
def _lookup_member(self, image, member_id):
member_repo = self._get_member_repo(image)
try:
return member_repo.get(member_id)
except (exception.NotFound):
msg = (_("%(m_id)s not found in the member list of the image "
"%(i_id)s."), {"m_id": member_id,
"i_id": image.image_id})
LOG.warning(msg)
raise webob.exc.HTTPNotFound(explanation=msg)
except exception.Forbidden:
msg = (_("You are not authorized to lookup the members of the "
"image %s.") % image.image_id)
LOG.warning(msg)
raise webob.exc.HTTPForbidden(explanation=msg)
@utils.mutating
def create(self, req, image_id, member_id):
"""
Adds a membership to the image.
:param req: the Request object coming from the wsgi layer
:param image_id: the image identifier
:param member_id: the member identifier
:retval The response body is a mapping of the following form::
{'member_id': <MEMBER>,
'image_id': <IMAGE>,
'status': <MEMBER_STATUS>
'created_at': ..,
'updated_at': ..}
"""
image = self._lookup_image(req, image_id)
member_repo = self._get_member_repo(image)
image_member_factory = self.gateway.get_image_member_factory(
req.context)
try:
new_member = image_member_factory.new_image_member(image,
member_id)
member_repo.add(new_member)
return new_member
except exception.Forbidden:
msg = _("Not allowed to create members for image %s.") % image_id
LOG.warning(msg)
raise webob.exc.HTTPForbidden(explanation=msg)
except exception.Duplicate:
msg = _("Member %(member_id)s is duplicated for image "
"%(image_id)s") % {"member_id": member_id,
"image_id": image_id}
LOG.warning(msg)
raise webob.exc.HTTPConflict(explanation=msg)
except exception.ImageMemberLimitExceeded as e:
msg = (_("Image member limit exceeded for image %(id)s: %(e)s:")
% {"id": image_id, "e": utils.exception_to_str(e)})
LOG.warning(msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
@utils.mutating
def update(self, req, image_id, member_id, status):
"""
Adds a membership to the image.
:param req: the Request object coming from the wsgi layer
:param image_id: the image identifier
:param member_id: the member identifier
:retval The response body is a mapping of the following form::
{'member_id': <MEMBER>,
'image_id': <IMAGE>,
'status': <MEMBER_STATUS>
'created_at': ..,
'updated_at': ..}
"""
image = self._lookup_image(req, image_id)
member_repo = self._get_member_repo(image)
member = self._lookup_member(image, member_id)
try:
member.status = status
member_repo.save(member)
return member
except exception.Forbidden:
msg = _("Not allowed to update members for image %s.") % image_id
LOG.warning(msg)
raise webob.exc.HTTPForbidden(explanation=msg)
except ValueError as e:
msg = _("Incorrect request: %s") % utils.exception_to_str(e)
LOG.warning(msg)
raise webob.exc.HTTPBadRequest(explanation=msg)
def index(self, req, image_id):
"""
Return a list of dictionaries indicating the members of the
image, i.e., those tenants the image is shared with.
:param req: the Request object coming from the wsgi layer
:param image_id: The image identifier
:retval The response body is a mapping of the following form::
{'members': [
{'member_id': <MEMBER>,
'image_id': <IMAGE>,
'status': <MEMBER_STATUS>
'created_at': ..,
'updated_at': ..}, ..
]}
"""
image = self._lookup_image(req, image_id)
member_repo = self._get_member_repo(image)
members = []
try:
for member in member_repo.list():
members.append(member)
except exception.Forbidden:
msg = _("Not allowed to list members for image %s.") % image_id
LOG.warning(msg)
raise webob.exc.HTTPForbidden(explanation=msg)
return dict(members=members)
def show(self, req, image_id, member_id):
    """
    Returns the membership of the tenant wrt to the image_id specified.

    :param req: the Request object coming from the wsgi layer
    :param image_id: The image identifier
    :retval The response body is a mapping of the following form::

        {'member_id': <MEMBER>,
         'image_id': <IMAGE>,
         'status': <MEMBER_STATUS>
         'created_at': ..,
         'updated_at': ..}

    """
    try:
        image = self._lookup_image(req, image_id)
        return self._lookup_member(image, member_id)
    except webob.exc.HTTPForbidden as fe:
        # Mask Forbidden as NotFound so a caller cannot probe for the
        # existence of images it is not allowed to see.
        raise webob.exc.HTTPNotFound(explanation=fe.explanation)
@utils.mutating
def delete(self, req, image_id, member_id):
    """
    Removes a membership from the image.

    :param req: the Request object coming from the wsgi layer
    :param image_id: the image identifier
    :param member_id: the member identifier
    """
    image = self._lookup_image(req, image_id)
    repo = self._get_member_repo(image)
    membership = self._lookup_member(image, member_id)
    try:
        repo.remove(membership)
    except exception.Forbidden:
        msg = _("Not allowed to delete members for image %s.") % image_id
        LOG.warning(msg)
        raise webob.exc.HTTPForbidden(explanation=msg)
    # 204 No Content on success.
    return webob.Response(body='', status=204)
class RequestDeserializer(wsgi.JSONRequestDeserializer):
    """Deserialize image-member requests from their JSON payloads."""

    def __init__(self):
        super(RequestDeserializer, self).__init__()

    def _get_request_body(self, request):
        # The base class wraps the parsed JSON under a 'body' key; a
        # request without one is malformed for this API.
        output = super(RequestDeserializer, self).default(request)
        if 'body' not in output:
            msg = _('Body expected in request.')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return output['body']

    def create(self, request):
        """Extract the member id for a membership-create request."""
        payload = self._get_request_body(request)
        try:
            member_id = payload['member']
        except KeyError:
            raise webob.exc.HTTPBadRequest(
                explanation=_("Member to be added not specified"))
        if not member_id:
            raise webob.exc.HTTPBadRequest(
                explanation=_("Member can't be empty"))
        return dict(member_id=member_id)

    def update(self, request):
        """Extract the new status for a membership-update request."""
        payload = self._get_request_body(request)
        try:
            status = payload['status']
        except KeyError:
            raise webob.exc.HTTPBadRequest(
                explanation=_("Status not specified"))
        return dict(status=status)
class ResponseSerializer(wsgi.JSONResponseSerializer):
    """Serialize image-member resources into JSON response bodies.

    The `create`, `update` and `show` actions previously triplicated the
    same dump/encode/content-type sequence; it now lives in the private
    `_write_body` helper so the per-action methods stay one-liners.
    """

    def __init__(self, schema=None):
        super(ResponseSerializer, self).__init__()
        self.schema = schema or get_schema()

    def _format_image_member(self, member):
        """Return a schema-filtered dict view of a single member."""
        member_view = {}
        attributes = ['member_id', 'image_id', 'status']
        for key in attributes:
            member_view[key] = getattr(member, key)
        member_view['created_at'] = timeutils.isotime(member.created_at)
        member_view['updated_at'] = timeutils.isotime(member.updated_at)
        member_view['schema'] = '/v2/schemas/member'
        member_view = self.schema.filter(member_view)
        return member_view

    def _write_body(self, response, view):
        """Encode `view` as JSON and write it to `response`."""
        body = jsonutils.dumps(view, ensure_ascii=False)
        response.unicode_body = six.text_type(body)
        response.content_type = 'application/json'

    def create(self, response, image_member):
        self._write_body(response, self._format_image_member(image_member))

    def update(self, response, image_member):
        self._write_body(response, self._format_image_member(image_member))

    def index(self, response, image_members):
        # `image_members` is the {'members': [...]} dict built by the
        # controller's index() action.
        members_view = [self._format_image_member(member)
                        for member in image_members['members']]
        totalview = dict(members=members_view)
        totalview['schema'] = '/v2/schemas/members'
        self._write_body(response, totalview)

    def show(self, response, image_member):
        self._write_body(response, self._format_image_member(image_member))
# JSON-schema property definitions for a single image-member resource.
# Consumed by get_schema() below; glance.schema.Schema uses these both to
# validate incoming payloads and to filter outgoing response views.
_MEMBER_SCHEMA = {
    'member_id': {
        'type': 'string',
        'description': _('An identifier for the image member (tenantId)')
    },
    'image_id': {
        'type': 'string',
        'description': _('An identifier for the image'),
        # Canonical UUID format (8-4-4-4-12 hex digits).
        'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
                    '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'),
    },
    'created_at': {
        'type': 'string',
        'description': _('Date and time of image member creation'),
        # TODO(brian-rosmaita): our jsonschema library doesn't seem to like the
        # format attribute, figure out why (and also fix in images.py)
        # 'format': 'date-time',
    },
    'updated_at': {
        'type': 'string',
        'description': _('Date and time of last modification of image member'),
        # 'format': 'date-time',
    },
    'status': {
        'type': 'string',
        'description': _('The status of this image member'),
        'enum': [
            'pending',
            'accepted',
            'rejected'
        ]
    },
    'schema': {'type': 'string'}
}
def get_schema():
    """Return the glance Schema describing a single image member."""
    # Deep-copy so later mutation of the schema object can never leak
    # back into the module-level template.
    properties = copy.deepcopy(_MEMBER_SCHEMA)
    schema = glance.schema.Schema('member', properties)
    return schema
def get_collection_schema():
    """Return the glance CollectionSchema for a list of image members."""
    member_schema = get_schema()
    return glance.schema.CollectionSchema('members', member_schema)
def create_resource():
    """Image Members resource factory method"""
    # Wire the controller together with its JSON (de)serializers.
    return wsgi.Resource(ImageMembersController(),
                         RequestDeserializer(),
                         ResponseSerializer())
| kfwang/Glance-OVA-OVF | glance/api/v2/image_members.py | Python | apache-2.0 | 13,892 | [
"Brian"
] | c87e3bc95bdc25517f042b266854e0b779e88090f973fc6f290b7693481ed312 |
"""
Set of objects to manage triggers streams.
A trigger is an audio signal with a value of 1 surrounded by 0s.
TrigXXX objects use this kind of signal to generate different
processes with sampling rate time accuracy.
"""
"""
Copyright 2009-2015 Olivier Belanger
This file is part of pyo, a python module to help digital signal
processing script creation.
pyo is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
pyo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with pyo. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
from _core import *
from _maps import *
from _widgets import createGraphWindow
from types import SliceType, ListType, TupleType
import weakref
class Trig(PyoObject):
    """
    Sends one trigger.

    A trigger is an audio signal with a value of 1 surrounded by 0s.

    Trig sends a trigger each time it's play() method is called.

    :Parent: :py:class:`PyoObject`

    .. note::

        The out() method is bypassed. Trig's signal can not be sent to audio outs.

        Trig has no `mul` and `add` attributes.

    >>> s = Server().boot()
    >>> s.start()
    >>> a = Trig()
    >>> env = HannTable()
    >>> tenv = TrigEnv(a, table=env, dur=5, mul=.3)
    >>> n = Noise(tenv).out()

    """
    def __init__(self):
        PyoObject.__init__(self)
        # A single C-level trigger stream; it fires once per play() call.
        self._base_objs = [Trig_base()]

    def out(self, chnl=0, inc=1, dur=0, delay=0):
        # A trigger stream is a control signal: routing it to the audio
        # outputs makes no sense, so out() simply starts playback.
        return self.play(dur, delay)

    # Trig carries no amplitude or offset, so the arithmetic setters
    # inherited from PyoObject are deliberately disabled.
    def setMul(self, x):
        pass

    def setAdd(self, x):
        pass

    def setSub(self, x):
        pass

    def setDiv(self, x):
        pass
class Metro(PyoObject):
    """
    Generates isochronous trigger signals.

    A trigger is an audio signal with a value of 1 surrounded by 0s.

    The play() method starts the metro and is not called at the object
    creation time.

    :Parent: :py:class:`PyoObject`

    :Args:

        time : float or PyoObject, optional
            Time between each trigger in seconds. Defaults to 1.
        poly : int, optional
            Metronome polyphony. Denotes how many independent streams are
            generated by the metronome, allowing overlapping processes.
            Available only at initialization. Defaults to 1.

    .. note::

        The out() method is bypassed. Metro's signal can not be sent to audio outs.

        Metro has no `mul` and `add` attributes.

    >>> s = Server().boot()
    >>> s.start()
    >>> t = CosTable([(0,0), (50,1), (250,.3), (8191,0)])
    >>> met = Metro(time=.125, poly=2).play()
    >>> amp = TrigEnv(met, table=t, dur=.25, mul=.3)
    >>> freq = TrigRand(met, min=400, max=1000)
    >>> a = Sine(freq=freq, mul=amp).out()

    """
    def __init__(self, time=1, poly=1):
        PyoObject.__init__(self)
        self._time = time
        self._poly = poly
        time, lmax = convertArgsToLists(time)
        # Each voice runs `poly` times slower and is phase-offset by
        # j/poly of a period: the interleaved voices reproduce the
        # requested rate while allowing overlapping processes.
        self._base_objs = [Metro_base(wrap(time,i)*poly, (float(j)/poly)) for i in range(lmax) for j in range(poly)]

    def setTime(self, x):
        """
        Replace the `time` attribute.

        :Args:

            x : float or PyoObject
                New `time` attribute.

        """
        self._time = x
        x, lmax = convertArgsToLists(x)
        # Multiply by poly to compensate for the per-voice slowdown
        # applied in __init__.
        [obj.setTime(wrap(x,i)*self._poly) for i, obj in enumerate(self._base_objs)]

    def out(self, chnl=0, inc=1, dur=0, delay=0):
        # Trigger streams are control signals; out() only starts playback.
        return self.play(dur, delay)

    # Metro carries no amplitude or offset: the arithmetic setters
    # inherited from PyoObject are deliberately disabled.
    def setMul(self, x):
        pass

    def setAdd(self, x):
        pass

    def setSub(self, x):
        pass

    def setDiv(self, x):
        pass

    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        self._map_list = [SLMap(0.001, 1., 'log', 'time', self._time)]
        PyoObject.ctrl(self, map_list, title, wxnoserver)

    @property
    def time(self):
        """float or PyoObject. Time between each trigger in seconds."""
        return self._time
    @time.setter
    def time(self, x): self.setTime(x)
class Seq(PyoObject):
    """
    Generates a rhythmic sequence of trigger signals.

    A trigger is an audio signal with a value of 1 surrounded by 0s.

    The play() method starts the sequence and is not called at the object
    creation time.

    :Parent: :py:class:`PyoObject`

    :Args:

        time : float or PyoObject, optional
            Base time between each trigger in seconds. Defaults to 1.
        seq : list of ints, optional
            Sequence of beat durations in time's unit. Defaults to [1].
        poly : int, optional
            Seq polyphony. Denotes how many independent streams are
            generated by the metronome, allowing overlapping processes.
            Available only at initialization. Defaults to 1.

    .. note::

        The out() method is bypassed. Seq's signal can not be sent to audio outs.

        Seq has no `mul` and `add` attributes.

    >>> s = Server().boot()
    >>> s.start()
    >>> env = CosTable([(0,0),(300,1),(1000,.3),(8191,0)])
    >>> seq = Seq(time=.125, seq=[2,1,1,2], poly=2).play()
    >>> tr = TrigRand(seq, min=250, max=500, port=.005)
    >>> amp = TrigEnv(seq, table=env, dur=.25, mul=.25)
    >>> a = SineLoop(tr, feedback=0.07, mul=amp).out()

    """
    def __init__(self, time=1, seq=None, poly=1):
        PyoObject.__init__(self)
        # Use None as the real default: a literal `seq=[1]` default would
        # be one mutable list shared by every instance. The documented
        # default behavior ([1]) is unchanged.
        if seq is None:
            seq = [1]
        if type(seq) != ListType:
            print >> sys.stderr, 'TypeError: "seq" argument of %s must be a list.\n' % self.__class__.__name__
            exit()
        self._time = time
        self._seq = seq
        self._poly = poly
        time, lmax = convertArgsToLists(time)
        if type(seq[0]) != ListType:
            # Single sequence, shared by every player.
            self._base_players = [Seqer_base(wrap(time,i), seq, poly) for i in range(lmax)]
        else:
            # List of sequences: one player per sequence.
            seqlen = len(seq)
            lmax = max(seqlen, lmax)
            self._base_players = [Seqer_base(wrap(time,i), wrap(seq,i), poly) for i in range(lmax)]
        self._base_objs = [Seq_base(wrap(self._base_players,j), i) for i in range(poly) for j in range(lmax)]

    def setTime(self, x):
        """
        Replace the `time` attribute.

        :Args:

            x : float or PyoObject
                New `time` attribute.

        """
        self._time = x
        x, lmax = convertArgsToLists(x)
        [obj.setTime(wrap(x,i)) for i, obj in enumerate(self._base_players)]

    def setSeq(self, x):
        """
        Replace the `seq` attribute.

        :Args:

            x : list of ints
                New `seq` attribute.

        """
        self._seq = x
        if type(x[0]) != ListType:
            [obj.setSeq(x) for i, obj in enumerate(self._base_players)]
        else:
            [obj.setSeq(wrap(x,i)) for i, obj in enumerate(self._base_players)]

    def out(self, chnl=0, inc=1, dur=0, delay=0):
        # Trigger streams are control signals; out() only starts playback.
        return self.play(dur, delay)

    # Seq carries no amplitude or offset: the arithmetic setters
    # inherited from PyoObject are deliberately disabled.
    def setMul(self, x):
        pass

    def setAdd(self, x):
        pass

    def setSub(self, x):
        pass

    def setDiv(self, x):
        pass

    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        self._map_list = [SLMap(0.001, 10., 'log', 'time', self._time)]
        PyoObject.ctrl(self, map_list, title, wxnoserver)

    @property
    def time(self):
        """float or PyoObject. Base time between each trigger in seconds."""
        return self._time
    @time.setter
    def time(self, x): self.setTime(x)

    @property
    def seq(self):
        """List of ints. Sequence of beat durations in time's unit."""
        return self._seq
    @seq.setter
    def seq(self, x): self.setSeq(x)
class Cloud(PyoObject):
    """
    Generates random triggers.

    Cloud outputs triggers at random moments, with control over the
    average generation density. A trigger is an audio signal with a
    value of 1 surrounded by 0s.

    The play() method starts the Cloud and is not called at the object
    creation time.

    :Parent: :py:class:`PyoObject`

    :Args:

        density : float or PyoObject, optional
            Average number of triggers per second. Defaults to 10.
        poly : int, optional
            Cloud polyphony. Denotes how many independent streams are
            generated by the object, allowing overlapping processes.
            Available only at initialization. Defaults to 1.

    .. note::

        The out() method is bypassed. Cloud's signal can not be sent to audio outs.

        Cloud has no `mul` and `add` attributes.

    >>> s = Server().boot()
    >>> s.start()
    >>> dens = Expseg([(0,1),(5,50)], loop=True, exp=5, initToFirstVal=True).play()
    >>> m = Cloud(density=dens, poly=2).play()
    >>> tr = TrigRand(m, min=300, max=1000)
    >>> tr_p = Port(tr, risetime=0.001, falltime=0.001)
    >>> a = Sine(freq=tr, mul=0.2).out()

    """
    def __init__(self, density=10, poly=1):
        PyoObject.__init__(self)
        self._density = density
        self._poly = poly
        density, lmax = convertArgsToLists(density)
        self._base_players = []
        for i in range(lmax):
            self._base_players.append(Clouder_base(wrap(density, i), poly))
        self._base_objs = []
        for voice in range(poly):
            for j in range(lmax):
                self._base_objs.append(Cloud_base(wrap(self._base_players, j), voice))

    def setDensity(self, x):
        """
        Replace the `density` attribute.

        :Args:

            x : float or PyoObject
                New `density` attribute.

        """
        self._density = x
        x, lmax = convertArgsToLists(x)
        for i, player in enumerate(self._base_players):
            player.setDensity(wrap(x, i))

    def out(self, chnl=0, inc=1, dur=0, delay=0):
        # Trigger streams never go to the audio outputs; just start playback.
        return self.play(dur, delay)

    # Cloud carries no amplitude or offset: arithmetic setters are no-ops.
    def setMul(self, x):
        pass

    def setAdd(self, x):
        pass

    def setSub(self, x):
        pass

    def setDiv(self, x):
        pass

    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        self._map_list = [SLMap(0, 100., 'lin', 'density', self._density)]
        PyoObject.ctrl(self, map_list, title, wxnoserver)

    @property
    def density(self):
        """float or PyoObject. Average density of triggers generation."""
        return self._density
    @density.setter
    def density(self, x): self.setDensity(x)
class Beat(PyoObject):
    """
    Generates algorithmic trigger patterns.

    A trigger is an audio signal with a value of 1 surrounded by 0s.

    Beat generates measures of length `taps` and uses weight parameters
    (`w1`, `w2` and `w3`) to compute the chances of a beat to be present
    in the generated measure.

    User can store the current pattern in one of the 32 preset slots with
    the store() method and recall it later with recall(x).

    A preset is a list where the first value is the number of beats in the
    measure, followed by 1s and 0s. For a 4/4 measure with only down beats:

    [16, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0]

    The play() method starts the Beat and is not called at the object
    creation time.

    :Parent: :py:class:`PyoObject`

    :Args:

        time : float or PyoObject, optional
            Time, in seconds, between each beat of the pattern. Defaults to 0.125.
        taps : int, optional
            Number of beats in the generated pattern, max = 64. Defaults to 16.
        w1 : int {0 -> 100}, optional
            Probability for down beats. Defaults to 80.
        w2 : int {0 -> 100}, optional
            Probability for up beats. Defaults to 50.
        w3 : int {0 -> 100}, optional
            Probability for the weakest beats. Defaults to 30.
        poly : int, optional
            Beat polyphony. Denotes how many independent streams are
            generated by the object, allowing overlapping processes.
            Available only at initialization. Defaults to 1.

    .. note::

        Beat outputs many signals identified with a string between brackets:

        | obj['tap'] returns audio stream of the current tap of the measure.
        | obj['amp'] returns audio stream of the current beat amplitude.
        | obj['dur'] returns audio stream of the current beat duration in seconds.
        | obj['end'] returns audio stream with a trigger just before the end of the measure.

        obj without brackets returns the generated trigger stream of the measure.

        The out() method is bypassed. Beat's signal can not be sent to audio outs.

        Beat has no `mul` and `add` attributes.

    >>> s = Server().boot()
    >>> s.start()
    >>> t = CosTable([(0,0), (100,1), (500,.3), (8191,0)])
    >>> beat = Beat(time=.125, taps=16, w1=[90,80], w2=50, w3=35, poly=1).play()
    >>> trmid = TrigXnoiseMidi(beat, dist=12, mrange=(60, 96))
    >>> trhz = Snap(trmid, choice=[0,2,3,5,7,8,10], scale=1)
    >>> tr2 = TrigEnv(beat, table=t, dur=beat['dur'], mul=beat['amp'])
    >>> a = Sine(freq=trhz, mul=tr2*0.3).out()

    """
    def __init__(self, time=.125, taps=16, w1=80, w2=50, w3=30, poly=1):
        PyoObject.__init__(self)
        # Holders keeping a reference to the Dummy wrappers handed out by
        # __getitem__, so they are not garbage-collected while in use.
        self._tap_dummy = []
        self._amp_dummy = []
        self._dur_dummy = []
        self._end_dummy = []
        self._time = time
        self._taps = taps
        self._w1 = w1
        self._w2 = w2
        self._w3 = w3
        self._poly = poly
        time, taps, w1, w2, w3, lmax = convertArgsToLists(time, taps, w1, w2, w3)
        self._base_players = [Beater_base(wrap(time,i), wrap(taps,i), wrap(w1,i), wrap(w2,i), wrap(w3,i), poly) for i in range(lmax)]
        self._base_objs = [Beat_base(wrap(self._base_players,j), i) for i in range(poly) for j in range(lmax)]
        # Secondary streams exposing, per voice, the current tap index,
        # beat amplitude, beat duration and end-of-measure trigger.
        self._tap_objs = [BeatTapStream_base(wrap(self._base_players,j), i) for i in range(poly) for j in range(lmax)]
        self._amp_objs = [BeatAmpStream_base(wrap(self._base_players,j), i) for i in range(poly) for j in range(lmax)]
        self._dur_objs = [BeatDurStream_base(wrap(self._base_players,j), i) for i in range(poly) for j in range(lmax)]
        self._end_objs = [BeatEndStream_base(wrap(self._base_players,j), i) for i in range(poly) for j in range(lmax)]

    def __getitem__(self, i):
        # String keys select one of the secondary streams; integer or
        # slice keys index the main trigger streams.
        if i == 'tap':
            self._tap_dummy.append(Dummy([obj for obj in self._tap_objs]))
            return self._tap_dummy[-1]
        if i == 'amp':
            self._amp_dummy.append(Dummy([obj for obj in self._amp_objs]))
            return self._amp_dummy[-1]
        if i == 'dur':
            self._dur_dummy.append(Dummy([obj for obj in self._dur_objs]))
            return self._dur_dummy[-1]
        if i == 'end':
            self._end_dummy.append(Dummy([obj for obj in self._end_objs]))
            return self._end_dummy[-1]
        if type(i) == SliceType:
            return self._base_objs[i]
        if i < len(self._base_objs):
            return self._base_objs[i]
        else:
            # NOTE(review): this error goes to stdout, unlike the stderr
            # messages used elsewhere in this module — confirm intent.
            print "'i' too large!"

    def get(self, identifier="amp", all=False):
        """
        Return the first sample of the current buffer as a float.

        Can be used to convert audio stream to usable Python data.

        "tap", "amp" or "dur" must be given to `identifier` to specify
        which stream to get value from.

        :Args:

            identifier : string {"tap", "amp", "dur"}
                Address string parameter identifying audio stream.
                Defaults to "amp".
            all : boolean, optional
                If True, the first value of each object's stream
                will be returned as a list.
                If False, only the value of the first object's
                stream will be returned as a float.

        """
        if not all:
            return self.__getitem__(identifier)[0]._getStream().getValue()
        else:
            return [obj._getStream().getValue() for obj in self.__getitem__(identifier).getBaseObjects()]

    def reset(self):
        """
        Reset internal counters to initialization values.

        """
        [obj.reset() for obj in self._base_players]

    def new(self):
        """
        Generates a new pattern with the current parameters.

        """
        [obj.new() for i, obj in enumerate(self._base_players)]

    def fill(self):
        """
        Generates a fill-in pattern and then restore the current one.

        """
        [obj.fill() for i, obj in enumerate(self._base_players)]

    def store(self, x):
        """
        Store the current pattern in memory `x`.

        :Args:

            x : int
                Memory number. 0 <= x < 32.

        """
        [obj.store(x) for i, obj in enumerate(self._base_players)]

    def recall(self, x):
        """
        Recall the pattern previously stored in memory `x`.

        :Args:

            x : int
                Memory number. 0 <= x < 32.

        """
        [obj.recall(x) for i, obj in enumerate(self._base_players)]

    def getPresets(self):
        """
        Returns the list of stored presets.

        """
        # With one player the presets are returned flat; with several, one
        # list per player.
        if len(self._base_players) == 1:
            return self._base_players[0].getPresets()
        else:
            return [obj.getPresets() for obj in self._base_players]

    def setPresets(self, x):
        """
        Store a list presets.

        :Args:

            x : list
                List of presets.

        """
        if len(self._base_players) == 1:
            return self._base_players[0].setPresets(x)
        else:
            return [obj.setPresets(x[i]) for i, obj in enumerate(self._base_players)]

    def setTime(self, x):
        """
        Replace the `time` attribute.

        :Args:

            x : float or PyoObject
                New `time` attribute.

        """
        self._time = x
        x, lmax = convertArgsToLists(x)
        [obj.setTime(wrap(x,i)) for i, obj in enumerate(self._base_players)]

    def setTaps(self, x):
        """
        Replace the `taps` attribute.

        :Args:

            x : int
                New `taps` attribute.

        """
        self._taps = x
        x, lmax = convertArgsToLists(x)
        [obj.setTaps(wrap(x,i)) for i, obj in enumerate(self._base_players)]

    def setW1(self, x):
        """
        Replace the `w1` attribute.

        :Args:

            x : int
                New `w1` attribute.

        """
        self.setWeights(w1=x)

    def setW2(self, x):
        """
        Replace the `w2` attribute.

        :Args:

            x : int
                New `w2` attribute.

        """
        self.setWeights(w2=x)

    def setW3(self, x):
        """
        Replace the `w3` attribute.

        :Args:

            x : int
                New `w3` attribute.

        """
        self.setWeights(w3=x)

    def setWeights(self, w1=None, w2=None, w3=None):
        """
        Replace the weight attributes.

        Arguments set to `None` remain unchanged.

        :Args:

            w1 : int, optional
                New `w1` attribute. Defaults to None.
            w2 : int, optional
                New `w2` attribute. Defaults to None.
            w3 : int, optional
                New `w3` attribute. Defaults to None.

        """
        if w1 != None: self._w1 = w1
        if w2 != None: self._w2 = w2
        if w3 != None: self._w3 = w3
        w1, w2, w3, lmax = convertArgsToLists(w1, w2, w3)
        [obj.setWeights(wrap(w1,i), wrap(w2,i), wrap(w3,i)) for i, obj in enumerate(self._base_players)]

    def play(self, dur=0, delay=0):
        # Start the secondary streams together with the main triggers.
        dur, delay, lmax = convertArgsToLists(dur, delay)
        self._tap_objs = [obj.play(wrap(dur,i), wrap(delay,i)) for i, obj in enumerate(self._tap_objs)]
        self._amp_objs = [obj.play(wrap(dur,i), wrap(delay,i)) for i, obj in enumerate(self._amp_objs)]
        self._dur_objs = [obj.play(wrap(dur,i), wrap(delay,i)) for i, obj in enumerate(self._dur_objs)]
        self._end_objs = [obj.play(wrap(dur,i), wrap(delay,i)) for i, obj in enumerate(self._end_objs)]
        return PyoObject.play(self, dur, delay)

    def stop(self):
        # Stop the secondary streams together with the main triggers.
        [obj.stop() for obj in self._tap_objs]
        [obj.stop() for obj in self._amp_objs]
        [obj.stop() for obj in self._dur_objs]
        [obj.stop() for obj in self._end_objs]
        return PyoObject.stop(self)

    def out(self, chnl=0, inc=1, dur=0, delay=0):
        # Trigger streams are control signals; out() only starts playback.
        return self.play(dur, delay)

    # Beat carries no amplitude or offset: the arithmetic setters
    # inherited from PyoObject are deliberately disabled.
    def setMul(self, x):
        pass

    def setAdd(self, x):
        pass

    def setSub(self, x):
        pass

    def setDiv(self, x):
        pass

    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        self._map_list = [SLMap(0.001, 1., 'lin', 'time', self._time),
                          SLMap(2, 64, 'lin', 'taps', self._taps, res="int", dataOnly=True),
                          SLMap(0, 100, 'lin', 'w1', self._w1, res="int", dataOnly=True),
                          SLMap(0, 100, 'lin', 'w2', self._w2, res="int", dataOnly=True),
                          SLMap(0, 100, 'lin', 'w3', self._w3, res="int", dataOnly=True)]
        PyoObject.ctrl(self, map_list, title, wxnoserver)

    @property
    def time(self):
        """float or PyoObject. Time, in seconds, between each beat."""
        return self._time
    @time.setter
    def time(self, x): self.setTime(x)

    @property
    def taps(self):
        """int. Number of beats in the generated pattern."""
        return self._taps
    @taps.setter
    def taps(self, x): self.setTaps(x)

    @property
    def w1(self):
        """int. Probability for down beats."""
        return self._w1
    @w1.setter
    def w1(self, x): self.setW1(x)

    @property
    def w2(self):
        """int. Probability for up beats."""
        return self._w2
    @w2.setter
    def w2(self, x): self.setW2(x)

    @property
    def w3(self):
        """int. Probability for other beats."""
        return self._w3
    @w3.setter
    def w3(self, x): self.setW3(x)
class TrigRandInt(PyoObject):
    """
    Pseudo-random integer generator.

    Each time a trigger is received in `input`, TrigRandInt draws a new
    pseudo-random integer number between 0 and `max` and holds that value
    until the next trigger.

    :Parent: :py:class:`PyoObject`

    :Args:

        input : PyoObject
            Audio signal sending triggers.
        max : float or PyoObject, optional
            Maximum value for the random generation. Defaults to 100.

    .. note::

        The out() method is bypassed. TrigRandInt's signal can not be sent
        to audio outs.

    >>> s = Server().boot()
    >>> s.start()
    >>> t = CosTable([(0,0), (50,1), (250,.3), (8191,0)])
    >>> met = Metro(.125, poly=2).play()
    >>> amp = TrigEnv(met, table=t, dur=.25, mul=.3)
    >>> tr = TrigRandInt(met, max=10, mul=100, add=200)
    >>> a = Sine(tr, mul=amp).out()

    """
    def __init__(self, input, max=100., mul=1, add=0):
        PyoObject.__init__(self, mul, add)
        self._input = input
        self._max = max
        # InputFader allows crossfading when the trigger source is replaced.
        self._in_fader = InputFader(input)
        in_fader, max, mul, add, lmax = convertArgsToLists(self._in_fader, max, mul, add)
        self._base_objs = []
        for i in range(lmax):
            self._base_objs.append(TrigRandInt_base(wrap(in_fader, i), wrap(max, i),
                                                    wrap(mul, i), wrap(add, i)))

    def setInput(self, x, fadetime=0.05):
        """
        Replace the `input` attribute.

        :Args:

            x : PyoObject
                New signal to process.
            fadetime : float, optional
                Crossfade time between old and new input. Defaults to 0.05.

        """
        self._input = x
        self._in_fader.setInput(x, fadetime)

    def setMax(self, x):
        """
        Replace the `max` attribute.

        :Args:

            x : float or PyoObject
                new `max` attribute.

        """
        self._max = x
        x, lmax = convertArgsToLists(x)
        for i, obj in enumerate(self._base_objs):
            obj.setMax(wrap(x, i))

    def out(self, chnl=0, inc=1, dur=0, delay=0):
        # Value streams are control signals; out() simply starts playback.
        return self.play(dur, delay)

    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        self._map_list = [SLMap(1., 200., 'lin', 'max', self._max),
                          SLMapMul(self._mul)]
        PyoObject.ctrl(self, map_list, title, wxnoserver)

    @property
    def input(self):
        """PyoObject. Audio trigger signal."""
        return self._input
    @input.setter
    def input(self, x):
        self.setInput(x)

    @property
    def max(self):
        """float or PyoObject. Maximum value."""
        return self._max
    @max.setter
    def max(self, x):
        self.setMax(x)
class TrigRand(PyoObject):
    """
    Pseudo-random number generator.

    TrigRand generates a pseudo-random number between `min` and `max`
    values each time it receives a trigger in its `input` parameter.
    The value is kept until the next trigger.

    :Parent: :py:class:`PyoObject`

    :Args:

        input : PyoObject
            Audio signal sending triggers.
        min : float or PyoObject, optional
            Minimum value for the random generation. Defaults to 0.
        max : float or PyoObject, optional
            Maximum value for the random generation. Defaults to 1.
        port : float, optional
            Portamento. Time to reach a new value. Defaults to 0.
        init : float, optional
            Initial value. Available at initialization time only.
            Defaults to 0.

    >>> s = Server().boot()
    >>> s.start()
    >>> t = CosTable([(0,0), (50,1), (250,.3), (8191,0)])
    >>> met = Metro(.125, poly=2).play()
    >>> amp = TrigEnv(met, table=t, dur=.25, mul=.3)
    >>> tr = TrigRand(met, 400, 600)
    >>> a = Sine(tr, mul=amp).out()

    """
    def __init__(self, input, min=0., max=1., port=0., init=0., mul=1, add=0):
        PyoObject.__init__(self, mul, add)
        self._input = input
        self._min = min
        self._max = max
        self._port = port
        # InputFader allows crossfading when the trigger source is replaced.
        self._in_fader = InputFader(input)
        in_fader, min, max, port, init, mul, add, lmax = convertArgsToLists(self._in_fader, min, max, port, init, mul, add)
        self._base_objs = [TrigRand_base(wrap(in_fader,i), wrap(min,i), wrap(max,i), wrap(port,i), wrap(init,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]

    def setInput(self, x, fadetime=0.05):
        """
        Replace the `input` attribute.

        :Args:

            x : PyoObject
                New signal to process.
            fadetime : float, optional
                Crossfade time between old and new input. Defaults to 0.05.

        """
        self._input = x
        self._in_fader.setInput(x, fadetime)

    def setMin(self, x):
        """
        Replace the `min` attribute.

        :Args:

            x : float or PyoObject
                new `min` attribute.

        """
        self._min = x
        x, lmax = convertArgsToLists(x)
        [obj.setMin(wrap(x,i)) for i, obj in enumerate(self._base_objs)]

    def setMax(self, x):
        """
        Replace the `max` attribute.

        :Args:

            x : float or PyoObject
                new `max` attribute.

        """
        self._max = x
        x, lmax = convertArgsToLists(x)
        [obj.setMax(wrap(x,i)) for i, obj in enumerate(self._base_objs)]

    def setPort(self, x):
        """
        Replace the `port` attribute.

        :Args:

            x : float
                new `port` attribute.

        """
        self._port = x
        x, lmax = convertArgsToLists(x)
        [obj.setPort(wrap(x,i)) for i, obj in enumerate(self._base_objs)]

    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        self._map_list = [SLMap(0., 1., 'lin', 'min', self._min),
                          SLMap(1., 2., 'lin', 'max', self._max),
                          SLMapMul(self._mul)]
        PyoObject.ctrl(self, map_list, title, wxnoserver)

    @property
    def input(self):
        """PyoObject. Audio trigger signal."""
        return self._input
    @input.setter
    def input(self, x):
        self.setInput(x)

    @property
    def min(self):
        """float or PyoObject. Minimum value."""
        return self._min
    @min.setter
    def min(self, x):
        self.setMin(x)

    @property
    def max(self):
        """float or PyoObject. Maximum value."""
        return self._max
    @max.setter
    def max(self, x):
        self.setMax(x)

    @property
    def port(self):
        """float. Ramp time."""
        return self._port
    @port.setter
    def port(self, x):
        self.setPort(x)
class TrigChoice(PyoObject):
    """
    Random generator from user's defined values.

    TrigChoice chooses randomly a new value in list `choice` each
    time it receives a trigger in its `input` parameter. The value
    is kept until the next trigger.

    :Parent: :py:class:`PyoObject`

    :Args:

        input : PyoObject
            Audio signal sending triggers.
        choice : list of floats
            Possible values for the random generation.
        port : float, optional
            Portamento. Time to reach a new value. Defaults to 0.
        init : float, optional
            Initial value. Available at initialization time only.
            Defaults to 0.

    >>> s = Server().boot()
    >>> s.start()
    >>> t = CosTable([(0,0), (50,1), (250,.3), (8191,0)])
    >>> met = Metro(.125, poly=2).play()
    >>> freq = TrigChoice(met, [300, 350, 400, 450, 500, 550])
    >>> amp = TrigEnv(met, table=t, dur=.25, mul=.3)
    >>> a = Sine(freq=freq, mul=amp).out()

    """
    def __init__(self, input, choice, port=0., init=0., mul=1, add=0):
        PyoObject.__init__(self, mul, add)
        if type(choice) != ListType:
            print >> sys.stderr, 'TypeError: "choice" argument of %s must be a list.\n' % self.__class__.__name__
            exit()
        self._input = input
        self._choice = choice
        self._port = port
        # InputFader allows crossfading when the trigger source is replaced.
        self._in_fader = InputFader(input)
        in_fader, port, init, mul, add, lmax = convertArgsToLists(self._in_fader, port, init, mul, add)
        if type(choice[0]) != ListType:
            # Flat list: every stream draws from the same choices.
            self._base_objs = [TrigChoice_base(wrap(in_fader,i), choice, wrap(port,i), wrap(init,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
        else:
            # List of lists: one choice list per stream.
            choicelen = len(choice)
            lmax = max(choicelen, lmax)
            self._base_objs = [TrigChoice_base(wrap(in_fader,i), wrap(choice,i), wrap(port,i), wrap(init,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]

    def setInput(self, x, fadetime=0.05):
        """
        Replace the `input` attribute.

        :Args:

            x : PyoObject
                New signal to process.
            fadetime : float, optional
                Crossfade time between old and new input. Defaults to 0.05.

        """
        self._input = x
        self._in_fader.setInput(x, fadetime)

    def setChoice(self, x):
        """
        Replace the `choice` attribute.

        :Args:

            x : list of floats
                new `choice` attribute.

        """
        self._choice = x
        if type(x[0]) != ListType:
            [obj.setChoice(self._choice) for i, obj in enumerate(self._base_objs)]
        else:
            [obj.setChoice(wrap(self._choice,i)) for i, obj in enumerate(self._base_objs)]

    def setPort(self, x):
        """
        Replace the `port` attribute.

        :Args:

            x : float
                new `port` attribute.

        """
        self._port = x
        x, lmax = convertArgsToLists(x)
        [obj.setPort(wrap(x,i)) for i, obj in enumerate(self._base_objs)]

    @property
    def input(self):
        """PyoObject. Audio trigger signal."""
        return self._input
    @input.setter
    def input(self, x):
        self.setInput(x)

    @property
    def choice(self):
        """list of floats. Possible values."""
        return self._choice
    @choice.setter
    def choice(self, x):
        self.setChoice(x)

    @property
    def port(self):
        """float. Ramp time."""
        return self._port
    @port.setter
    def port(self, x):
        self.setPort(x)
class TrigFunc(PyoObject):
    """
    Python function callback.

    TrigFunc calls the function given at parameter `function` each
    time it receives a trigger in its `input` parameter.

    :Parent: :py:class:`PyoObject`

    :Args:

        input : PyoObject
            Audio signal sending triggers.
        function : Python function
            Function to be called.
        arg : anything, optional
            Argument sent to the function's call. If None, the function
            will be called without argument. Defaults to None.

    .. note::

        The out() method is bypassed. TrigFunc's signal can not be sent
        to audio outs.

        TrigFunc has no `mul` and `add` attributes.

    >>> s = Server().boot()
    >>> s.start()
    >>> f = Fader(fadein=.005, fadeout=.1, dur=.12, mul=.2)
    >>> a = SineLoop(midiToHz([60,60]), feedback=0.05, mul=f).out()
    >>> c = 0.0
    >>> def count():
    ...     global c
    ...     freq = midiToHz(round(c) + 60)
    ...     a.freq = [freq, freq*0.995]
    ...     c += 1.77
    ...     if c > 13: c = 0
    ...     f.play()
    >>> m = Metro(.125).play()
    >>> tf = TrigFunc(m, count)

    """
    def __init__(self, input, function, arg=None):
        PyoObject.__init__(self)
        # Reject non-callable arguments early. When a list/tuple of
        # functions is given, every element is checked (previously only
        # the first one was), so a bad entry fails at creation time
        # instead of at trigger time.
        if type(function) == ListType or type(function) == TupleType:
            if not all([callable(f) for f in function]):
                print >> sys.stderr, 'TypeError: "function" argument of %s must be callable.\n' % self.__class__.__name__
                exit()
        else:
            if not callable(function):
                print >> sys.stderr, 'TypeError: "function" argument of %s must be callable.\n' % self.__class__.__name__
                exit()
        self._input = input
        # Store a weak reference so TrigFunc does not keep the owner of a
        # bound method alive.
        self._function = getWeakMethodRef(function)
        self._arg = arg
        self._in_fader = InputFader(input)
        in_fader, function, arg, lmax = convertArgsToLists(self._in_fader, function, arg)
        self._base_objs = [TrigFunc_base(wrap(in_fader,i), WeakMethod(wrap(function,i)), wrap(arg,i)) for i in range(lmax)]
    def out(self, chnl=0, inc=1, dur=0, delay=0):
        # Bypassed: TrigFunc's signal never reaches the audio outputs.
        return self.play(dur, delay)
    def setMul(self, x):
        # TrigFunc has no `mul` attribute.
        pass
    def setAdd(self, x):
        # TrigFunc has no `add` attribute.
        pass
    def setInput(self, x, fadetime=0.05):
        """
        Replace the `input` attribute.

        :Args:

            x : PyoObject
                New signal to process.
            fadetime : float, optional
                Crossfade time between old and new input. Defaults to 0.05.

        """
        self._input = x
        self._in_fader.setInput(x, fadetime)
    def setFunction(self, x):
        """
        Replace the `function` attribute.

        :Args:

            x : Python function
                new `function` attribute.

        """
        self._function = getWeakMethodRef(x)
        x, lmax = convertArgsToLists(x)
        [obj.setFunction(WeakMethod(wrap(x,i))) for i, obj in enumerate(self._base_objs)]
    def setArg(self, x):
        """
        Replace the `arg` attribute.

        :Args:

            x : Anything
                new `arg` attribute.

        """
        self._arg = x
        x, lmax = convertArgsToLists(x)
        [obj.setArg(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    @property
    def input(self):
        """PyoObject. Audio trigger signal."""
        return self._input
    @input.setter
    def input(self, x):
        self.setInput(x)
    @property
    def function(self):
        """Python callable. Function to be called."""
        return self._function
    @function.setter
    def function(self, x):
        self.setFunction(x)
    @property
    def arg(self):
        """Anything. Callable's argument."""
        return self._arg
    @arg.setter
    def arg(self, x):
        self.setArg(x)
class TrigEnv(PyoObject):
    """
    Envelope reader generator.

    TrigEnv starts reading an envelope in `dur` seconds each time it
    receives a trigger in its `input` parameter.

    :Parent: :py:class:`PyoObject`

    :Args:

        input : PyoObject
            Audio signal sending triggers.
        table : PyoTableObject
            Table containing the envelope.
        dur : float or PyoObject, optional
            Duration in seconds of the envelope. Defaults to 1.
        interp : int, optional
            Choice of the interpolation method. Defaults to 2.
                1. no interpolation
                2. linear
                3. cosinus
                4. cubic

    .. note::

        TrigEnv will send a trigger signal at the end of the playback.
        User can retrieve the trigger streams by calling obj['trig'].
        Useful to synchronize other processes.

    >>> s = Server().boot()
    >>> s.start()
    >>> env = HannTable()
    >>> m = Metro(.125, poly=2).play()
    >>> tr = TrigRand(m, 400, 600)
    >>> te = TrigEnv(m, table=env, dur=.25, mul=.2)
    >>> a = Sine(tr, mul=te).out()

    """
    def __init__(self, input, table, dur=1, interp=2, mul=1, add=0):
        PyoObject.__init__(self, mul, add)
        self._input = input
        self._table = table
        self._dur = dur
        self._interp = interp
        self._in_fader = InputFader(input)
        # Multichannel expansion: one base object per expanded stream.
        in_fader, table, dur, interp, mul, add, lmax = convertArgsToLists(self._in_fader, table, dur, interp, mul, add)
        self._base_objs = [TrigEnv_base(wrap(in_fader,i), wrap(table,i), wrap(dur,i), wrap(interp,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
        # End-of-envelope trigger streams, exposed through obj['trig'].
        self._trig_objs = Dummy([TriggerDummy_base(obj) for obj in self._base_objs])
    def setInput(self, x, fadetime=0.05):
        """
        Replace the `input` attribute.

        :Args:

            x : PyoObject
                New signal to process.
            fadetime : float, optional
                Crossfade time between old and new input. Defaults to 0.05.

        """
        self._input = x
        self._in_fader.setInput(x, fadetime)
    def setTable(self, x):
        """
        Replace the `table` attribute.

        :Args:

            x : PyoTableObject
                new `table` attribute.

        """
        self._table = x
        x, lmax = convertArgsToLists(x)
        [obj.setTable(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    def setDur(self, x):
        """
        Replace the `dur` attribute.

        :Args:

            x : float or PyoObject
                new `dur` attribute.

        """
        self._dur = x
        x, lmax = convertArgsToLists(x)
        [obj.setDur(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    def setInterp(self, x):
        """
        Replace the `interp` attribute.

        :Args:

            x : int {1, 2, 3, 4}
                new `interp` attribute.

        """
        self._interp = x
        x, lmax = convertArgsToLists(x)
        [obj.setInterp(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        # Default controller sliders: envelope duration and output gain.
        self._map_list = [SLMap(0.01, 10., 'lin', 'dur', self._dur), SLMapMul(self._mul)]
        PyoObject.ctrl(self, map_list, title, wxnoserver)
    @property
    def input(self):
        """PyoObject. Audio trigger signal."""
        return self._input
    @input.setter
    def input(self, x):
        self.setInput(x)
    @property
    def table(self):
        """PyoTableObject. Envelope table."""
        return self._table
    @table.setter
    def table(self, x):
        self.setTable(x)
    @property
    def dur(self):
        """float or PyoObject. Duration in seconds."""
        return self._dur
    @dur.setter
    def dur(self, x):
        self.setDur(x)
    @property
    def interp(self):
        """int {1, 2, 3, 4}, Interpolation method."""
        return self._interp
    @interp.setter
    def interp(self, x):
        self.setInterp(x)
class TrigLinseg(PyoObject):
    """
    Line segments trigger.

    TrigLinseg starts reading a break-points line segments each time it
    receives a trigger in its `input` parameter.

    :Parent: :py:class:`PyoObject`

    :Args:

        input : PyoObject
            Audio signal sending triggers.
        list : list of tuples
            Points used to construct the line segments. Each tuple is a
            new point in the form (time, value).

            Times are given in seconds and must be in increasing order.

    .. note::

        TrigLinseg will send a trigger signal at the end of the playback.
        User can retrieve the trigger streams by calling obj['trig'].
        Useful to synchronize other processes.

        The out() method is bypassed. TrigLinseg's signal can not be sent
        to audio outs.

    >>> s = Server().boot()
    >>> s.start()
    >>> m = Metro(time=1, poly=2).play()
    >>> pit = TrigLinseg(m, [(0,1000),(.1,1300),(.2,900),(.3,1000),(2,1000)])
    >>> a = Sine(pit, mul=.2).out()

    """
    def __init__(self, input, list, mul=1, add=0):
        PyoObject.__init__(self, mul, add)
        # Validate the break-points structure before touching audio objects.
        if type(list) != ListType:
            print >> sys.stderr, 'TypeError: "list" argument of %s must be a list of tuples.\n' % self.__class__.__name__
            exit()
        if type(list[0]) != TupleType:
            print >> sys.stderr, 'TypeError: "list" argument of %s must be a list of tuples.\n' % self.__class__.__name__
            exit()
        self._input = input
        self._list = list
        self._in_fader = InputFader(input)
        in_fader, mul, add, lmax = convertArgsToLists(self._in_fader, mul, add)
        self._base_objs = [TrigLinseg_base(wrap(in_fader,i), list, wrap(mul,i), wrap(add,i)) for i in range(lmax)]
        # End-of-playback trigger streams, exposed through obj['trig'].
        self._trig_objs = Dummy([TriggerDummy_base(obj) for obj in self._base_objs])
    def out(self, chnl=0, inc=1, dur=0, delay=0):
        # Bypassed: TrigLinseg's signal never reaches the audio outputs.
        return self.play(dur, delay)
    def setInput(self, x, fadetime=0.05):
        """
        Replace the `input` attribute.

        :Args:

            x : PyoObject
                New signal to process.
            fadetime : float, optional
                Crossfade time between old and new input. Defaults to 0.05.

        """
        self._input = x
        self._in_fader.setInput(x, fadetime)
    def setList(self, x):
        """
        Replace the `list` attribute.

        :Args:

            x : list of tuples
                new `list` attribute.

        """
        self._list = x
        # Every base stream shares the same break-points list.
        [obj.setList(x) for obj in self._base_objs]
    def replace(self, x):
        """
        Alias for `setList` method.

        :Args:

            x : list of tuples
                new `list` attribute.

        """
        self.setList(x)
    def getPoints(self):
        """Return the current list of break-points tuples."""
        return self._list
    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        self._map_list = [SLMapMul(self._mul)]
        PyoObject.ctrl(self, map_list, title, wxnoserver)
    def graph(self, xlen=None, yrange=None, title=None, wxnoserver=False):
        """
        Opens a grapher window to control the shape of the envelope.

        When editing the grapher with the mouse, the new set of points
        will be send to the object on mouse up.

        Ctrl+C with focus on the grapher will copy the list of points to the
        clipboard, giving an easy way to insert the new shape in a script.

        :Args:

            xlen : float, optional
                Set the maximum value of the X axis of the graph. If None, the
                maximum value is retrieve from the current list of points.
                Defaults to None.
            yrange : tuple, optional
                Set the min and max values of the Y axis of the graph. If
                None, min and max are retrieve from the current list of points.
                Defaults to None.
            title : string, optional
                Title of the window. If none is provided, the name of the
                class is used.
            wxnoserver : boolean, optional
                With wxPython graphical toolkit, if True, tells the
                interpreter that there will be no server window.

        If `wxnoserver` is set to True, the interpreter will not wait for
        the server GUI before showing the controller window.

        """
        # `is None` instead of `== None`: identity test for the singleton.
        if xlen is None:
            xlen = float(self._list[-1][0])
        else:
            xlen = float(xlen)
        if yrange is None:
            ymin = float(min([x[1] for x in self._list]))
            ymax = float(max([x[1] for x in self._list]))
            if ymin == ymax:
                yrange = (0, ymax)
            else:
                yrange = (ymin, ymax)
        # Mode 0: linear segments grapher.
        createGraphWindow(self, 0, xlen, yrange, title, wxnoserver)
    @property
    def input(self):
        """PyoObject. Audio trigger signal."""
        return self._input
    @input.setter
    def input(self, x):
        self.setInput(x)
    @property
    def list(self):
        """list of tuples. Points used to construct the line segments."""
        return self._list
    @list.setter
    def list(self, x):
        self.setList(x)
class TrigExpseg(PyoObject):
    """
    Exponential segments trigger.

    TrigExpseg starts reading break-points exponential segments each time
    it receives a trigger in its `input` parameter.

    :Parent: :py:class:`PyoObject`

    :Args:

        input : PyoObject
            Audio signal sending triggers.
        list : list of tuples
            Points used to construct the line segments. Each tuple is a
            new point in the form (time, value).

            Times are given in seconds and must be in increasing order.
        exp : float, optional
            Exponent factor. Used to control the slope of the curves.
            Defaults to 10.
        inverse : boolean, optional
            If True, downward slope will be inversed. Useful to create
            biexponential curves. Defaults to True.

    .. note::

        TrigExpseg will send a trigger signal at the end of the playback.
        User can retrieve the trigger streams by calling obj['trig'].
        Useful to synchronize other processes.

        The out() method is bypassed. TrigExpseg's signal can not be sent
        to audio outs.

    >>> s = Server().boot()
    >>> s.start()
    >>> m = Metro(time=0.5, poly=2).play()
    >>> pit = TrigExpseg(m, [(0,1000),(.25,1300),(.5,1000),(1,1000)])
    >>> a = Sine(pit, mul=.2).out()

    """
    def __init__(self, input, list, exp=10, inverse=True, mul=1, add=0):
        PyoObject.__init__(self, mul, add)
        # Validate the break-points structure before touching audio objects.
        if type(list) != ListType:
            print >> sys.stderr, 'TypeError: "list" argument of %s must be a list of tuples.\n' % self.__class__.__name__
            exit()
        if type(list[0]) != TupleType:
            print >> sys.stderr, 'TypeError: "list" argument of %s must be a list of tuples.\n' % self.__class__.__name__
            exit()
        self._input = input
        self._list = list
        self._exp = exp
        self._inverse = inverse
        self._in_fader = InputFader(input)
        in_fader, exp, inverse, mul, add, lmax = convertArgsToLists(self._in_fader, exp, inverse, mul, add)
        self._base_objs = [TrigExpseg_base(wrap(in_fader,i), list, wrap(exp,i), wrap(inverse,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
        # End-of-playback trigger streams, exposed through obj['trig'].
        self._trig_objs = Dummy([TriggerDummy_base(obj) for obj in self._base_objs])
    def out(self, chnl=0, inc=1, dur=0, delay=0):
        # Bypassed: TrigExpseg's signal never reaches the audio outputs.
        return self.play(dur, delay)
    def setInput(self, x, fadetime=0.05):
        """
        Replace the `input` attribute.

        :Args:

            x : PyoObject
                New signal to process.
            fadetime : float, optional
                Crossfade time between old and new input. Defaults to 0.05.

        """
        self._input = x
        self._in_fader.setInput(x, fadetime)
    def setList(self, x):
        """
        Replace the `list` attribute.

        :Args:

            x : list of tuples
                new `list` attribute.

        """
        self._list = x
        # Every base stream shares the same break-points list.
        [obj.setList(x) for obj in self._base_objs]
    def setExp(self, x):
        """
        Replace the `exp` attribute.

        :Args:

            x : float
                new `exp` attribute.

        """
        self._exp = x
        x, lmax = convertArgsToLists(x)
        [obj.setExp(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    def setInverse(self, x):
        """
        Replace the `inverse` attribute.

        :Args:

            x : boolean
                new `inverse` attribute.

        """
        self._inverse = x
        x, lmax = convertArgsToLists(x)
        [obj.setInverse(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    def replace(self, x):
        """
        Alias for `setList` method.

        :Args:

            x : list of tuples
                new `list` attribute.

        """
        self.setList(x)
    def getPoints(self):
        """Return the current list of break-points tuples."""
        return self._list
    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        self._map_list = [SLMapMul(self._mul)]
        PyoObject.ctrl(self, map_list, title, wxnoserver)
    def graph(self, xlen=None, yrange=None, title=None, wxnoserver=False):
        """
        Opens a grapher window to control the shape of the envelope.

        When editing the grapher with the mouse, the new set of points
        will be send to the object on mouse up.

        Ctrl+C with focus on the grapher will copy the list of points to the
        clipboard, giving an easy way to insert the new shape in a script.

        :Args:

            xlen : float, optional
                Set the maximum value of the X axis of the graph. If None, the
                maximum value is retrieve from the current list of points.
                Defaults to None.
            yrange : tuple, optional
                Set the min and max values of the Y axis of the graph. If
                None, min and max are retrieve from the current list of points.
                Defaults to None.
            title : string, optional
                Title of the window. If none is provided, the name of the
                class is used.
            wxnoserver : boolean, optional
                With wxPython graphical toolkit, if True, tells the
                interpreter that there will be no server window.

        If `wxnoserver` is set to True, the interpreter will not wait for
        the server GUI before showing the controller window.

        """
        # `is None` instead of `== None`: identity test for the singleton.
        if xlen is None:
            xlen = float(self._list[-1][0])
        else:
            xlen = float(xlen)
        if yrange is None:
            ymin = float(min([x[1] for x in self._list]))
            ymax = float(max([x[1] for x in self._list]))
            if ymin == ymax:
                yrange = (0, ymax)
            else:
                yrange = (ymin, ymax)
        # Mode 2: exponential segments grapher.
        createGraphWindow(self, 2, xlen, yrange, title, wxnoserver)
    @property
    def input(self):
        """PyoObject. Audio trigger signal."""
        return self._input
    @input.setter
    def input(self, x):
        self.setInput(x)
    @property
    def list(self):
        """list of tuples. Points used to construct the line segments."""
        return self._list
    @list.setter
    def list(self, x):
        self.setList(x)
    @property
    def exp(self):
        """float. Exponent factor."""
        return self._exp
    @exp.setter
    def exp(self, x):
        self.setExp(x)
    @property
    def inverse(self):
        """boolean. Inversion of downward slope."""
        return self._inverse
    @inverse.setter
    def inverse(self, x):
        self.setInverse(x)
class TrigXnoise(PyoObject):
    """
    Triggered X-class pseudo-random generator.

    Xnoise implements a few of the most common noise distributions.
    A new value is generated each time the object receive a trigger
    in input. Each distribution generates values in the range 0 and 1.

    :Parent: :py:class:`PyoObject`

    :Args:

        input : PyoObject
            Audio signal sending triggers.
        dist : string or int, optional
            Distribution type. Defaults to 0.
        x1 : float or PyoObject, optional
            First parameter. Defaults to 0.5.
        x2 : float or PyoObject, optional
            Second parameter. Defaults to 0.5.

    .. note::

        Available distributions are:
            0. uniform
            1. linear minimum
            2. linear maximum
            3. triangular
            4. exponential minimum
            5. exponential maximum
            6. double (bi)exponential
            7. cauchy
            8. weibull
            9. gaussian
            10. poisson
            11. walker (drunk)
            12. loopseg (drunk with looped segments)

        Depending on the distribution, `x1` and `x2` parameters are applied
        as follow (names as string, or associated number can be used as `dist`
        parameter):
            0. uniform
                - x1 : not used
                - x2 : not used
            1. linear_min
                - x1 : not used
                - x2 : not used
            2. linear_max
                - x1 : not used
                - x2 : not used
            3. triangle
                - x1 : not used
                - x2 : not used
            4. expon_min
                - x1 : slope {0 = no slope -> 10 = sharp slope}
                - x2 : not used
            5. expon_max
                - x1 : slope {0 = no slope -> 10 = sharp slope}
                - x2 : not used
            6. biexpon
                - x1 : bandwidth {0 = huge bandwidth -> 10 = narrow bandwidth}
                - x2 : not used
            7. cauchy
                - x1 : bandwidth {0 = narrow bandwidth -> 10 = huge bandwidth}
                - x2 : not used
            8. weibull
                - x1 : mean location {0 -> 1}
                - x2 : shape {0.5 = linear min, 1.5 = expon min, 3.5 = gaussian}
            9. gaussian
                - x1 : mean location {0 -> 1}
                - x2 : bandwidth {0 = narrow bandwidth -> 10 = huge bandwidth}
            10. poisson
                - x1 : gravity center {0 = low values -> 10 = high values}
                - x2 : compress/expand range {0.1 = full compress -> 4 full expand}
            11. walker
                - x1 : maximum value {0.1 -> 1}
                - x2 : maximum step {0.1 -> 1}
            12. loopseg
                - x1 : maximum value {0.1 -> 1}
                - x2 : maximum step {0.1 -> 1}

    >>> s = Server().boot()
    >>> s.start()
    >>> wav = SquareTable()
    >>> env = CosTable([(0,0), (100,1), (500,.3), (8191,0)])
    >>> met = Metro(.125, 12).play()
    >>> amp = TrigEnv(met, table=env, mul=.2)
    >>> pit = TrigXnoise(met, dist=4, x1=10, mul=1000, add=200)
    >>> a = Osc(table=wav, freq=pit, mul=amp).out()

    """
    def __init__(self, input, dist=0, x1=0.5, x2=0.5, mul=1, add=0):
        PyoObject.__init__(self, mul, add)
        self._input = input
        self._dist = dist
        self._x1 = x1
        self._x2 = x2
        self._in_fader = InputFader(input)
        in_fader, dist, x1, x2, mul, add, lmax = convertArgsToLists(self._in_fader, dist, x1, x2, mul, add)
        # Distribution names given as strings are mapped to their integer
        # codes through XNOISE_DICT; unknown names fall back to 0 (uniform).
        for i, t in enumerate(dist):
            if type(t) == StringType: dist[i] = XNOISE_DICT.get(t, 0)
        self._base_objs = [TrigXnoise_base(wrap(in_fader,i), wrap(dist,i), wrap(x1,i), wrap(x2,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
    def setInput(self, x, fadetime=0.05):
        """
        Replace the `input` attribute.

        :Args:

            x : PyoObject
                New signal to process.
            fadetime : float, optional
                Crossfade time between old and new input. Defaults to 0.05.

        """
        self._input = x
        self._in_fader.setInput(x, fadetime)
    def setDist(self, x):
        """
        Replace the `dist` attribute.

        :Args:

            x : int
                new `dist` attribute.

        """
        self._dist = x
        x, lmax = convertArgsToLists(x)
        # Accept string names here too, same mapping as in __init__.
        for i, t in enumerate(x):
            if type(t) == StringType: x[i] = XNOISE_DICT.get(t, 0)
        [obj.setType(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    def setX1(self, x):
        """
        Replace the `x1` attribute.

        :Args:

            x : float or PyoObject
                new `x1` attribute.

        """
        self._x1 = x
        x, lmax = convertArgsToLists(x)
        [obj.setX1(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    def setX2(self, x):
        """
        Replace the `x2` attribute.

        :Args:

            x : float or PyoObject
                new `x2` attribute.

        """
        self._x2= x
        x, lmax = convertArgsToLists(x)
        [obj.setX2(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    @property
    def input(self):
        """PyoObject. Audio trigger signal."""
        return self._input
    @input.setter
    def input(self, x):
        self.setInput(x)
    @property
    def dist(self):
        """string or int. Distribution type."""
        return self._dist
    @dist.setter
    def dist(self, x):
        self.setDist(x)
    @property
    def x1(self):
        """float or PyoObject. First parameter."""
        return self._x1
    @x1.setter
    def x1(self, x):
        self.setX1(x)
    @property
    def x2(self):
        """float or PyoObject. Second parameter."""
        return self._x2
    @x2.setter
    def x2(self, x):
        self.setX2(x)
class TrigXnoiseMidi(PyoObject):
    """
    Triggered X-class midi notes pseudo-random generator.

    Xnoise implements a few of the most common noise distributions.
    A new value is generated each time the object receive a trigger
    in input. Each distribution generates integer values in the range
    defined with `mrange` parameter and output can be scaled on midi
    notes, hertz or transposition factor.

    :Parent: :py:class:`PyoObject`

    :Args:

        input : PyoObject
            Audio signal sending triggers.
        dist : string of int, optional
            Distribution type. Defaults to 0.
        x1 : float or PyoObject, optional
            First parameter. Defaults to 0.5.
        x2 : float or PyoObject, optional
            Second parameter. Defaults to 0.5.
        scale : int {0, 1, 2}, optional
            Output format. 0 = MIDI, 1 = Hertz, 2 = transposition factor.
            Defaults to 0.

            In the transposition mode, the central key (the key where there
            is no transposition) is (`minrange` + `maxrange`) / 2.
        mrange : tuple of int, optional
            Minimum and maximum possible values, in Midi notes. Available
            only at initialization time. Defaults to (0, 127).

    .. note::

        Available distributions are:
            0. uniform
            1. linear minimum
            2. linear maximum
            3. triangular
            4. exponential minimum
            5. exponential maximum
            6. double (bi)exponential
            7. cauchy
            8. weibull
            9. gaussian
            10. poisson
            11. walker (drunk)
            12. loopseg (drunk with looped segments)

        Depending on the distribution, `x1` and `x2` parameters are applied
        as follow (names as string, or associated number can be used as `dist`
        parameter):
            0. uniform
                - x1 : not used
                - x2 : not used
            1. linear_min
                - x1 : not used
                - x2 : not used
            2. linear_max
                - x1 : not used
                - x2 : not used
            3. triangle
                - x1 : not used
                - x2 : not used
            4. expon_min
                - x1 : slope {0 = no slope -> 10 = sharp slope}
                - x2 : not used
            5. expon_max
                - x1 : slope {0 = no slope -> 10 = sharp slope}
                - x2 : not used
            6. biexpon
                - x1 : bandwidth {0 = huge bandwidth -> 10 = narrow bandwidth}
                - x2 : not used
            7. cauchy
                - x1 : bandwidth {0 = narrow bandwidth -> 10 = huge bandwidth}
                - x2 : not used
            8. weibull
                - x1 : mean location {0 -> 1}
                - x2 : shape {0.5 = linear min, 1.5 = expon min, 3.5 = gaussian}
            9. gaussian
                - x1 : mean location {0 -> 1}
                - x2 : bandwidth {0 = narrow bandwidth -> 10 = huge bandwidth}
            10. poisson
                - x1 : gravity center {0 = low values -> 10 = high values}
                - x2 : compress/expand range {0.1 = full compress -> 4 full expand}
            11. walker
                - x1 : maximum value {0.1 -> 1}
                - x2 : maximum step {0.1 -> 1}
            12. loopseg
                - x1 : maximum value {0.1 -> 1}
                - x2 : maximum step {0.1 -> 1}

    >>> s = Server().boot()
    >>> s.start()
    >>> wav = SquareTable()
    >>> env = CosTable([(0,0), (100,1), (500,.3), (8191,0)])
    >>> met = Metro(.125, 12).play()
    >>> amp = TrigEnv(met, table=env, mul=.2)
    >>> pit = TrigXnoiseMidi(met, dist=4, x1=10, scale=1, mrange=(48,84))
    >>> a = Osc(table=wav, freq=pit, mul=amp).out()

    """
    def __init__(self, input, dist=0, x1=0.5, x2=0.5, scale=0, mrange=(0,127), mul=1, add=0):
        PyoObject.__init__(self, mul, add)
        self._input = input
        self._dist = dist
        self._x1 = x1
        self._x2 = x2
        self._scale = scale
        self._mrange = mrange
        self._in_fader = InputFader(input)
        in_fader, dist, x1, x2, scale, mrange, mul, add, lmax = convertArgsToLists(self._in_fader, dist, x1, x2, scale, mrange, mul, add)
        # Distribution names given as strings are mapped to their integer
        # codes through XNOISE_DICT; unknown names fall back to 0 (uniform).
        for i, t in enumerate(dist):
            if type(t) == StringType: dist[i] = XNOISE_DICT.get(t, 0)
        self._base_objs = [TrigXnoiseMidi_base(wrap(in_fader,i), wrap(dist,i), wrap(x1,i), wrap(x2,i), wrap(scale,i), wrap(mrange,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
    def setInput(self, x, fadetime=0.05):
        """
        Replace the `input` attribute.

        :Args:

            x : PyoObject
                New signal to process.
            fadetime : float, optional
                Crossfade time between old and new input. Defaults to 0.05.

        """
        self._input = x
        self._in_fader.setInput(x, fadetime)
    def setDist(self, x):
        """
        Replace the `dist` attribute.

        :Args:

            x : int
                new `dist` attribute.

        """
        self._dist = x
        x, lmax = convertArgsToLists(x)
        # Accept string names here too, same mapping as in __init__.
        for i, t in enumerate(x):
            if type(t) == StringType: x[i] = XNOISE_DICT.get(t, 0)
        [obj.setType(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    def setScale(self, x):
        """
        Replace the `scale` attribute.

        Possible values are:
            0. Midi notes
            1. Hertz
            2. transposition factor (centralkey is (`minrange` + `maxrange`) / 2

        :Args:

            x : int {0, 1, 2}
                new `scale` attribute.

        """
        self._scale = x
        x, lmax = convertArgsToLists(x)
        [obj.setScale(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    def setRange(self, mini, maxi):
        """
        Replace the `mrange` attribute.

        :Args:

            mini : int
                minimum output midi range.
            maxi : int
                maximum output midi range.

        """
        self._mrange = (mini, maxi)
        mini, maxi, lmax = convertArgsToLists(mini, maxi)
        [obj.setRange(wrap(mini,i), wrap(maxi,i)) for i, obj in enumerate(self._base_objs)]
    def setX1(self, x):
        """
        Replace the `x1` attribute.

        :Args:

            x : float or PyoObject
                new `x1` attribute.

        """
        self._x1 = x
        x, lmax = convertArgsToLists(x)
        [obj.setX1(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    def setX2(self, x):
        """
        Replace the `x2` attribute.

        :Args:

            x : float or PyoObject
                new `x2` attribute.

        """
        self._x2= x
        x, lmax = convertArgsToLists(x)
        [obj.setX2(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    @property
    def input(self):
        """PyoObject. Audio trigger signal."""
        return self._input
    @input.setter
    def input(self, x):
        self.setInput(x)
    @property
    def dist(self):
        """string or int. Distribution type."""
        return self._dist
    @dist.setter
    def dist(self, x):
        self.setDist(x)
    @property
    def x1(self):
        """float or PyoObject. First parameter."""
        return self._x1
    @x1.setter
    def x1(self, x):
        self.setX1(x)
    @property
    def x2(self):
        """float or PyoObject. Second parameter."""
        return self._x2
    @x2.setter
    def x2(self, x):
        self.setX2(x)
    @property
    def scale(self):
        """int. Output format."""
        return self._scale
    @scale.setter
    def scale(self, x):
        self.setScale(x)
class Counter(PyoObject):
    """
    Integer count generator.

    Counter keeps track of all triggers received, outputs the current
    count constrained within `min` and `max` range, and can be set to
    count up, down, or up-and-down.

    :Parent: :py:class:`PyoObject`

    :Args:

        input : PyoObject
            Audio signal sending triggers.
        min : int, optional
            Minimum value of the count, included in the count. Defaults to 0.
        max : int, optional
            Maximum value of the count, excluded from the count.
            The counter will count up to max - 1. Defaults to 100.
        dir : int {0, 1, 2}, optional
            Direction of the count. Defaults to 0. Three possible values:
                0. up
                1. down
                2. up-and-down

    .. note::

        The out() method is bypassed. Counter's signal can not be sent
        to audio outs.

    .. seealso::

        :py:class:`Select`

    >>> s = Server().boot()
    >>> s.start()
    >>> m = Metro(.125).play()
    >>> c = Counter(m, min=3, max=8, dir=2, mul=100)
    >>> a = Sine(freq=c, mul=.2).mix(2).out()

    """
    def __init__(self, input, min=0, max=100, dir=0, mul=1, add=0):
        PyoObject.__init__(self, mul, add)
        self._input = input
        self._min = min
        self._max = max
        self._dir = dir
        self._in_fader = InputFader(input)
        # Multichannel expansion: one base object per expanded stream.
        in_fader, min, max, dir, mul, add, lmax = convertArgsToLists(self._in_fader, min, max, dir, mul, add)
        self._base_objs = [Counter_base(wrap(in_fader,i), wrap(min,i), wrap(max,i), wrap(dir,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
    def out(self, chnl=0, inc=1, dur=0, delay=0):
        # Bypassed: Counter's signal never reaches the audio outputs.
        return self.play(dur, delay)
    def setInput(self, x, fadetime=0.05):
        """
        Replace the `input` attribute.

        :Args:

            x : PyoObject
                New signal to process.
            fadetime : float, optional
                Crossfade time between old and new input. Defaults to 0.05.

        """
        self._input = x
        self._in_fader.setInput(x, fadetime)
    def setMin(self, x):
        """
        Replace the `min` attribute.

        :Args:

            x : int
                new `min` attribute.

        """
        self._min = x
        x, lmax = convertArgsToLists(x)
        [obj.setMin(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    def setMax(self, x):
        """
        Replace the `max` attribute.

        :Args:

            x : int
                new `max` attribute.

        """
        self._max = x
        x, lmax = convertArgsToLists(x)
        [obj.setMax(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    def setDir(self, x):
        """
        Replace the `dir` attribute.

        :Args:

            x : int {0, 1, 2}
                new `dir` attribute.

        """
        self._dir = x
        x, lmax = convertArgsToLists(x)
        [obj.setDir(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    def reset(self, value=None):
        """
        Reset the current count of the counter. If `value` is None, the counter
        resets to the beginning of the count.

        :Args:

            value : int, optional
                Value where to reset the count. Defaults to None.

        """
        value, lmax = convertArgsToLists(value)
        [obj.reset(wrap(value,i)) for i, obj in enumerate(self._base_objs)]
    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        # min/max/dir are data-only (no audio-rate control).
        self._map_list = [SLMap(0, 100, 'lin', 'min', self._min, res="int", dataOnly=True),
                          SLMap(0, 1000, 'lin', 'max', self._max, res="int", dataOnly=True),
                          SLMap(0, 2, 'lin', 'dir', self._dir, res="int", dataOnly=True),
                          SLMap(0, 1000, 'lin', 'mul', self._mul),
                          SLMap(0, 1000, 'lin', 'add', self._add)]
        PyoObject.ctrl(self, map_list, title, wxnoserver)
    @property
    def input(self):
        """PyoObject. Audio trigger signal."""
        return self._input
    @input.setter
    def input(self, x):
        self.setInput(x)
    @property
    def min(self):
        """int. Minimum value."""
        return self._min
    @min.setter
    def min(self, x):
        self.setMin(x)
    @property
    def max(self):
        """int. Maximum value."""
        return self._max
    @max.setter
    def max(self, x):
        self.setMax(x)
    @property
    def dir(self):
        """int. Direction of the count."""
        return self._dir
    @dir.setter
    def dir(self, x):
        self.setDir(x)
class Select(PyoObject):
    """
    Sends trigger on matching integer values.

    Select takes in input an audio signal containing integer numbers
    and sends a trigger when the input matches `value` parameter. This
    object is especially designed to be used with Counter object.

    :Parent: :py:class:`PyoObject`

    :Args:

        input : PyoObject
            Audio signal. Must contains integer numbers.
        value : int, optional
            Value to be matched to send a trigger. Defaults to 0.

    .. note::

        The out() method is bypassed. Select's signal can not be sent
        to audio outs.

    .. seealso::

        :py:class:`Counter`

    >>> s = Server().boot()
    >>> s.start()
    >>> env = HannTable()
    >>> m = Metro(.125, poly=2).play()
    >>> te = TrigEnv(m, table=env, dur=.2, mul=.2)
    >>> c = Counter(m, min=0, max=4)
    >>> se = Select(c, 0)
    >>> tr = TrigRand(se, 400, 600)
    >>> a = Sine(freq=tr, mul=te).out()

    """
    def __init__(self, input, value=0, mul=1, add=0):
        PyoObject.__init__(self, mul, add)
        self._input = input
        self._value = value
        self._in_fader = InputFader(input)
        # Expand every argument and build one base object per stream.
        faders, values, muls, adds, nstreams = convertArgsToLists(self._in_fader, value, mul, add)
        self._base_objs = []
        for i in range(nstreams):
            self._base_objs.append(Select_base(wrap(faders,i), wrap(values,i), wrap(muls,i), wrap(adds,i)))
    def out(self, chnl=0, inc=1, dur=0, delay=0):
        # Bypassed: Select's signal never reaches the audio outputs.
        return self.play(dur, delay)
    def setInput(self, x, fadetime=0.05):
        """
        Replace the `input` attribute.

        :Args:

            x : PyoObject
                New signal to process.
            fadetime : float, optional
                Crossfade time between old and new input. Defaults to 0.05.

        """
        self._input = x
        self._in_fader.setInput(x, fadetime)
    def setValue(self, x):
        """
        Replace the `value` attribute.

        :Args:

            x : int
                new `value` attribute.

        """
        self._value = x
        values, nstreams = convertArgsToLists(x)
        for i, obj in enumerate(self._base_objs):
            obj.setValue(wrap(values,i))
    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        # `value` is data-only (no audio-rate control).
        self._map_list = [SLMap(0, 100, 'lin', 'value', self._value, res="int", dataOnly=True)]
        PyoObject.ctrl(self, map_list, title, wxnoserver)
    @property
    def input(self):
        """PyoObject. Audio signal."""
        return self._input
    @input.setter
    def input(self, x):
        self.setInput(x)
    @property
    def value(self):
        """int. Matching value."""
        return self._value
    @value.setter
    def value(self, x):
        self.setValue(x)
class Change(PyoObject):
    """
    Sends trigger that informs when input value has changed.

    :Parent: :py:class:`PyoObject`

    :Args:

        input : PyoObject
            Audio signal. Must contains integer numbers.

    .. note::

        The out() method is bypassed. Change's signal can not be sent
        to audio outs.

    >>> s = Server().boot()
    >>> s.start()
    >>> t = CosTable([(0,0), (100,1), (500,.3), (8191,0)])
    >>> a = XnoiseMidi(dist="loopseg", freq=[2, 3], x1=1, scale=1, mrange=(60,73))
    >>> b = Change(a)
    >>> amp = TrigEnv(b, table=t, dur=[.5,.333], mul=.3)
    >>> out = SineLoop(freq=a, feedback=.05, mul=amp).out()

    """
    def __init__(self, input, mul=1, add=0):
        PyoObject.__init__(self, mul, add)
        self._input = input
        self._in_fader = InputFader(input)
        # Expand arguments and build one base object per stream.
        faders, muls, adds, nstreams = convertArgsToLists(self._in_fader, mul, add)
        self._base_objs = []
        for i in range(nstreams):
            self._base_objs.append(Change_base(wrap(faders,i), wrap(muls,i), wrap(adds,i)))
    def out(self, chnl=0, inc=1, dur=0, delay=0):
        # Bypassed: Change's signal never reaches the audio outputs.
        return self.play(dur, delay)
    def setInput(self, x, fadetime=0.05):
        """
        Replace the `input` attribute.

        :Args:

            x : PyoObject
                New signal to process.
            fadetime : float, optional
                Crossfade time between old and new input. Defaults to 0.05.

        """
        self._input = x
        self._in_fader.setInput(x, fadetime)
    @property
    def input(self):
        """PyoObject. Audio signal."""
        return self._input
    @input.setter
    def input(self, x):
        self.setInput(x)
class Thresh(PyoObject):
    """
    Informs when a signal crosses a threshold.

    Thresh sends a trigger when a signal crosses a threshold. The `dir`
    parameter can be used to set the crossing mode, down-up, up-down, or
    both.

    :Parent: :py:class:`PyoObject`

    :Args:

        input : PyoObject
            Audio signal sending triggers.
        threshold : float or PyoObject, optional
            Threshold value. Defaults to 0.
        dir : int {0, 1, 2}, optional
            There are three modes of using Thresh:
                0. down-up (default)
                    sends a trigger when current value is higher than the
                    threshold, while old value was equal to or lower than
                    the threshold.
                1. up-down
                    sends a trigger when current value is lower than the
                    threshold, while old value was equal to or higher than
                    the threshold.
                2. both direction
                    sends a trigger in both the two previous cases.

    .. note::

        The out() method is bypassed. Thresh's signal can not be sent
        to audio outs.

    >>> s = Server().boot()
    >>> s.start()
    >>> a = Phasor(1)
    >>> b = Thresh(a, threshold=[0.25, 0.5, 0.66], dir=0)
    >>> t = LinTable([(0,0), (50,1), (250,.3), (8191,0)])
    >>> env = TrigEnv(b, table=t, dur=.5, mul=.3)
    >>> sine = Sine(freq=[500,600,700], mul=env).out()

    """
    def __init__(self, input, threshold=0., dir=0, mul=1, add=0):
        PyoObject.__init__(self, mul, add)
        self._input = input
        self._threshold = threshold
        self._dir = dir
        self._in_fader = InputFader(input)
        # Expand every argument to per-stream lists (pyo multi-channel expansion).
        in_fader, threshold, dir, mul, add, lmax = convertArgsToLists(self._in_fader, threshold, dir, mul, add)
        self._base_objs = [Thresh_base(wrap(in_fader,i), wrap(threshold,i), wrap(dir,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
    def out(self, chnl=0, inc=1, dur=0, delay=0):
        # Bypassed on purpose: trigger streams are never routed to audio outs.
        return self.play(dur, delay)
    def setInput(self, x, fadetime=0.05):
        """
        Replace the `input` attribute.

        :Args:

            x : PyoObject
                New signal to process.
            fadetime : float, optional
                Crossfade time between old and new input. Defaults to 0.05.

        """
        self._input = x
        self._in_fader.setInput(x, fadetime)
    def setThreshold(self, x):
        """
        Replace the `threshold` attribute.

        :Args:

            x : float or PyoObject
                new `threshold` attribute.

        """
        self._threshold = x
        x, lmax = convertArgsToLists(x)
        # Side-effect list comprehension: house style throughout this file.
        [obj.setThreshold(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    def setDir(self, x):
        """
        Replace the `dir` attribute.

        :Args:

            x : int {0, 1, 2}
                new `dir` attribute.

        """
        self._dir = x
        x, lmax = convertArgsToLists(x)
        [obj.setDir(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    @property
    def input(self):
        """PyoObject. Audio signal."""
        return self._input
    @input.setter
    def input(self, x):
        self.setInput(x)
    @property
    def threshold(self):
        """float or PyoObject. Threshold value."""
        return self._threshold
    @threshold.setter
    def threshold(self, x):
        self.setThreshold(x)
    @property
    def dir(self):
        """int. User mode."""
        return self._dir
    @dir.setter
    def dir(self, x):
        self.setDir(x)
class Percent(PyoObject):
    """
    Lets pass a certain percentage of the input triggers.

    Percent looks at the triggers received in `input` and
    lets them pass `percent` of the time.

    :Parent: :py:class:`PyoObject`

    :Args:

        input : PyoObject
            Audio signal sending triggers.
        percent : float or PyoObject, optional
            How much percentage of triggers to let pass,
            between 0 and 100. Defaults to 50.

    .. note::

        The out() method is bypassed. Percent's signal can not
        be sent to audio outs.

    >>> s = Server().boot()
    >>> s.start()
    >>> t = CosTable([(0,0), (50,1), (250,.3), (8191,0)])
    >>> met = Metro(time=.125, poly=2).play()
    >>> trig = Percent(met, percent=50)
    >>> amp = TrigEnv(trig, table=t, dur=.25, mul=.3)
    >>> fr = TrigRand(trig, min=400, max=1000)
    >>> freq = Port(fr, risetime=0.001, falltime=0.001)
    >>> a = Sine(freq=freq, mul=amp).out()

    """
    def __init__(self, input, percent=50., mul=1, add=0):
        PyoObject.__init__(self, mul, add)
        self._input = input
        self._percent = percent
        self._in_fader = InputFader(input)
        # Expand arguments to per-stream lists (pyo multi-channel expansion).
        in_fader, percent, mul, add, lmax = convertArgsToLists(self._in_fader, percent, mul, add)
        self._base_objs = [Percent_base(wrap(in_fader,i), wrap(percent,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
    def out(self, chnl=0, inc=1, dur=0, delay=0):
        # Bypassed on purpose: trigger streams are never routed to audio outs.
        return self.play(dur, delay)
    def setInput(self, x, fadetime=0.05):
        """
        Replace the `input` attribute.

        :Args:

            x : PyoObject
                New signal to process.
            fadetime : float, optional
                Crossfade time between old and new input. Defaults to 0.05.

        """
        self._input = x
        self._in_fader.setInput(x, fadetime)
    def setPercent(self, x):
        """
        Replace the `percent` attribute.

        :Args:

            x : float or PyoObject
                new `percent` attribute.

        """
        self._percent = x
        x, lmax = convertArgsToLists(x)
        # Side-effect list comprehension: house style throughout this file.
        [obj.setPercent(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        # Default controller: one float slider for `percent` (0-100).
        self._map_list = [SLMap(0., 100., 'lin', 'percent', self._percent)]
        PyoObject.ctrl(self, map_list, title, wxnoserver)
    @property
    def input(self):
        """PyoObject. Audio signal."""
        return self._input
    @input.setter
    def input(self, x):
        self.setInput(x)
    @property
    def percent(self):
        """float or PyoObject. Percentage value."""
        return self._percent
    @percent.setter
    def percent(self, x):
        self.setPercent(x)
class Timer(PyoObject):
    """
    Reports elapsed time between two trigs.

    A trigger in `input2` signal starts an internal timer. The next trigger
    in `input` signal stops it and reports the elapsed time between the two
    triggers. Useful for filtering triggers that are too close to each other.

    :Parent: :py:class:`PyoObject`

    :Args:

        input : PyoObject
            Trigger signal. Stops the timer and reports elapsed time.
        input2 : PyoObject
            Trigger signal. Starts the timer if not already started.

    .. note::

        The `input` signal is evaluated before the `input2` signal, so it's
        safe to stop and start the timer with the same trigger signal.

    >>> s = Server().boot()
    >>> s.start()
    >>> cl = Cloud(density=20, poly=2).play()
    >>> ti = Timer(cl, cl)
    >>> # Minimum waiting time before a new trig
    >>> cp = Compare(ti, comp=.05, mode=">")
    >>> trig = cl * cp
    >>> amp = TrigEnv(trig, table=HannTable(), dur=.05, mul=.25)
    >>> freq = TrigChoice(trig, choice=[100,150,200,250,300,350,400])
    >>> a = LFO(freq=freq, type=2, mul=amp).out()

    """
    def __init__(self, input, input2, mul=1, add=0):
        PyoObject.__init__(self, mul, add)
        self._input = input
        self._input2 = input2
        self._in_fader = InputFader(input)
        self._in_fader2 = InputFader(input2)
        in_fader, in_fader2, mul, add, lmax = convertArgsToLists(self._in_fader, self._in_fader2, mul, add)
        # One Timer_base per expanded stream (multi-channel expansion).
        self._base_objs = []
        for i in range(lmax):
            self._base_objs.append(Timer_base(wrap(in_fader, i), wrap(in_fader2, i), wrap(mul, i), wrap(add, i)))
    def setInput(self, x, fadetime=0.05):
        """
        Replace the `input` attribute (the timer's stop signal).

        :Args:

            x : PyoObject
                New signal to process.
            fadetime : float, optional
                Crossfade time between old and new input. Default to 0.05.

        """
        self._input = x
        self._in_fader.setInput(x, fadetime)
    def setInput2(self, x, fadetime=0.05):
        """
        Replace the `input2` attribute (the timer's start signal).

        :Args:

            x : PyoObject
                New signal to process.
            fadetime : float, optional
                Crossfade time between old and new input. Default to 0.05.

        """
        self._input2 = x
        self._in_fader2.setInput(x, fadetime)
    @property
    def input(self):
        """PyoObject. Timer stop signal."""
        return self._input
    @input.setter
    def input(self, x):
        self.setInput(x)
    @property
    def input2(self):
        """PyoObject. Timer start signal."""
        return self._input2
    @input2.setter
    def input2(self, x):
        self.setInput2(x)
class Iter(PyoObject):
    """
    Triggers iterate over a list of values.

    Iter loops over a list of user-defined values. When a trigger is received
    in `input`, Iter moves up to the next value in the list, with wrap-around.

    :Parent: :py:class:`PyoObject`

    :Args:

        input : PyoObject
            Audio signal sending triggers.
        choice : list of floats
            Sequence of values over which to iterate. A list of lists
            creates one independent sequence per audio stream.
        init : float, optional
            Initial value. Available at initialization time only.
            Defaults to 0.

    >>> s = Server().boot()
    >>> s.start()
    >>> l1 = [300, 350, 400, 450, 500, 550]
    >>> l2 = [300, 350, 450, 500, 550]
    >>> t = CosTable([(0,0), (50,1), (250,.3), (8191,0)])
    >>> met = Metro(time=.125, poly=2).play()
    >>> amp = TrigEnv(met, table=t, dur=.25, mul=.3)
    >>> it = Iter(met, choice=[l1, l2])
    >>> si = Sine(freq=it, mul=amp).out()

    """
    def __init__(self, input, choice, init=0., mul=1, add=0):
        PyoObject.__init__(self, mul, add)
        if type(choice) != ListType:
            # Fixed: the original printed to stderr and called exit(), which
            # kills the whole host interpreter (hostile to interactive and
            # embedded use). Raising lets the caller handle the bad argument.
            raise TypeError('"choice" argument of %s must be a list.' % self.__class__.__name__)
        self._input = input
        self._choice = choice
        self._in_fader = InputFader(input)
        in_fader, init, mul, add, lmax = convertArgsToLists(self._in_fader, init, mul, add)
        if type(choice[0]) != ListType:
            # Flat list: every stream iterates over the same sequence.
            self._base_objs = [Iter_base(wrap(in_fader,i), choice, wrap(init,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
        else:
            # List of lists: one sequence per stream, wrap-around expansion.
            choicelen = len(choice)
            lmax = max(choicelen, lmax)
            self._base_objs = [Iter_base(wrap(in_fader,i), wrap(choice,i), wrap(init,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
    def setInput(self, x, fadetime=0.05):
        """
        Replace the `input` attribute.

        :Args:

            x : PyoObject
                New signal to process.
            fadetime : float, optional
                Crossfade time between old and new input. Defaults to 0.05.

        """
        self._input = x
        self._in_fader.setInput(x, fadetime)
    def setChoice(self, x):
        """
        Replace the `choice` attribute.

        :Args:

            x : list of floats
                new `choice` attribute.

        """
        if type(x) != ListType or not x:
            # Same contract as __init__: reject non-list (and empty) values
            # instead of crashing below on x[0].
            raise TypeError('"choice" attribute of %s must be a non-empty list.' % self.__class__.__name__)
        self._choice = x
        if type(x[0]) != ListType:
            [obj.setChoice(self._choice) for i, obj in enumerate(self._base_objs)]
        else:
            [obj.setChoice(wrap(self._choice,i)) for i, obj in enumerate(self._base_objs)]
    def reset(self, x=0):
        """
        Resets the current count.

        :Args:

            x : int, optional
                Value where to reset the count. Defaults to 0.

        """
        [obj.reset(x) for obj in self._base_objs]
    @property
    def input(self):
        """PyoObject. Audio trigger signal."""
        return self._input
    @input.setter
    def input(self, x):
        self.setInput(x)
    @property
    def choice(self):
        """list of floats. Possible values."""
        return self._choice
    @choice.setter
    def choice(self, x):
        self.setChoice(x)
class Count(PyoObject):
    """
    Counts integers at audio rate.

    Count generates a signal increasing by 1 each sample when it receives a
    trigger. It can be used to do sample playback using TableIndex.

    :Parent: :py:class:`PyoObject`

    :Args:

        input : PyoObject
            Trigger signal. Start or Restart the count.
        min : int, optional
            Minimum value of the count, included in the count. Defaults to 0.
        max : int, optional
            Maximum value of the count, excluded from the count. Defaults to 0.
            A value of 0 eliminates the maximum, and the count continues
            increasing without resetting.

    >>> s = Server().boot()
    >>> s.start()
    >>> t = SndTable(SNDS_PATH+'/accord.aif')
    >>> ind = Count(Trig().play(), [0,100], t.getSize())
    >>> read = TableIndex(t, ind).out()

    """
    def __init__(self, input, min=0, max=0, mul=1, add=0):
        PyoObject.__init__(self, mul, add)
        self._input = input
        self._min = min
        self._max = max
        self._in_fader = InputFader(input)
        # Expand arguments to per-stream lists (pyo multi-channel expansion).
        in_fader, min, max, mul, add, lmax = convertArgsToLists(self._in_fader, min, max, mul, add)
        self._base_objs = [Count_base(wrap(in_fader,i), wrap(min,i), wrap(max,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
    def setInput(self, x, fadetime=0.05):
        """
        Replace the `input` attribute.

        :Args:

            x : PyoObject
                New input signal.
            fadetime : float, optional
                Crossfade time between old and new input. Default to 0.05.

        """
        self._input = x
        self._in_fader.setInput(x, fadetime)
    def setMin(self, x):
        """
        Replace the `min` attribute.

        :Args:

            x : int
                new `min` attribute.

        """
        self._min = x
        x, lmax = convertArgsToLists(x)
        # Side-effect list comprehension: house style throughout this file.
        [obj.setMin(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    def setMax(self, x):
        """
        Replace the `max` attribute.

        :Args:

            x : int
                new `max` attribute.

        """
        self._max = x
        x, lmax = convertArgsToLists(x)
        [obj.setMax(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        self._map_list = [SLMap(0, 10000, 'lin', 'min', self._min, res="int", dataOnly=True),
                          SLMap(10000, 1000000, 'lin', 'max', self._max, res="int", dataOnly=True),
                          SLMapMul(self._mul)]
        PyoObject.ctrl(self, map_list, title, wxnoserver)
    @property
    def input(self):
        """PyoObject. Trigger signal. Start/Restart the count."""
        return self._input
    @input.setter
    def input(self, x):
        self.setInput(x)
    @property
    def min(self):
        """int. Minimum value."""
        return self._min
    @min.setter
    def min(self, x):
        self.setMin(x)
    @property
    def max(self):
        """int. Maximum value."""
        return self._max
    @max.setter
    def max(self, x):
        self.setMax(x)
class NextTrig(PyoObject):
    """
    A trigger in the second stream opens a gate only for the next one in the first stream.

    When the gate is opened by a trigger in `input2` signal, the next trigger
    in `input` signal is allowed to pass and automatically closes the gate.

    :Parent: :py:class:`PyoObject`

    :Args:

        input : PyoObject
            Trigger signal. Trigger stream waiting for the gate to be opened.
        input2 : PyoObject
            Trigger signal. Trigger stream opening the gate.

    .. note::

        The `input` signal is evaluated before the `input2` signal, so it's
        safe to send triggers in both inputs at the same time and wait for the
        next one.

    >>> s = Server().boot()
    >>> s.start()
    >>> mid = Urn(max=4, freq=4, add=60)
    >>> sigL = SineLoop(freq=MToF(mid), feedback=.08, mul=0.3).out()
    >>> first = NextTrig(Change(mid), mid["trig"])
    >>> amp = TrigExpseg(first, [(0,0),(.01,.25),(1,0)])
    >>> sigR = SineLoop(midiToHz(84), feedback=0.05, mul=amp).out(1)

    """
    def __init__(self, input, input2, mul=1, add=0):
        PyoObject.__init__(self, mul, add)
        self._input = input
        self._input2 = input2
        self._in_fader = InputFader(input)
        self._in_fader2 = InputFader(input2)
        # Expand arguments to per-stream lists (pyo multi-channel expansion).
        in_fader, in_fader2, mul, add, lmax = convertArgsToLists(self._in_fader, self._in_fader2, mul, add)
        self._base_objs = [NextTrig_base(wrap(in_fader,i), wrap(in_fader2,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
    def setInput(self, x, fadetime=0.05):
        """
        Replace the `input` attribute.

        :Args:

            x : PyoObject
                New signal to process.
            fadetime : float, optional
                Crossfade time between old and new input. Default to 0.05.

        """
        self._input = x
        self._in_fader.setInput(x, fadetime)
    def setInput2(self, x, fadetime=0.05):
        """
        Replace the `input2` attribute.

        :Args:

            x : PyoObject
                New signal to process.
            fadetime : float, optional
                Crossfade time between old and new input. Default to 0.05.

        """
        self._input2 = x
        self._in_fader2.setInput(x, fadetime)
    @property
    def input(self):
        """PyoObject. Incoming trigger stream signal."""
        return self._input
    @input.setter
    def input(self, x):
        self.setInput(x)
    @property
    def input2(self):
        """PyoObject. Trigger stream opening the gate."""
        return self._input2
    @input2.setter
    def input2(self, x):
        self.setInput2(x)
class TrigVal(PyoObject):
    """
    Outputs a previously defined value on a trigger signal.

    Value defined at `value` argument is sent when a trigger signal
    is detected in input.

    :Parent: :py:class:`PyoObject`

    :Args:

        input : PyoObject
            Audio signal sending triggers.
        value : float or PyoObject, optional
            Next value. Defaults to 0.
        init : float, optional
            Initial value. Defaults to 0.

    .. note::

        The out() method is bypassed. TrigVal's signal can not be sent
        to audio outs.

    >>> s = Server().boot()
    >>> s.start()
    >>> def newfreq():
    ...     val.value = (val.value + 50) % 500 + 100
    >>> tr = Metro(1).play()
    >>> val = TrigVal(tr, value=250)
    >>> a = SineLoop(val, feedback=.1, mul=.3).out()
    >>> trfunc = TrigFunc(tr, newfreq)

    """
    def __init__(self, input, value=0., init=0., mul=1, add=0):
        PyoObject.__init__(self, mul, add)
        self._input = input
        self._value = value
        # NOTE: `init` is consumed only at construction time; it is not
        # stored on the instance and cannot be changed afterwards.
        self._in_fader = InputFader(input)
        in_fader, value, init, mul, add, lmax = convertArgsToLists(self._in_fader, value, init, mul, add)
        self._base_objs = [TrigVal_base(wrap(in_fader,i), wrap(value,i), wrap(init,i), wrap(mul,i), wrap(add,i)) for i in range(lmax)]
    def setInput(self, x, fadetime=0.05):
        """
        Replace the `input` attribute.

        :Args:

            x : PyoObject
                New signal to process.
            fadetime : float, optional
                Crossfade time between old and new input. Defaults to 0.05.

        """
        self._input = x
        self._in_fader.setInput(x, fadetime)
    def setValue(self, x):
        """
        Replace the `value` attribute.

        :Args:

            x : float or PyoObject
                new `value` attribute.

        """
        self._value = x
        x, lmax = convertArgsToLists(x)
        # Side-effect list comprehension: house style throughout this file.
        [obj.setValue(wrap(x,i)) for i, obj in enumerate(self._base_objs)]
    def out(self, chnl=0, inc=1, dur=0, delay=0):
        # Bypassed on purpose: trigger streams are never routed to audio outs.
        return self.play(dur, delay)
    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        self._map_list = [SLMap(0., 1., 'lin', 'value', self._value),
                          SLMapMul(self._mul)]
        PyoObject.ctrl(self, map_list, title, wxnoserver)
    @property
    def input(self):
        """PyoObject. Audio trigger signal."""
        return self._input
    @input.setter
    def input(self, x):
        self.setInput(x)
    @property
    def value(self):
        """float or PyoObject. Next value."""
        return self._value
    @value.setter
    def value(self, x):
        self.setValue(x)
class Euclide(PyoObject):
"""
Euclidean rhythm generator.
This object generates euclidean trigger patterns, resulting in onsets
in the rhythm to be as equidistant as possible.
A trigger is an audio signal with a value of 1 surrounded by 0s.
The play() method starts the Euclide and is not called at the object
creation time.
:Parent: :py:class:`PyoObject`
:Args:
time : float or PyoObject, optional
Time, in seconds, between each beat of the pattern. Defaults to 0.125.
taps : int, optional
Number of beats in the generated pattern (measure length), max = 64.
Defaults to 16.
onsets : int, optional
Number of onsets (a positive tap) in the generated pattern.
Defaults to 10.
poly : int, optional
Beat polyphony. Denotes how many independent streams are
generated by the object, allowing overlapping processes.
Available only at initialization. Defaults to 1.
.. note::
Euclide outputs many signals identified with a string between brackets:
| obj['tap'] returns audio stream of the current tap of the measure.
| obj['amp'] returns audio stream of the current beat amplitude.
| obj['dur'] returns audio stream of the current beat duration in seconds.
| obj['end'] returns audio stream with a trigger just before the end of the measure.
obj without brackets returns the generated trigger stream of the measure.
The out() method is bypassed. Euclide's signal can not be sent to audio outs.
Euclide has no `mul` and `add` attributes.
>>> s = Server().boot()
>>> s.start()
>>> t = CosTable([(0,0), (100,1), (500,.3), (8191,0)])
>>> beat = Euclide(time=.125, taps=16, onsets=[8,7], poly=1).play()
>>> trmid = TrigXnoiseMidi(beat, dist=12, mrange=(60, 96))
>>> trhz = Snap(trmid, choice=[0,2,3,5,7,8,10], scale=1)
>>> tr2 = TrigEnv(beat, table=t, dur=beat['dur'], mul=beat['amp'])
>>> a = Sine(freq=trhz, mul=tr2*0.3).out()
"""
def __init__(self, time=.125, taps=16, onsets=10, poly=1):
PyoObject.__init__(self)
self._tap_dummy = []
self._amp_dummy = []
self._dur_dummy = []
self._end_dummy = []
self._time = time
self._taps = taps
self._onsets = onsets
self._poly = poly
time, taps, onsets, lmax = convertArgsToLists(time, taps, onsets)
self._base_players = [Beater_base(wrap(time,i), wrap(taps,i), wrap([100]*lmax,i), poly) for i in range(lmax)]
self._base_objs = [Beat_base(wrap(self._base_players,j), i) for i in range(poly) for j in range(lmax)]
self._tap_objs = [BeatTapStream_base(wrap(self._base_players,j), i) for i in range(poly) for j in range(lmax)]
self._amp_objs = [BeatAmpStream_base(wrap(self._base_players,j), i) for i in range(poly) for j in range(lmax)]
self._dur_objs = [BeatDurStream_base(wrap(self._base_players,j), i) for i in range(poly) for j in range(lmax)]
self._end_objs = [BeatEndStream_base(wrap(self._base_players,j), i) for i in range(poly) for j in range(lmax)]
for i in range(lmax):
preset = [wrap(taps,i)] + self.__generate__(wrap(onsets,i), wrap(taps,i))
self._base_players[i].setPresets([preset])
self._base_players[i].recall(0)
def __generate__(self, m, k):
"""
Generates the euclidean rhythm for `m` onsets
in a measure of length `k` (number of taps).
Looping implementation, faster than recursive.
"""
if m > k: m = k
k -= m
mv, kv = [1], [0]
while k > 1:
if m > k:
m, k = k, m-k
mv, kv = mv+kv, mv
else:
m, k = m, k-m
mv, kv = mv+kv, kv
return mv * m + kv * k
def __getitem__(self, i):
if i == 'tap':
self._tap_dummy.append(Dummy([obj for obj in self._tap_objs]))
return self._tap_dummy[-1]
if i == 'amp':
self._amp_dummy.append(Dummy([obj for obj in self._amp_objs]))
return self._amp_dummy[-1]
if i == 'dur':
self._dur_dummy.append(Dummy([obj for obj in self._dur_objs]))
return self._dur_dummy[-1]
if i == 'end':
self._end_dummy.append(Dummy([obj for obj in self._end_objs]))
return self._end_dummy[-1]
if type(i) == SliceType:
return self._base_objs[i]
if i < len(self._base_objs):
return self._base_objs[i]
else:
print "'i' too large!"
def get(self, identifier="amp", all=False):
"""
Return the first sample of the current buffer as a float.
Can be used to convert audio stream to usable Python data.
"tap", "amp" or "dur" must be given to `identifier` to specify
which stream to get value from.
:Args:
identifier : string {"tap", "amp", "dur"}
Address string parameter identifying audio stream.
Defaults to "amp".
all : boolean, optional
If True, the first value of each object's stream
will be returned as a list.
If False, only the value of the first object's
stream will be returned as a float.
"""
if not all:
return self.__getitem__(identifier)[0]._getStream().getValue()
else:
return [obj._getStream().getValue() for obj in self.__getitem__(identifier).getBaseObjects()]
def setTime(self, x):
"""
Replace the `time` attribute.
:Args:
x : float or PyoObject
New `time` attribute.
"""
self._time = x
x, lmax = convertArgsToLists(x)
[obj.setTime(wrap(x,i)) for i, obj in enumerate(self._base_players)]
def setTaps(self, x):
"""
Replace the `taps` attribute.
:Args:
x : int
New `taps` attribute.
"""
self._taps = x
x, onsets, lmax = convertArgsToLists(x, self._onsets)
for i in range(len(self._base_players)):
preset = [wrap(x,i)] + self.__generate__(wrap(onsets,i), wrap(x,i))
self._base_players[i].setPresets([preset])
self._base_players[i].recall(0)
def setOnsets(self, x):
"""
Replace the `onsets` attribute.
:Args:
x : int
New `onsets` attribute.
"""
self._onsets = x
x, taps, lmax = convertArgsToLists(x, self._taps)
for i in range(len(self._base_players)):
preset = [wrap(taps,i)] + self.__generate__(wrap(x,i), wrap(taps,i))
self._base_players[i].setPresets([preset])
self._base_players[i].recall(0)
def reset(self):
"""
Reset internal counters to initialization values.
"""
[obj.reset() for obj in self._base_players]
def play(self, dur=0, delay=0):
dur, delay, lmax = convertArgsToLists(dur, delay)
self._tap_objs = [obj.play(wrap(dur,i), wrap(delay,i)) for i, obj in enumerate(self._tap_objs)]
self._amp_objs = [obj.play(wrap(dur,i), wrap(delay,i)) for i, obj in enumerate(self._amp_objs)]
self._dur_objs = [obj.play(wrap(dur,i), wrap(delay,i)) for i, obj in enumerate(self._dur_objs)]
self._end_objs = [obj.play(wrap(dur,i), wrap(delay,i)) for i, obj in enumerate(self._end_objs)]
return PyoObject.play(self, dur, delay)
def stop(self):
[obj.stop() for obj in self._tap_objs]
[obj.stop() for obj in self._amp_objs]
[obj.stop() for obj in self._dur_objs]
[obj.stop() for obj in self._end_objs]
return PyoObject.stop(self)
def out(self, chnl=0, inc=1, dur=0, delay=0):
return self.play(dur, delay)
def setMul(self, x):
pass
def setAdd(self, x):
pass
def setSub(self, x):
pass
def setDiv(self, x):
pass
def ctrl(self, map_list=None, title=None, wxnoserver=False):
self._map_list = [SLMap(0.01, 1., 'lin', 'time', self._time),
SLMap(2, 64, 'lin', 'taps', self._taps, res="int", dataOnly=True),
SLMap(0, 64, 'lin', 'onsets', self._onsets, res="int", dataOnly=True)]
PyoObject.ctrl(self, map_list, title, wxnoserver)
@property
def time(self):
"""float or PyoObject. Time, in seconds, between each beat."""
return self._time
@time.setter
def time(self, x): self.setTime(x)
@property
def taps(self):
"""int. Number of beats in the generated pattern."""
return self._taps
@taps.setter
def taps(self, x): self.setTaps(x)
@property
def onsets(self):
"""int. Number of onsets in the generated pattern."""
return self._onsets
@onsets.setter
def onsets(self, x): self.setOnsets(x)
class TrigBurst(PyoObject):
    """
    Generates a time/amplitude expandable trigger pattern.

    A trigger is an audio signal with a value of 1 surrounded by 0s.

    When TrigBurst receives a trigger in its `input` argument, it starts
    to output `count` triggers with a variable delay between each trigger
    of the pattern. If `expand` is less than 1.0, the delay becomes shorter,
    if it is greater than 1.0, the delay becomes longer.

    :Parent: :py:class:`PyoObject`

    :Args:

        input : PyoObject
            Input signal sending triggers.
        time : float or PyoObject, optional
            Base time, in seconds, between each trig of the serie. Defaults to 0.25.
        count : int, optional
            Number of trigs generated (length of the serie). Defaults to 10.
        expand : float, optional
            Timing power serie factor. Each delay before the next trig is the
            current delay (starting with `time`) times `expand` factor. Defaults to 1.0.
        ampfade : float, optional
            Amplitude power serie factor. Each amplitude in the serie is the
            current amplitude (starting at 1) times `ampfade` factor. Defaults to 1.0.
        poly : int, optional
            Voice polyphony. Denotes how many independent streams are
            generated by the object, allowing overlapping processes.
            Available only at initialization. Defaults to 1.

    .. note::

        TrigBurst outputs many signals identified with a string between brackets:

        | obj['tap'] returns audio stream of the current tap of the serie.
        | obj['amp'] returns audio stream of the current beat amplitude.
        | obj['dur'] returns audio stream of the current beat duration in seconds.
        | obj['end'] returns audio stream with a trigger just before the end of the serie.

        obj without brackets returns the generated trigger stream of the serie.

        The out() method is bypassed. TrigBurst's signal can not be sent to audio outs.

        TrigBurst has no `mul` and `add` attributes.

    >>> s = Server().boot()
    >>> s.start()
    >>> env = CosTable([(0,0), (100,0.5), (500, 0.3), (4096,0.3), (8192,0)])
    >>> m = Metro(2).play()
    >>> tb = TrigBurst(m, time=0.15, count=[15,20], expand=[0.92,0.9], ampfade=0.85)
    >>> amp = TrigEnv(tb, env, dur=tb["dur"], mul=tb["amp"]*0.3)
    >>> a = Sine([800,600], mul=amp)
    >>> rev = STRev(a, inpos=[0,1], revtime=1.5, cutoff=5000, bal=0.1).out()

    """
    def __init__(self, input, time=.25, count=10, expand=1.0, ampfade=1.0, poly=1):
        PyoObject.__init__(self)
        self._tap_dummy = []
        self._amp_dummy = []
        self._dur_dummy = []
        self._end_dummy = []
        self._input = input
        self._time = time
        self._count = count
        self._expand = expand
        self._ampfade = ampfade
        self._poly = poly
        self._in_fader = InputFader(input)
        # Expand arguments to per-stream lists (pyo multi-channel expansion).
        in_fader, time, count, expand, ampfade, lmax = convertArgsToLists(self._in_fader, time, count, expand, ampfade)
        # One player per expanded stream; `poly` voices are fanned out of each
        # player through the per-voice *_Stream wrappers below.
        self._base_players = [TrigBurster_base(wrap(in_fader,i), wrap(time,i), wrap(count,i), wrap(expand,i), wrap(ampfade,i), poly) for i in range(lmax)]
        self._base_objs = [TrigBurst_base(wrap(self._base_players,j), i) for i in range(poly) for j in range(lmax)]
        self._tap_objs = [TrigBurstTapStream_base(wrap(self._base_players,j), i) for i in range(poly) for j in range(lmax)]
        self._amp_objs = [TrigBurstAmpStream_base(wrap(self._base_players,j), i) for i in range(poly) for j in range(lmax)]
        self._dur_objs = [TrigBurstDurStream_base(wrap(self._base_players,j), i) for i in range(poly) for j in range(lmax)]
        self._end_objs = [TrigBurstEndStream_base(wrap(self._base_players,j), i) for i in range(poly) for j in range(lmax)]
    def __getitem__(self, i):
        # String keys expose the auxiliary streams; ints/slices index the
        # main trigger streams. Dummy wrappers are retained so they stay alive.
        if i == 'tap':
            self._tap_dummy.append(Dummy([obj for obj in self._tap_objs]))
            return self._tap_dummy[-1]
        if i == 'amp':
            self._amp_dummy.append(Dummy([obj for obj in self._amp_objs]))
            return self._amp_dummy[-1]
        if i == 'dur':
            self._dur_dummy.append(Dummy([obj for obj in self._dur_objs]))
            return self._dur_dummy[-1]
        if i == 'end':
            self._end_dummy.append(Dummy([obj for obj in self._end_objs]))
            return self._end_dummy[-1]
        if type(i) == SliceType:
            return self._base_objs[i]
        if i < len(self._base_objs):
            return self._base_objs[i]
        else:
            print "'i' too large!"
    def get(self, identifier="amp", all=False):
        """
        Return the first sample of the current buffer as a float.

        Can be used to convert audio stream to usable Python data.

        "tap", "amp" or "dur" must be given to `identifier` to specify
        which stream to get value from.

        :Args:

            identifier : string {"tap", "amp", "dur"}
                Address string parameter identifying audio stream.
                Defaults to "amp".
            all : boolean, optional
                If True, the first value of each object's stream
                will be returned as a list.
                If False, only the value of the first object's
                stream will be returned as a float.

        """
        if not all:
            return self.__getitem__(identifier)[0]._getStream().getValue()
        else:
            return [obj._getStream().getValue() for obj in self.__getitem__(identifier).getBaseObjects()]
    def setInput(self, x, fadetime=0.05):
        """
        Replace the `input` attribute.

        :Args:

            x : PyoObject
                New signal to process.
            fadetime : float, optional
                Crossfade time between old and new input. Defaults to 0.05.

        """
        self._input = x
        self._in_fader.setInput(x, fadetime)
    def setTime(self, x):
        """
        Replace the `time` attribute.

        :Args:

            x : float or PyoObject
                New `time` attribute.

        """
        self._time = x
        x, lmax = convertArgsToLists(x)
        # Side-effect list comprehension: house style throughout this file.
        [obj.setTime(wrap(x,i)) for i, obj in enumerate(self._base_players)]
    def setCount(self, x):
        """
        Replace the `count` attribute.

        :Args:

            x : int
                New `count` attribute.

        """
        self._count = x
        x, lmax = convertArgsToLists(x)
        [obj.setCount(wrap(x,i)) for i, obj in enumerate(self._base_players)]
    def setExpand(self, x):
        """
        Replace the `expand` attribute.

        :Args:

            x : float
                New `expand` attribute.

        """
        self._expand = x
        x, lmax = convertArgsToLists(x)
        [obj.setExpand(wrap(x,i)) for i, obj in enumerate(self._base_players)]
    def setAmpfade(self, x):
        """
        Replace the `ampfade` attribute.

        :Args:

            x : float
                New `ampfade` attribute.

        """
        self._ampfade = x
        x, lmax = convertArgsToLists(x)
        [obj.setAmpfade(wrap(x,i)) for i, obj in enumerate(self._base_players)]
    def play(self, dur=0, delay=0):
        # Start the auxiliary streams in lockstep with the main trigger stream.
        dur, delay, lmax = convertArgsToLists(dur, delay)
        self._tap_objs = [obj.play(wrap(dur,i), wrap(delay,i)) for i, obj in enumerate(self._tap_objs)]
        self._amp_objs = [obj.play(wrap(dur,i), wrap(delay,i)) for i, obj in enumerate(self._amp_objs)]
        self._dur_objs = [obj.play(wrap(dur,i), wrap(delay,i)) for i, obj in enumerate(self._dur_objs)]
        self._end_objs = [obj.play(wrap(dur,i), wrap(delay,i)) for i, obj in enumerate(self._end_objs)]
        return PyoObject.play(self, dur, delay)
    def stop(self):
        [obj.stop() for obj in self._tap_objs]
        [obj.stop() for obj in self._amp_objs]
        [obj.stop() for obj in self._dur_objs]
        [obj.stop() for obj in self._end_objs]
        return PyoObject.stop(self)
    def out(self, chnl=0, inc=1, dur=0, delay=0):
        # Bypassed on purpose: trigger streams are never routed to audio outs.
        return self.play(dur, delay)
    def setMul(self, x):
        # TrigBurst has no mul/add attributes; silently ignore.
        pass
    def setAdd(self, x):
        pass
    def setSub(self, x):
        pass
    def setDiv(self, x):
        pass
    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        self._map_list = [SLMap(0.1, 1., 'lin', 'time', self._time, dataOnly=True),
                          SLMap(2, 128, 'lin', 'count', self._count, res="int", dataOnly=True),
                          SLMap(0.5, 2.0, 'lin', 'expand', self._expand, dataOnly=True),
                          SLMap(0.5, 1.0, 'lin', 'ampfade', self._ampfade, dataOnly=True)]
        PyoObject.ctrl(self, map_list, title, wxnoserver)
    @property
    def input(self):
        """PyoObject. Audio trigger signal."""
        return self._input
    @input.setter
    def input(self, x): self.setInput(x)
    @property
    def time(self):
        """float or PyoObject. Base time, in seconds, between each trig."""
        return self._time
    @time.setter
    def time(self, x): self.setTime(x)
    @property
    def count(self):
        """int. Number of triggers in the generated serie."""
        return self._count
    @count.setter
    def count(self, x): self.setCount(x)
    @property
    def expand(self):
        """float. Time's power expansion factor."""
        return self._expand
    @expand.setter
    def expand(self, x): self.setExpand(x)
    @property
    def ampfade(self):
        """float. Amplitude's power expansion factor."""
        return self._ampfade
    @ampfade.setter
    def ampfade(self, x): self.setAmpfade(x)
"Gaussian"
] | 094d5ac6edcf1e7d49d93520ea4a0d330ef3aaf836be4c3b4e13a1fc703cdd72 |
"""
__init__.py
State Estimation and Analysis for PYthon
Module for working with oceanographic data and models
Copyright (c)2017 University of Hawaii under the MIT-License.
Requires the following packages: joblib
Import classes include:
- :class:`~seapy.environ.opt`
- :class:`~seapy.progressbar.ProgressBar`
- :class:`~seapy.tidal_energy.energetics`
Imported functions include:
- :func:`~seapy.lib.adddim`
- :func:`~seapy.lib.chunker`
- :func:`~seapy.lib.convolve_mask`
- :func:`~seapy.lib.day2date`
- :func:`~seapy.lib.date2day`
- :func:`~seapy.lib.earth_angle`
- :func:`~seapy.lib.earth_distance`
- :func:`~seapy.lib.flatten`
- :func:`~seapy.lib.list_files`
- :func:`~seapy.lib.netcdf`
- :func:`~seapy.lib.rotate`
- :func:`~seapy.lib.today2day`
- :func:`~seapy.lib.unique_rows`
- :func:`~seapy.lib.vecfind`
- :func:`~seapy.oa.oasurf`
- :func:`~seapy.oa.oavol`
- :func:`~seapy.tidal_energy.tidal_energy`
- :func:`~seapy.progressbar.progress`
"""
from .lib import *
from . import roms
from . import model
from . import qserver
from . import mapping
from . import filt
from . import plot
from . import progressbar
from . import seawater
from . import tide
from .tidal_energy import tidal_energy
from .environ import opt
from .hawaii import hawaii
from .oa import *
| ocefpaf/seapy | seapy/__init__.py | Python | mit | 1,333 | [
"NetCDF"
] | 82cef707a4e8baa9cc6ce1c379befd4b0a568ec33dd6251c1fbacc97cb9c7507 |
"""Restricted Boltzmann machine module"""
# TODO:
# REGULARIZATION
# MULTINOMIAL
# ADD JOINT CATEGORY MODELING
import numpy as np
from params import SGDParams, RBMParams
from batches import Batches
import time
def draw_normal(mu):
return np.random.normal(mu)
def draw_bernoulli(p):
return (p > np.random.rand()).astype(float)
def sigmoid(z):
return 1 / (1 + np.exp(-1.*z))
def calc_reg_mat(p_hid):
return 1.
class RBMTrainer(SGDParams):
"""Stochastic Gradient Descent Trainer"""
def __init__(self, rbm, params):
super(RBMTrainer, self).__init__()
if params is None:
params = SGDParams()
for k, v in params.__dict__.items():
setattr(self,k,v)
self.rbm = rbm
# (FOR MOMENTUM)
self.dW_old = np.zeros_like(rbm.W)
self.da_old = np.zeros_like(rbm.a)
self.db_old = np.zeros_like(rbm.b)
self.log['error'] = []
self.log['lrate'] = []
def annealing(self):
pass
def print_progress(self):
print '\nEpoch %d' % (self.current_epoch + 1)
print 'Weight penalty: %f' % self.w_penalty
print 'Learning rate: %f' % self.log['lrate'][-1]
print 'Recon. error: %f' % self.log['error'][-1]
out_str = [];
for param in self.rbm.params:
print "{0:5s} = {1:5f}".format('|'+ param +'|',
np.linalg.norm(self.rbm.__getattribute__(param)))
def vis_learning(self):
print self.rbm.W.shape
# plt.figure(1)
# plt.clf()
# w = self.rbm.W[:,0]
# plt.imshow(w.reshape(28,28),cmap='gray',interpolation='none')
# plt.show()
# time.sleep(0.05)
def log_info(self,err):
self.log['error'].append(err)
self.log['lrate'].append(self.lrate)
def train(self, data):
"""Train RBM using stochastic graident descent"""
w_penalty0 = self.w_penalty
lrate0 = self.lrate
# TRANSFORM DATA INTO "ITERABLE" BATCHES
data = Batches(data, batch_sz=self.batch_sz)
states = None
for self.current_epoch in xrange(self.n_epoch):
# ANNEALING
if self.current_epoch > self.begin_anneal:
self.lrate = max(lrate0*((self.current_epoch-
self.begin_anneal)**-.25),1e-8)
# WEIGHT DECAY SCHEDULE
if self.current_epoch < self.begin_wd:
self.w_penalty = 0
else:
self.w_penalty = w_penalty0
# LOOP OVER BATCHED DATA
batch_err_tot = 0
while True:
states, batch_err = self.run_gibbs(data(), states)
gradients = self.calc_gradients(**states)
self.update_params(**gradients)
batch_err_tot += batch_err
if data.cnt == data.n_batches - 1:
break
self.log_info(batch_err_tot)
if (self.verbose):
self.print_progress()
if self.visualize & (self.current_epoch % self.display_every == 0):
self.log['gradients'] = gradients
self.log['states'] = states
self.vis_fun.vis(self)
return self.rbm, self.log
def run_gibbs(self, batch_data, states):
"""Runn Gibbs sampler for RBM (i.e. Contrastive Divergence)"""
# CALCULATE DATA-DRIVEN HIDDEN-UNIT REGULARIZATION
a_hid, reg_mat = self.rbm.hgv(batch_data,
self.rbm.sample_hid, calc_reg=True)
if self.pcd and (states != None):
a_hid = states['a_hid']
a_vis0 = batch_data.copy()
# REGULARIZE DATA-DRIVEN HIDDEN UNIT ACTIVATIONS
a_hid0 = (1. - self.reg_strength) * a_hid + self.reg_strength*reg_mat
gibbs_cnt = 1
while True:
# GO DOWN
a_vis = self.rbm.vgh(a_hid, self.rbm.sample_vis)
# GO BACK UP
if gibbs_cnt == self.rbm.n_gibbs:
a_hid, _ = self.rbm.hgv(a_vis,0);
break
else:
a_hid, _ = self.rbm.hgv(a_vis, self.rbm.sample_hid);
gibbs_cnt += 1
# PACKAGE STATES INTO DICT
states = {'a_vis0': a_vis0, 'a_hid0': a_hid0, 'a_vis': a_vis, 'a_hid': a_hid}
recon_error = ((a_vis - batch_data)**2).sum()
return states, recon_error
def calc_gradients(self, a_vis0, a_hid0, a_vis, a_hid):
"""Calculate Boltzmann machine gradients with repsect to model params"""
dW = (np.dot(a_vis0.T, a_hid0) - np.dot(a_vis.T, a_hid))/a_vis0.shape[0]
da = a_hid.mean(0)
db = a_vis.mean(0)
# PACKAGE GRADIENTS
gradients = {'dW': dW, 'da': da, 'db': db}
return gradients
def update_params(self, dW, da, db):
dW = self.momentum*self.dW_old + (1-self.momentum)*dW
self.rbm.W += self.lrate*dW
# WEIGHT REGULARIZATION
if self.w_penalty > 0: # (L2)
W_penalty = -self.lrate*self.w_penalty*self.rbm.W
elif self.w_penalty < 0: # (L1)
W_penalty = self.lrate*self.w_penalty*np.sign(self.rbm.W)
else:
W_penalty = 0
self.rbm.W += W_penalty
db = self.momentum*self.db_old + (1-self.momentum)*db
self.rbm.b += db
da = self.momentum*self.da_old + (1-self.momentum)*da
self.rbm.a += da
# (MOMENTUM MEMORY)
self.dW_old = dW
self.da_old = da
self.db_old = db
class RBM(RBMParams):
"""General Restricted Boltzmann Machine object"""
def __init__(self, params=None):
super(RBM, self).__init__()
if params is None:
params = RBMParams() # DEFAULT PARAMETERS OBJECT
for k, v in params.__dict__.items():
setattr(self,k,v)
w_scale = np.sqrt(6. / (2 * self.n_vis));
self.W = w_scale*(np.random.rand(self.n_vis, self.n_hid) - 0.5)
self.a = np.zeros(self.n_hid)
self.b = np.zeros(self.n_vis)
if self.input_type == 'binary':
self.sample_fun = draw_bernoulli
else:
self.sample_fun = draw_normal
def hgv(self, vis_state, sample_hid=False, calc_reg=False):
"""Return binary hidden unit states given visible states
"""
# p(h | v)
a_hid = sigmoid(np.dot(vis_state, self.W) + self.a)
# REGULARIZE HIDDEN UNIT STATES
if calc_reg:
reg_mat = calc_reg_mat(a_hid)
else:
reg_mat = None
# SAMPLE OR MEAN FIELD?
if sample_hid:
a_hid = draw_bernoulli(a_hid)
return a_hid, reg_mat
def vgh(self, hid_states, sample_vis=False):
"""Return visible unit states given hidden
"""
if self.input_type == 'binary':
a_vis = sigmoid(np.dot(self.W, hid_states.T) + self.b[:,None])
elif self.input_tyype == 'gaussian':
a_vis = np.dot(self.W, hid_states.T) + self.b[:,None]
if sample_vis:
a_vis = self.sample_fun(a_vis)
return a_vis.T
def draw_samples(self, a_vis0, n_samples=50, h_mask=1.):
"""Draw <n_samples> from current model given initial visible
states <a_vis0>. Can also anchor specific hidden units with
<h_mask>"""
pass
class Conditional(RBM):
"""Conditional RBM (aka Dynamic RBM) under construction"""
def __init__(self, params):
super(Conditional, self).__init__()
for k, v in params.iteritems():
if hasattr(self, k):
setattr(self,k,v)
if self.input_type == 'binary':
self.sample_fun = draw_bernoulli
else:
self.sample_fun = draw_normal
self.params = ['W','b','a','d']
class MCRBM(RBM):
"""Mean Covariance RBM under construction"""
def __init__(self, params):
super(MCRBM, self).__init__()
for k, v in params.iteritems():
if hasattr(self, k):
setattr(self,k,v)
if __name__ == '__main__':
r = RBM(n_vis,n_hid)
| ZenDevelopmentSystems/pedal | models/rbm.py | Python | bsd-2-clause | 8,322 | [
"Gaussian"
] | 161a88dae2516e8c9a1fd0252e4e620ba857030465cd8607410108229113e474 |
# Copyright: 2005-2009 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Author(s): Brian Harring (ferringb@gentoo.org)
__all__ = ["Mapping", "MutableMapping", "UserDict", "ProtectedDict",
"LazyLoad", "slot_dict_class"]
import sys
import weakref
class Mapping(object):
"""
In python-3.0, the UserDict.DictMixin class has been replaced by
Mapping and MutableMapping from the collections module, but 2to3
doesn't currently account for this change:
http://bugs.python.org/issue2876
As a workaround for the above issue, use this class as a substitute
for UserDict.DictMixin so that code converted via 2to3 will run.
"""
__slots__ = ()
def __iter__(self):
return iter(self.keys())
def keys(self):
return list(self.__iter__())
def __contains__(self, key):
try:
value = self[key]
except KeyError:
return False
return True
def iteritems(self):
for k in self:
yield (k, self[k])
def iterkeys(self):
return self.__iter__()
def itervalues(self):
for _, v in self.items():
yield v
def values(self):
return [v for _, v in self.iteritems()]
def items(self):
return list(self.iteritems())
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __repr__(self):
return repr(dict(self.items()))
def __len__(self):
return len(list(self))
if sys.hexversion >= 0x3000000:
items = iteritems
keys = __iter__
values = itervalues
class MutableMapping(Mapping):
"""
A mutable vesion of the Mapping class.
"""
__slots__ = ()
def clear(self):
for key in list(self):
del self[key]
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def pop(self, key, *args):
if len(args) > 1:
raise TypeError("pop expected at most 2 arguments, got " + \
repr(1 + len(args)))
try:
value = self[key]
except KeyError:
if args:
return args[0]
raise
del self[key]
return value
def popitem(self):
try:
k, v = next(iter(self.items()))
except StopIteration:
raise KeyError('container is empty')
del self[k]
return (k, v)
def update(self, *args, **kwargs):
if len(args) > 1:
raise TypeError(
"expected at most 1 positional argument, got " + \
repr(len(args)))
other = None
if args:
other = args[0]
if other is None:
pass
elif hasattr(other, 'iteritems'):
# Use getattr to avoid interference from 2to3.
for k, v in getattr(other, 'iteritems')():
self[k] = v
elif hasattr(other, 'items'):
# Use getattr to avoid interference from 2to3.
for k, v in getattr(other, 'items')():
self[k] = v
elif hasattr(other, 'keys'):
for k in other.keys():
self[k] = other[k]
else:
for k, v in other:
self[k] = v
if kwargs:
self.update(kwargs)
class UserDict(MutableMapping):
"""
Use this class as a substitute for UserDict.UserDict so that
code converted via 2to3 will run:
http://bugs.python.org/issue2876
"""
__slots__ = ('data',)
def __init__(self, *args, **kwargs):
self.data = {}
if len(args) > 1:
raise TypeError(
"expected at most 1 positional argument, got " + \
repr(len(args)))
if args:
self.update(args[0])
if kwargs:
self.update(kwargs)
def __repr__(self):
return repr(self.data)
def __contains__(self, key):
return key in self.data
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, item):
self.data[key] = item
def __delitem__(self, key):
del self.data[key]
def clear(self):
self.data.clear()
if sys.hexversion >= 0x3000000:
keys = __iter__
class OrderedDict(UserDict):
__slots__ = ('_order',)
def __init__(self, *args, **kwargs):
self._order = []
UserDict.__init__(self, *args, **kwargs)
def __iter__(self):
return iter(self._order)
def __setitem__(self, key, item):
if key in self:
self._order.remove(key)
UserDict.__setitem__(self, key, item)
self._order.append(key)
def __delitem__(self, key):
UserDict.__delitem__(self, key)
self._order.remove(key)
def clear(self):
UserDict.clear(self)
del self._order[:]
if sys.hexversion >= 0x3000000:
keys = __iter__
class ProtectedDict(MutableMapping):
"""
given an initial dict, this wraps that dict storing changes in a secondary dict, protecting
the underlying dict from changes
"""
__slots__=("orig","new","blacklist")
def __init__(self, orig):
self.orig = orig
self.new = {}
self.blacklist = {}
def __setitem__(self, key, val):
self.new[key] = val
if key in self.blacklist:
del self.blacklist[key]
def __getitem__(self, key):
if key in self.new:
return self.new[key]
if key in self.blacklist:
raise KeyError(key)
return self.orig[key]
def __delitem__(self, key):
if key in self.new:
del self.new[key]
elif key in self.orig:
if key not in self.blacklist:
self.blacklist[key] = True
return
raise KeyError(key)
def __iter__(self):
for k in self.new:
yield k
for k in self.orig:
if k not in self.blacklist and k not in self.new:
yield k
def __contains__(self, key):
return key in self.new or (key not in self.blacklist and key in self.orig)
if sys.hexversion >= 0x3000000:
keys = __iter__
class LazyLoad(Mapping):
"""
Lazy loading of values for a dict
"""
__slots__=("pull", "d")
def __init__(self, pull_items_func, initial_items=[]):
self.d = {}
for k, v in initial_items:
self.d[k] = v
self.pull = pull_items_func
def __getitem__(self, key):
if key in self.d:
return self.d[key]
elif self.pull != None:
self.d.update(self.pull())
self.pull = None
return self.d[key]
def __iter__(self):
if self.pull is not None:
self.d.update(self.pull())
self.pull = None
return iter(self.d)
def __contains__(self, key):
if key in self.d:
return True
elif self.pull != None:
self.d.update(self.pull())
self.pull = None
return key in self.d
if sys.hexversion >= 0x3000000:
keys = __iter__
_slot_dict_classes = weakref.WeakValueDictionary()
def slot_dict_class(keys, prefix="_val_"):
"""
Generates mapping classes that behave similar to a dict but store values
as object attributes that are allocated via __slots__. Instances of these
objects have a smaller memory footprint than a normal dict object.
@param keys: Fixed set of allowed keys
@type keys: Iterable
@param prefix: a prefix to use when mapping
attribute names from keys
@type prefix: String
@rtype: SlotDict
@return: A class that constructs SlotDict instances
having the specified keys.
"""
if isinstance(keys, frozenset):
keys_set = keys
else:
keys_set = frozenset(keys)
v = _slot_dict_classes.get((keys_set, prefix))
if v is None:
class SlotDict(object):
allowed_keys = keys_set
_prefix = prefix
__slots__ = ("__weakref__",) + \
tuple(prefix + k for k in allowed_keys)
def __init__(self, *args, **kwargs):
if len(args) > 1:
raise TypeError(
"expected at most 1 positional argument, got " + \
repr(len(args)))
if args:
self.update(args[0])
if kwargs:
self.update(kwargs)
def __iter__(self):
for k, v in self.iteritems():
yield k
def __len__(self):
l = 0
for i in self.iteritems():
l += 1
return l
def keys(self):
return list(self)
def iteritems(self):
prefix = self._prefix
for k in self.allowed_keys:
try:
yield (k, getattr(self, prefix + k))
except AttributeError:
pass
def items(self):
return list(self.iteritems())
def itervalues(self):
for k, v in self.iteritems():
yield v
def values(self):
return list(self.itervalues())
def __delitem__(self, k):
try:
delattr(self, self._prefix + k)
except AttributeError:
raise KeyError(k)
def __setitem__(self, k, v):
setattr(self, self._prefix + k, v)
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def update(self, *args, **kwargs):
if len(args) > 1:
raise TypeError(
"expected at most 1 positional argument, got " + \
repr(len(args)))
other = None
if args:
other = args[0]
if other is None:
pass
elif hasattr(other, 'iteritems'):
# Use getattr to avoid interference from 2to3.
for k, v in getattr(other, 'iteritems')():
self[k] = v
elif hasattr(other, 'items'):
# Use getattr to avoid interference from 2to3.
for k, v in getattr(other, 'items')():
self[k] = v
elif hasattr(other, 'keys'):
for k in other.keys():
self[k] = other[k]
else:
for k, v in other:
self[k] = v
if kwargs:
self.update(kwargs)
def __getitem__(self, k):
try:
return getattr(self, self._prefix + k)
except AttributeError:
raise KeyError(k)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __contains__(self, k):
return hasattr(self, self._prefix + k)
def pop(self, key, *args):
if len(args) > 1:
raise TypeError(
"pop expected at most 2 arguments, got " + \
repr(1 + len(args)))
try:
value = self[key]
except KeyError:
if args:
return args[0]
raise
del self[key]
return value
def popitem(self):
try:
k, v = self.iteritems().next()
except StopIteration:
raise KeyError('container is empty')
del self[k]
return (k, v)
def copy(self):
c = self.__class__()
c.update(self)
return c
def clear(self):
for k in self.allowed_keys:
try:
delattr(self, self._prefix + k)
except AttributeError:
pass
def __str__(self):
return str(dict(self.iteritems()))
def __repr__(self):
return repr(dict(self.iteritems()))
if sys.hexversion >= 0x3000000:
items = iteritems
keys = __iter__
values = itervalues
v = SlotDict
_slot_dict_classes[v.allowed_keys] = v
return v
| devurandom/portage | pym/portage/cache/mappings.py | Python | gpl-2.0 | 10,177 | [
"Brian"
] | 599e2c8fb6b787a83ec5fc23571473b44eceec3b5a090d1d2bbdb0bffae41fca |
# This Python file uses the following encoding: utf-8
# Here we initialize the collection of all possible names.
# You might want to add or delete names before starting to seriously use idmas32.
# If you ruled out a gender/nationality in config.py, then the corresponding names are irrelevant.
import config
for na in config.nationalities:
config.firstnames['M'+na[0]] = []
config.firstnames['F'+na[0]] = []
config.lastnames[na[0]] = []
def addfirstname(g,n,name):
if g:
config.firstnames['M'+n].insert(0,name)
else:
config.firstnames['F'+n].insert(0,name)
def addlastname(n,name):
config.lastnames[n].insert(0,name)
# The rest of the file is a huge list of possible names
addfirstname(1,'American','Michael')
addfirstname(0,'American','Jennifer')
addfirstname(1,'American','Christopher')
addfirstname(0,'American','Jessica')
addfirstname(1,'American','Matthew')
addfirstname(0,'American','Ashley')
addfirstname(1,'American','Joshua')
addfirstname(0,'American','Amanda')
addfirstname(1,'American','David')
addfirstname(0,'American','Sarah')
addfirstname(1,'American','Daniel')
addfirstname(0,'American','Stephanie')
addfirstname(1,'American','James')
addfirstname(0,'American','Nicole')
addfirstname(1,'American','John')
addfirstname(0,'American','Melissa')
addfirstname(1,'American','Robert')
addfirstname(0,'American','Heather')
addfirstname(1,'American','Joseph')
addfirstname(0,'American','Elizabeth')
addfirstname(1,'American','Jason')
addfirstname(0,'American','Megan')
addfirstname(1,'American','Ryan')
addfirstname(0,'American','Crystal')
addfirstname(1,'American','Justin')
addfirstname(0,'American','Amy')
addfirstname(1,'American','Andrew')
addfirstname(0,'American','Tiffany')
addfirstname(1,'American','Brandon')
addfirstname(0,'American','Rachel')
addfirstname(1,'American','William')
addfirstname(0,'American','Danielle')
addfirstname(1,'American','Brian')
addfirstname(0,'American','Michelle')
addfirstname(1,'American','Adam')
addfirstname(0,'American','Amber')
addfirstname(1,'American','Jonathan')
addfirstname(0,'American','Laura')
addfirstname(1,'American','Nicholas')
addfirstname(0,'American','Christina')
addfirstname(1,'American','Anthony')
addfirstname(0,'American','Lauren')
addfirstname(1,'American','Eric')
addfirstname(0,'American','Kimberly')
addfirstname(1,'American','Steven')
addfirstname(0,'American','Rebecca')
addfirstname(1,'American','Kevin')
addfirstname(0,'American','Emily')
addfirstname(1,'American','Thomas')
addfirstname(0,'American','Erin')
addfirstname(1,'American','Timothy')
addfirstname(0,'American','Angela')
addfirstname(1,'American','Richard')
addfirstname(0,'American','Kelly')
addfirstname(1,'American','Jeffrey')
addfirstname(0,'American','Andrea')
addfirstname(1,'American','Aaron')
addfirstname(0,'American','Lisa')
addfirstname(1,'American','Benjamin')
addfirstname(0,'American','Katherine')
addfirstname(1,'American','Jeremy')
addfirstname(0,'American','Jamie')
addfirstname(1,'American','Mark')
addfirstname(0,'American','Sara')
addfirstname(1,'American','Charles')
addfirstname(0,'American','Mary')
addfirstname(1,'American','Kyle')
addfirstname(0,'American','Erica')
addfirstname(1,'American','Stephen')
addfirstname(0,'American','Lindsey')
addfirstname(1,'American','Patrick')
addfirstname(0,'American','Lindsay')
addfirstname(1,'American','Jacob')
addfirstname(0,'American','Shannon')
addfirstname(1,'American','Nathan')
addfirstname(0,'American','Kristen')
addfirstname(1,'American','Scott')
addfirstname(0,'American','Samantha')
addfirstname(1,'American','Paul')
addfirstname(0,'American','Alicia')
addfirstname(1,'American','Dustin')
addfirstname(0,'American','Brittany')
addfirstname(1,'American','Travis')
addfirstname(0,'American','April')
addfirstname(1,'American','Sean')
addfirstname(0,'American','Christine')
addfirstname(1,'American','Gregory')
addfirstname(0,'American','Katie')
addfirstname(1,'American','Zachary')
addfirstname(0,'American','Vanessa')
addfirstname(1,'American','Kenneth')
addfirstname(0,'American','Courtney')
addfirstname(1,'American','Tyler')
addfirstname(0,'American','Kristin')
addfirstname(1,'American','Bryan')
addfirstname(0,'American','Kathryn')
addfirstname(1,'American','Jose')
addfirstname(0,'American','Tara')
addfirstname(1,'American','Jesse')
addfirstname(0,'American','Maria')
addfirstname(1,'American','Chad')
addfirstname(0,'American','Allison')
addfirstname(1,'American','Derek')
addfirstname(0,'American','Julie')
addfirstname(1,'American','Bradley')
addfirstname(0,'American','Holly')
addfirstname(1,'American','Alexander')
addfirstname(0,'American','Jenna')
addfirstname(1,'American','Samuel')
addfirstname(0,'American','Natalie')
addfirstname(1,'American','Shawn')
addfirstname(0,'American','Jacqueline')
addfirstname(1,'American','Marcus')
addfirstname(0,'American','Anna')
addfirstname(1,'American','Edward')
addfirstname(0,'American','Monica')
addfirstname(1,'American','Jared')
addfirstname(0,'American','Victoria')
addfirstname(1,'American','Peter')
addfirstname(0,'American','Latoya')
addfirstname(1,'American','Keith')
addfirstname(0,'American','Brandi')
addfirstname(1,'American','Cody')
addfirstname(0,'American','Cynthia')
addfirstname(1,'American','Donald')
addfirstname(0,'American','Cassandra')
addfirstname(1,'American','Juan')
addfirstname(0,'American','Brandy')
addfirstname(1,'American','Jordan')
addfirstname(0,'American','Meghan')
addfirstname(1,'American','Joel')
addfirstname(0,'American','Patricia')
addfirstname(1,'American','Ronald')
addfirstname(0,'American','Stacy')
addfirstname(1,'American','George')
addfirstname(0,'American','Kristina')
addfirstname(1,'American','Corey')
addfirstname(0,'American','Catherine')
addfirstname(1,'American','Phillip')
addfirstname(0,'American','Veronica')
addfirstname(1,'American','Douglas')
addfirstname(0,'American','Kathleen')
addfirstname(1,'American','Brett')
addfirstname(0,'American','Diana')
addfirstname(1,'American','Raymond')
addfirstname(0,'American','Dana')
addfirstname(1,'American','Craig')
addfirstname(0,'American','Krystal')
addfirstname(1,'American','Gary')
addfirstname(0,'American','Leslie')
addfirstname(1,'American','Carlos')
addfirstname(0,'American','Valerie')
addfirstname(1,'American','Antonio')
addfirstname(0,'American','Natasha')
addfirstname(1,'American','Ian')
addfirstname(0,'American','Carrie')
addfirstname(1,'American','Derrick')
addfirstname(0,'American','Stacey')
addfirstname(1,'American','Nathaniel')
addfirstname(0,'American','Sheena')
addfirstname(1,'American','Casey')
addfirstname(0,'American','Erika')
addfirstname(1,'American','Philip')
addfirstname(0,'American','Karen')
addfirstname(1,'American','Cory')
addfirstname(0,'American','Leah')
addfirstname(1,'American','Erik')
addfirstname(0,'American','Chelsea')
addfirstname(1,'American','Shane')
addfirstname(0,'American','Melanie')
addfirstname(1,'American','Frank')
addfirstname(0,'American','Candice')
addfirstname(1,'American','Gabriel')
addfirstname(0,'American','Julia')
addfirstname(1,'American','Luis')
addfirstname(0,'American','Joanna')
addfirstname(1,'American','Brent')
addfirstname(0,'American','Alexis')
addfirstname(1,'American','Victor')
addfirstname(0,'American','Susan')
addfirstname(1,'American','Larry')
addfirstname(0,'American','Tracy')
addfirstname(1,'American','Todd')
addfirstname(0,'American','Margaret')
addfirstname(1,'American','Jeffery')
addfirstname(0,'American','Bethany')
addfirstname(1,'American','Alex')
addfirstname(0,'American','Sandra')
addfirstname(1,'American','Vincent')
addfirstname(0,'American','Kara')
addfirstname(1,'American','Curtis')
addfirstname(0,'American','Alexandra')
addfirstname(1,'American','Dennis')
addfirstname(0,'American','Nichole')
addfirstname(1,'American','Evan')
addfirstname(0,'American','Misty')
addfirstname(1,'American','Seth')
addfirstname(0,'American','Brooke')
addfirstname(1,'American','Randy')
addfirstname(0,'American','Kayla')
addfirstname(1,'American','Wesley')
addfirstname(0,'American','Candace')
addfirstname(1,'American','Jeremiah')
addfirstname(0,'American','Heidi')
addfirstname(1,'American','Blake')
addfirstname(0,'American','Rachael')
addfirstname(1,'American','Christian')
addfirstname(0,'American','Pamela')
addfirstname(1,'American','Alan')
addfirstname(0,'American','Katrina')
addfirstname(1,'American','Russell')
addfirstname(0,'American','Jillian')
addfirstname(1,'American','Adrian')
addfirstname(0,'American','Casey')
addfirstname(1,'American','Carl')
addfirstname(0,'American','Renee')
addfirstname(1,'American','Jesus')
addfirstname(0,'American','Gina')
addfirstname(1,'American','Luke')
addfirstname(0,'American','Alison')
addfirstname(1,'American','Jerry')
addfirstname(0,'American','Tina')
addfirstname(1,'American','Tony')
addfirstname(0,'American','Brenda')
addfirstname(1,'American','Miguel')
addfirstname(0,'American','Nancy')
addfirstname(1,'American','Troy')
addfirstname(0,'American','Miranda')
addfirstname(1,'American','Trevor')
addfirstname(0,'American','Denise')
addfirstname(1,'American','Mario')
addfirstname(0,'American','Lacey')
addfirstname(1,'American','Austin')
addfirstname(0,'American','Teresa')
addfirstname(1,'American','Terry')
addfirstname(0,'American','Caitlin')
addfirstname(1,'American','Andre')
addfirstname(0,'American','Linda')
addfirstname(1,'American','Johnny')
addfirstname(0,'American','Jill')
addfirstname(1,'American','Shaun')
addfirstname(0,'American','Theresa')
addfirstname(1,'American','Ricardo')
addfirstname(0,'American','Sabrina')
addfirstname(1,'American','Devin')
addfirstname(0,'American','Alisha')
addfirstname(1,'American','Lee')
addfirstname(0,'American','Whitney')
addfirstname(1,'American','Mitchell')
addfirstname(0,'American','Tamara')
addfirstname(1,'American','Lance')
addfirstname(0,'American','Hannah')
addfirstname(1,'American','Martin')
addfirstname(0,'American','Anne')
addfirstname(1,'American','Allen')
addfirstname(0,'American','Desiree')
addfirstname(1,'American','Bobby')
addfirstname(0,'American','Melinda')
addfirstname(1,'American','Lucas')
addfirstname(0,'American','Monique')
addfirstname(1,'American','Marc')
addfirstname(0,'American','Molly')
addfirstname(1,'American','Johnathan')
addfirstname(0,'American','Morgan')
addfirstname(1,'American','Chase')
addfirstname(0,'American','Krista')
addfirstname(1,'American','Jimmy')
addfirstname(0,'American','Jaclyn')
addfirstname(1,'American','Henry')
addfirstname(0,'American','Kristy')
addfirstname(1,'American','Danny')
addfirstname(0,'American','Jenny')
addfirstname(1,'American','Kristopher')
addfirstname(0,'American','Ebony')
addfirstname(1,'American','Ricky')
addfirstname(0,'American','Stefanie')
addfirstname(1,'American','Mathew')
addfirstname(0,'American','Robin')
addfirstname(1,'American','Billy')
addfirstname(0,'American','Tanya')
addfirstname(1,'American','Albert')
addfirstname(0,'American','Dawn')
addfirstname(1,'American','Lawrence')
addfirstname(0,'American','Tabitha')
addfirstname(1,'American','Jonathon')
addfirstname(0,'American','Lori')
addfirstname(1,'American','Jorge')
addfirstname(0,'American','Colleen')
addfirstname(1,'American','Clinton')
addfirstname(0,'American','Christy')
addfirstname(1,'American','Jon')
addfirstname(0,'American','Wendy')
addfirstname(1,'American','Manuel')
addfirstname(0,'American','Kelli')
addfirstname(1,'American','Randall')
addfirstname(0,'American','Meredith')
addfirstname(1,'American','Micheal')
addfirstname(0,'American','Jasmine')
addfirstname(1,'American','Clayton')
addfirstname(0,'American','Kendra')
addfirstname(1,'American','Francisco')
addfirstname(0,'American','Deanna')
addfirstname(1,'American','Jamie')
addfirstname(0,'American','Barbara')
addfirstname(1,'American','Drew')
addfirstname(0,'American','Tonya')
addfirstname(1,'American','Colin')
addfirstname(0,'American','Abigail')
addfirstname(1,'American','Ross')
addfirstname(0,'American','Carolyn')
addfirstname(1,'American','Garrett')
addfirstname(0,'American','Kari')
addfirstname(1,'American','Roger')
addfirstname(0,'American','Rebekah')
addfirstname(1,'American','Louis')
addfirstname(0,'American','Cassie')
addfirstname(1,'American','Arthur')
addfirstname(0,'American','Kristi')
addfirstname(1,'American','Isaac')
addfirstname(0,'American','Ann')
addfirstname(1,'American','Walter')
addfirstname(0,'American','Michele')
addfirstname(1,'American','Roberto')
addfirstname(0,'American','Angel')
addfirstname(1,'American','Caleb')
addfirstname(0,'American','Tasha')
addfirstname(1,'American','Cameron')
addfirstname(0,'American','Bridget')
addfirstname(1,'American','Rodney')
addfirstname(0,'American','Meagan')
addfirstname(1,'American','Willie')
addfirstname(0,'American','Adrienne')
addfirstname(1,'American','Joe')
addfirstname(0,'American','Beth')
addfirstname(1,'American','Maurice')
addfirstname(0,'American','Sharon')
# American first-name data, replayed through addfirstname() (defined
# elsewhere in this file; presumably it registers a name in the database —
# confirm against its definition).
#
# The original unrolled call list strictly alternated one male (flag 1)
# and one female (flag 0) entry, so the two tuples below are zipped and
# replayed male-first per pair to reproduce the exact same call sequence.
_AMERICAN_MALE_FIRST = (
    'Grant', 'Jermaine', 'Darren', 'Jay', 'Jack', 'Wayne', 'Bruce', 'Gerald',
    'Calvin', 'Oscar', 'Angel', 'Taylor', 'Javier', 'Roy', 'Ruben', 'Edwin',
    'Darrell', 'Brendan', 'Reginald', 'Hector', 'Geoffrey', 'Alejandro', 'Sergio', 'Fernando',
    'Omar', 'Noah', 'Brad', 'Dominic', 'Frederick', 'Terrance', 'Jerome', 'Jaime',
    'Eddie', 'Cole', 'Ernest', 'Rafael', 'Neil', 'Pedro', 'Micah', 'Raul',
    'Ronnie', 'Marvin', 'Jessie', 'Darryl', 'Theodore', 'Emmanuel', 'Eugene', 'Tyrone',
    'Leonard', 'Steve', 'Levi', 'Dale', 'Glenn', 'Ramon', 'Ivan', 'Eduardo',
    'Clifford', 'Terrence', 'Logan', 'Kelly', 'Kurt', 'Tommy', 'Dylan', 'Barry',
    'Armando', 'Nicolas', 'Julio', 'Alberto', 'Julian', 'Spencer', 'Tyson', 'Andres',
    'Melvin', 'Preston', 'Francis', 'Karl', 'Byron', 'Marco', 'Stanley', 'Cedric',
    'Chance', 'Erick', 'Tristan', 'Dwayne', 'Harold', 'Cesar', 'Devon', 'Abraham',
    'Bryce', 'Edgar', 'Brenton', 'Terrell', 'Warren', 'Leon', 'Franklin', 'Jarrod',
    'Marshall', 'Earl', 'Antoine', 'Gilbert', 'Courtney', 'Ralph', 'Arturo', 'Rene',
    'Ray', 'Shannon', 'Dean', 'Colby', 'Alfred', 'Jake', 'Darnell', 'Joey',
    'Lorenzo', 'Andy', 'Branden', 'Clarence', 'Harry', 'Ethan', 'Enrique', 'Damien',
    'Clint', 'Brady', 'Morgan', 'Beau', 'Stuart', 'Alvin', 'Kirk', 'Kelvin',
    'Alfredo', 'Howard', 'Marcos', 'Wade', 'Orlando', 'Nelson', 'Simon', 'Clifton',
    'Heath', 'Dane', 'Daryl', 'Max', 'Salvador', 'Elijah', 'Roderick', 'Gerardo',
    'Demetrius', 'Nickolas', 'Bryant', 'Chris', 'Bernard', 'Ernesto', 'Isaiah', 'Quentin',
    'Darius', 'Damon', 'Norman', 'Trent', 'Israel', 'Allan', 'Johnathon', 'Landon',
    'Dwight', 'Angelo', 'Brock', 'Neal', 'Quinton', 'Lewis', 'Emanuel', 'Duane',
    'Dallas', 'Glen', 'Vernon', 'Lamar', 'Terence', 'Kent', 'Rory', 'Ashley',
    'Damian', 'Charlie', 'Xavier', 'Leroy', 'Kenny', 'Elliott', 'Collin', 'Nolan',
    'Gavin', 'Graham', 'Jamal', 'Kerry', 'Trenton', 'Rickey', 'Lonnie', 'Eli',
    'Gordon', 'Julius', 'Rudy', 'Felix', 'Dana', 'Pablo', 'Tyrell', 'Noel',
    'Jayson', 'Marlon', 'Alexis', 'Bradford', 'Kendrick', 'Don', 'Josiah', 'Fredrick',
    'Derick', 'Jarvis', 'Jamar', 'Bret', 'Hunter', 'Marquis', 'Elliot', 'Fred',
    'Kurtis', 'Lionel', 'Rodolfo', 'Ben', 'Rolando', 'Deandre', 'Herbert', 'Zachery',
    'Robin', 'Lloyd', 'Zachariah', 'Leo', 'Roland', 'Darin', 'Kendall', 'Rocky',
    'Abel', 'Perry', 'Guillermo', 'Jarrett', 'Cornelius', 'Miles', 'Pierre', 'Roman',
    'Tanner', 'Dominique', 'Jamaal', 'Stephan', 'Dante', 'Alfonso', 'Josue', 'Owen',
    'Loren', 'Oliver', 'Greg', 'Rick', 'Saul', 'Ty', 'Donnie', 'Jarred',
    'Ismael', 'Clay', 'Freddie', 'Fabian', 'Felipe', 'Desmond', 'Dominick', 'Milton',
    'Antwan', 'Gene', 'Gustavo', 'Jameson', 'Gilberto', 'Leslie', 'Carlton', 'Frankie',
    'Jeff', 'Darrin', 'Jerrod', 'Sam', 'Rogelio', 'Marques', 'Nathanael', 'Quincy',
    'Elias', 'Toby', 'Lester', 'Guy', 'Kellen', 'Kory', 'Rusty', 'Stefan',
    'Tomas', 'Moses', 'Dewayne', 'Gerard', 'Weston', 'Dexter', 'Dusty', 'Donovan',
    'Floyd', 'Jimmie', 'Johnnie', 'Alonzo', 'Ramiro', 'Jody', 'Lamont', 'Skyler',
    'Diego', 'Esteban', 'Donte', 'Sidney', 'Chadwick', 'Trey', 'Sheldon', 'Clark',
    'Jarod', 'Brendon', 'Hugo', 'Everett', 'Salvatore', 'Cecil', 'Moises', 'Leonardo',
    'Maxwell', 'Tracy', 'Myron', 'Clyde', 'Sterling', 'Bryon', 'Jackie', 'Reid',
    'Reynaldo', 'Forrest', 'Jamel', 'Mike', 'Ted', 'Chester', 'Garry', 'Ariel',
    'Malcolm', 'Stewart', 'Randolph', 'Aron', 'Herman', 'Mason', 'Nick', 'Quintin',
    'Giovanni', 'Blaine', 'Scotty', 'Dillon', 'Colt', 'Kasey', 'Jonah', 'Santiago',
)
_AMERICAN_FEMALE_FIRST = (
    'Caroline', 'Marissa', 'Ashlee', 'Priscilla', 'Marie', 'Latasha', 'Shana', 'Brittney',
    'Kate', 'Kelsey', 'Cindy', 'Alyssa', 'Jaime', 'Tammy', 'Regina', 'Audrey',
    'Felicia', 'Mallory', 'Deborah', 'Virginia', 'Carla', 'Cara', 'Cristina', 'Jacquelyn',
    'Ana', 'Angelica', 'Marquita', 'Kristine', 'Brianna', 'Claudia', 'Mindy', 'Nina',
    'Shawna', 'Carly', 'Cheryl', 'Carmen', 'Suzanne', 'Bonnie', 'Janet', 'Abby',
    'Dominique', 'Aimee', 'Jodi', 'Janelle', 'Shanna', 'Trista', 'Martha', 'Summer',
    'Lydia', 'Diane', 'Keri', 'Autumn', 'Donna', 'Ruth', 'Olivia', 'Joy',
    'Daisy', 'Kellie', 'Mayra', 'Christie', 'Krystle', 'Paula', 'Robyn', 'Amelia',
    'Mandy', 'Melody', 'Sophia', 'Toni', 'Yolanda', 'Rose', 'Naomi', 'Evelyn',
    'Gloria', 'Shelby', 'Randi', 'Debra', 'Trisha', 'Grace', 'Hillary', 'Nikki',
    'Sylvia', 'Ashleigh', 'Jessie', 'Haley', 'Stacie', 'Claire', 'Rosa', 'Adriana',
    'Shauna', 'Rachelle', 'Shelly', 'Lacy', 'Charlene', 'Ellen', 'Karla', 'Sonia',
    'Johanna', 'Traci', 'Sheila', 'Carol', 'Marisa', 'Kerri', 'Leigh', 'Esther',
    'Hilary', 'Sherry', 'Jeanette', 'Jocelyn', 'Kasey', 'Savannah', 'Taylor', 'Anita',
    'Helen', 'Frances', 'Sonya', 'Katharine', 'Alexandria', 'Maggie', 'Tracey', 'Rhonda',
    'Tabatha', 'Tamika', 'Briana', 'Laurie', 'Brianne', 'Hope', 'Gabrielle', 'Raquel',
    'Terri', 'Kelley', 'Alissa', 'Yvonne', 'Annie', 'Charity', 'Elisabeth', 'Tia',
    'Miriam', 'Charlotte', 'Tessa', 'Kristie', 'Kira', 'Latisha', 'Angelina', 'Elise',
    'Jana', 'Janice', 'Carissa', 'Leticia', 'Roxanne', 'Eva', 'Christa', 'Irene',
    'Kerry', 'Chelsey', 'Jane', 'Kirsten', 'Cecilia', 'Maureen', 'Staci', 'Yvette',
    'Brandie', 'Lynn', 'Shelley', 'Katelyn', 'Sandy', 'Annette', 'Rochelle', 'Chasity',
    'Alice', 'Tricia', 'Kaitlin', 'Dorothy', 'Connie', 'Emma', 'Kathy', 'Jade',
    'Jennie', 'Justine', 'Nadia', 'Bianca', 'Lyndsey', 'Tanisha', 'Alana', 'Karina',
    'Antoinette', 'Amie', 'Hollie', 'Juanita', 'Lorena', 'Chrystal', 'Latonya', 'Elisa',
    'Gabriela', 'Ariel', 'Faith', 'Clarissa', 'Camille', 'Breanna', 'Aubrey', 'Jami',
    'Patrice', 'Cortney', 'Elaine', 'Eileen', 'Sally', 'Jenifer', 'Bobbie', 'Lesley',
    'Paige', 'Rita', 'Leanne', 'Jordan', 'Ruby', 'Joyce', 'Shari', 'Becky',
    'Serena', 'Katy', 'Judith', 'Sherri', 'Constance', 'Destiny', 'Ericka', 'Nora',
    'Jody', 'Jackie', 'Jolene', 'Allyson', 'Ryan', 'Elena', 'Audra', 'Alma',
    'Guadalupe', 'Marisol', 'Lakeisha', 'Beverly', 'Christin', 'Gretchen', 'Shayla', 'Taryn',
    'Keisha', 'Terra', 'Jean', 'Sheri', 'Judy', 'Meaghan', 'Alisa', 'Joanne',
    'Celeste', 'Devon', 'Janine', 'Marilyn', 'Michael', 'Lara', 'Betty', 'Celia',
    'Christen', 'Corinne', 'Jena', 'Laurel', 'Kristal', 'Rosemary', 'Belinda', 'Bridgette',
    'Tameka', 'Blair', 'Callie', 'Genevieve', 'Lakisha', 'Shirley', 'Maribel', 'Iris',
    'Jodie', 'Kendall', 'Elisha', 'Lillian', 'Sasha', 'Alyson', 'Angie', 'Britney',
    'Norma', 'Lena', 'Adrian', 'Darlene', 'Julianne', 'Christian', 'Tracie', 'Betsy',
    'Larissa', 'Bobbi', 'Jasmin', 'Shayna', 'Fallon', 'Alejandra', 'Mia', 'Kaitlyn',
    'Dianna', 'Teri', 'Joann', 'Jayme', 'Ashlie', 'Josephine', 'Noelle', 'Chanel',
    'Marlene', 'Leann', 'Hayley', 'Chandra', 'Ginger', 'Tiffani', 'Elyse', 'Lora',
    'Lucy', 'Kacie', 'Isabel', 'Marina', 'Yesenia', 'Tera', 'Adrianne', 'Tiana',
    'Brenna', 'Vivian', 'Cassidy', 'Raven', 'Lea', 'Dena', 'Tiara', 'Chantel',
    'Margarita', 'Trina', 'Susana', 'Jacklyn', 'Tania', 'Tiffanie', 'Christopher', 'Esmeralda',
    'Anastasia', 'Christi', 'Tatiana', 'Billie', 'Daniela', 'Kim', 'Shameka', 'Blanca',
    'Lorraine', 'Breanne', 'Carolina', 'Lacie', 'Lana', 'Melisa', 'Darcy', 'Cathy',
)
# Replay in the original interleaved order: male entry, then female entry.
for _male_name, _female_name in zip(_AMERICAN_MALE_FIRST,
                                    _AMERICAN_FEMALE_FIRST):
    addfirstname(1, 'American', _male_name)
    addfirstname(0, 'American', _female_name)
# American surname data, replayed through addlastname() (defined elsewhere
# in this file; presumably it registers a surname in the database — confirm
# against its definition).  The tuple preserves the exact original
# registration order, so the resulting call sequence is identical to the
# unrolled version.
_AMERICAN_LAST_NAMES = (
    'Smith', 'Johnson', 'Williams', 'Brown', 'Jones', 'Miller', 'Davis', 'Garcia',
    'Rodriguez', 'Wilson', 'Martinez', 'Anderson', 'Taylor', 'Thomas', 'Hernandez', 'Moore',
    'Martin', 'Jackson', 'Thompson', 'White', 'Lopez', 'Lee', 'Gonzalez', 'Harris',
    'Clark', 'Lewis', 'Robinson', 'Walker', 'Perez', 'Hall', 'Young', 'Allen',
    'Sanchez', 'Wright', 'King', 'Scott', 'Green', 'Baker', 'Adams', 'Nelson',
    'Hill', 'Ramirez', 'Campbell', 'Mitchell', 'Roberts', 'Carter', 'Phillips', 'Evans',
    'Turner', 'Torres', 'Parker', 'Collins', 'Edwards', 'Stewart', 'Flores', 'Morris',
    'Nguyen', 'Murphy', 'Rivera', 'Cook', 'Rogers', 'Morgan', 'Peterson', 'Cooper',
    'Reed', 'Bailey', 'Bell', 'Gomez', 'Kelly', 'Howard', 'Ward', 'Cox',
    'Diaz', 'Richardson', 'Wood', 'Watson', 'Brooks', 'Bennett', 'Gray', 'James',
    'Reyes', 'Cruz', 'Hughes', 'Price', 'Myers', 'Long', 'Foster', 'Sanders',
    'Ross', 'Morales', 'Powell', 'Sullivan', 'Russell', 'Ortiz', 'Jenkins', 'Gutierrez',
    'Perry', 'Butler', 'Barnes', 'Fisher', 'Henderson', 'Coleman', 'Simmons', 'Patterson',
    'Jordan', 'Reynolds', 'Hamilton', 'Graham', 'Kim', 'Gonzales', 'Alexander', 'Ramos',
    'Wallace', 'Griffin', 'West', 'Cole', 'Hayes', 'Chavez', 'Gibson', 'Bryant',
    'Ellis', 'Stevens', 'Murray', 'Ford', 'Marshall', 'Owens', 'Mcdonald', 'Harrison',
    'Ruiz', 'Kennedy', 'Wells', 'Alvarez', 'Woods', 'Mendoza', 'Castillo', 'Olson',
    'Webb', 'Washington', 'Tucker', 'Freeman', 'Burns', 'Henry', 'Vasquez', 'Snyder',
    'Simpson', 'Crawford', 'Jimenez', 'Porter', 'Mason', 'Shaw', 'Gordon', 'Wagner',
    'Hunter', 'Romero', 'Hicks', 'Dixon', 'Hunt', 'Palmer', 'Robertson', 'Black',
    'Holmes', 'Stone', 'Meyer', 'Boyd', 'Mills', 'Warren', 'Fox', 'Rose',
    'Rice', 'Moreno', 'Schmidt', 'Patel', 'Ferguson', 'Nichols', 'Herrera', 'Medina',
    'Ryan', 'Fernandez', 'Weaver', 'Daniels', 'Stephens', 'Gardner', 'Payne', 'Kelley',
    'Dunn', 'Pierce', 'Arnold', 'Tran', 'Spencer', 'Peters', 'Hawkins', 'Grant',
    'Hansen', 'Castro', 'Hoffman', 'Hart', 'Elliott', 'Cunningham', 'Knight', 'Bradley',
    'Carroll', 'Hudson', 'Duncan', 'Armstrong', 'Berry', 'Andrews', 'Johnston', 'Ray',
    'Lane', 'Riley', 'Carpenter', 'Perkins', 'Aguilar', 'Silva', 'Richards', 'Willis',
    'Matthews', 'Chapman', 'Lawrence', 'Garza', 'Vargas', 'Watkins', 'Wheeler', 'Larson',
    'Carlson', 'Harper', 'George', 'Greene', 'Burke', 'Guzman', 'Morrison', 'Munoz',
    'Jacobs', 'Obrien', 'Lawson', 'Franklin', 'Lynch', 'Bishop', 'Carr', 'Salazar',
    'Austin', 'Mendez', 'Gilbert', 'Jensen', 'Williamson', 'Montgomery', 'Harvey', 'Oliver',
    'Howell', 'Dean', 'Hanson', 'Weber', 'Garrett', 'Sims', 'Burton', 'Fuller',
    'Soto', 'Mccoy', 'Welch', 'Chen', 'Schultz', 'Walters', 'Reid', 'Fields',
    'Walsh', 'Little', 'Fowler', 'Bowman', 'Davidson', 'May', 'Day', 'Schneider',
    'Newman', 'Brewer', 'Lucas', 'Holland', 'Wong', 'Banks', 'Santos', 'Curtis',
    'Pearson', 'Delgado', 'Valdez', 'Pena', 'Rios', 'Douglas', 'Sandoval', 'Barrett',
    'Hopkins', 'Keller', 'Guerrero', 'Stanley', 'Bates', 'Alvarado', 'Beck', 'Ortega',
    'Wade', 'Estrada', 'Contreras', 'Barnett', 'Caldwell', 'Santiago', 'Lambert', 'Powers',
    'Chambers', 'Nunez', 'Craig', 'Leonard', 'Lowe', 'Rhodes', 'Byrd', 'Gregory',
    'Shelton', 'Frazier', 'Becker', 'Maldonado', 'Fleming', 'Vega', 'Sutton', 'Cohen',
    'Jennings', 'Parks', 'Mcdaniel', 'Watts', 'Barker', 'Norris',
)
for _surname in _AMERICAN_LAST_NAMES:
    addlastname('American', _surname)
addlastname('American','Vaughn')
addlastname('American','Vazquez')
addlastname('American','Holt')
addlastname('American','Schwartz')
addlastname('American','Steele')
addlastname('American','Benson')
addlastname('American','Neal')
addlastname('American','Dominguez')
addlastname('American','Horton')
addlastname('American','Terry')
addlastname('American','Wolfe')
addlastname('American','Hale')
addlastname('American','Lyons')
addlastname('American','Graves')
addlastname('American','Haynes')
addlastname('American','Miles')
addlastname('American','Park')
addlastname('American','Warner')
addlastname('American','Padilla')
addlastname('American','Bush')
addlastname('American','Thornton')
addlastname('American','Mccarthy')
addlastname('American','Mann')
addlastname('American','Zimmerman')
addlastname('American','Erickson')
addlastname('American','Fletcher')
addlastname('American','Mckinney')
addlastname('American','Page')
addlastname('American','Dawson')
addlastname('American','Joseph')
addlastname('American','Marquez')
addlastname('American','Reeves')
addlastname('American','Klein')
addlastname('American','Espinoza')
addlastname('American','Baldwin')
addlastname('American','Moran')
addlastname('American','Love')
addlastname('American','Robbins')
addlastname('American','Higgins')
addlastname('American','Ball')
addlastname('American','Cortez')
addlastname('American','Le')
addlastname('American','Griffith')
addlastname('American','Bowen')
addlastname('American','Sharp')
addlastname('American','Cummings')
addlastname('American','Ramsey')
addlastname('American','Hardy')
addlastname('American','Swanson')
addlastname('American','Barber')
addlastname('American','Acosta')
addlastname('American','Luna')
addlastname('American','Chandler')
addlastname('American','Blair')
addlastname('American','Daniel')
addlastname('American','Cross')
addlastname('American','Simon')
addlastname('American','Dennis')
addlastname('American','Oconnor')
addlastname('American','Quinn')
addlastname('American','Gross')
addlastname('American','Navarro')
addlastname('American','Moss')
addlastname('American','Fitzgerald')
addlastname('American','Doyle')
addlastname('American','Mclaughlin')
addlastname('American','Rojas')
addlastname('American','Rodgers')
addlastname('American','Stevenson')
addlastname('American','Singh')
addlastname('American','Yang')
addlastname('American','Figueroa')
addlastname('American','Harmon')
addlastname('American','Newton')
addlastname('American','Paul')
addlastname('American','Manning')
addlastname('American','Garner')
addlastname('American','Mcgee')
addlastname('American','Reese')
addlastname('American','Francis')
addlastname('American','Burgess')
addlastname('American','Adkins')
addlastname('American','Goodman')
addlastname('American','Curry')
addlastname('American','Brady')
addlastname('American','Christensen')
addlastname('American','Potter')
addlastname('American','Walton')
addlastname('American','Goodwin')
addlastname('American','Mullins')
addlastname('American','Molina')
addlastname('American','Webster')
addlastname('American','Fischer')
addlastname('American','Campos')
addlastname('American','Avila')
addlastname('American','Sherman')
addlastname('American','Todd')
addlastname('American','Chang')
addlastname('American','Blake')
addlastname('American','Malone')
addlastname('American','Wolf')
addlastname('American','Hodges')
addlastname('American','Juarez')
addlastname('American','Gill')
addlastname('American','Farmer')
addlastname('American','Hines')
addlastname('American','Gallagher')
addlastname('American','Duran')
addlastname('American','Hubbard')
addlastname('American','Cannon')
addlastname('American','Miranda')
addlastname('American','Wang')
addlastname('American','Saunders')
addlastname('American','Tate')
addlastname('American','Mack')
addlastname('American','Hammond')
addlastname('American','Carrillo')
addlastname('American','Townsend')
addlastname('American','Wise')
addlastname('American','Ingram')
addlastname('American','Barton')
addlastname('American','Mejia')
addlastname('American','Ayala')
addlastname('American','Schroeder')
addlastname('American','Hampton')
addlastname('American','Rowe')
addlastname('American','Parsons')
addlastname('American','Frank')
addlastname('American','Waters')
addlastname('American','Strickland')
addlastname('American','Osborne')
addlastname('American','Maxwell')
addlastname('American','Chan')
addlastname('American','Deleon')
addlastname('American','Norman')
addlastname('American','Harrington')
addlastname('American','Casey')
addlastname('American','Patton')
addlastname('American','Logan')
addlastname('American','Bowers')
addlastname('American','Mueller')
addlastname('American','Glover')
addlastname('American','Floyd')
addlastname('American','Hartman')
addlastname('American','Buchanan')
addlastname('American','Cobb')
addlastname('American','French')
addlastname('American','Kramer')
addlastname('American','Mccormick')
addlastname('American','Clarke')
addlastname('American','Tyler')
addlastname('American','Gibbs')
addlastname('American','Moody')
addlastname('American','Conner')
addlastname('American','Sparks')
addlastname('American','Mcguire')
addlastname('American','Leon')
addlastname('American','Bauer')
addlastname('American','Norton')
addlastname('American','Pope')
addlastname('American','Flynn')
addlastname('American','Hogan')
addlastname('American','Robles')
addlastname('American','Salinas')
addlastname('American','Yates')
addlastname('American','Lindsey')
addlastname('American','Lloyd')
addlastname('American','Marsh')
addlastname('American','Mcbride')
addlastname('American','Owen')
addlastname('American','Solis')
addlastname('American','Pham')
addlastname('American','Lang')
addlastname('American','Pratt')
addlastname('American','Lara')
addlastname('American','Brock')
addlastname('American','Ballard')
addlastname('American','Trujillo')
addlastname('American','Shaffer')
addlastname('American','Drake')
addlastname('American','Roman')
addlastname('American','Aguirre')
addlastname('American','Morton')
addlastname('American','Stokes')
addlastname('American','Lamb')
addlastname('American','Pacheco')
addlastname('American','Patrick')
addlastname('American','Cochran')
addlastname('American','Shepherd')
addlastname('American','Cain')
addlastname('American','Burnett')
addlastname('American','Hess')
addlastname('American','Li')
addlastname('American','Cervantes')
addlastname('American','Olsen')
addlastname('American','Briggs')
addlastname('American','Ochoa')
addlastname('American','Cabrera')
addlastname('American','Velasquez')
addlastname('American','Montoya')
addlastname('American','Roth')
addlastname('American','Meyers')
addlastname('American','Cardenas')
addlastname('American','Fuentes')
addlastname('American','Weiss')
addlastname('American','Hoover')
addlastname('American','Wilkins')
addlastname('American','Nicholson')
addlastname('American','Underwood')
addlastname('American','Short')
addlastname('American','Carson')
addlastname('American','Morrow')
addlastname('American','Colon')
addlastname('American','Holloway')
addlastname('American','Summers')
addlastname('American','Bryan')
addlastname('American','Petersen')
addlastname('American','Mckenzie')
addlastname('American','Serrano')
addlastname('American','Wilcox')
addlastname('American','Carey')
addlastname('American','Clayton')
addlastname('American','Poole')
addlastname('American','Calderon')
addlastname('American','Gallegos')
addlastname('American','Greer')
addlastname('American','Rivas')
addlastname('American','Guerra')
addlastname('American','Decker')
addlastname('American','Collier')
addlastname('American','Wall')
addlastname('American','Whitaker')
addlastname('American','Bass')
addlastname('American','Flowers')
addlastname('American','Davenport')
addlastname('American','Conley')
addlastname('American','Houston')
addlastname('American','Huff')
addlastname('American','Copeland')
addlastname('American','Hood')
addlastname('American','Monroe')
addlastname('American','Massey')
addlastname('American','Roberson')
addlastname('American','Combs')
addlastname('American','Franco')
addlastname('American','Larsen')
addlastname('American','Pittman')
addlastname('American','Randall')
addlastname('American','Skinner')
addlastname('American','Wilkinson')
addlastname('American','Kirby')
addlastname('American','Cameron')
addlastname('American','Bridges')
addlastname('American','Anthony')
addlastname('American','Richard')
addlastname('American','Kirk')
addlastname('American','Bruce')
addlastname('American','Singleton')
addlastname('American','Mathis')
addlastname('American','Bradford')
addlastname('American','Boone')
addlastname('American','Abbott')
addlastname('American','Charles')
addlastname('American','Allison')
addlastname('American','Sweeney')
addlastname('American','Atkinson')
addlastname('American','Horn')
addlastname('American','Jefferson')
addlastname('American','Rosales')
addlastname('American','York')
addlastname('American','Christian')
addlastname('American','Phelps')
addlastname('American','Farrell')
addlastname('American','Castaneda')
addlastname('American','Nash')
addlastname('American','Dickerson')
addlastname('American','Bond')
addlastname('American','Wyatt')
addlastname('American','Foley')
addlastname('American','Chase')
addlastname('American','Gates')
addlastname('American','Vincent')
addlastname('American','Mathews')
addlastname('American','Hodge')
addlastname('American','Garrison')
addlastname('American','Trevino')
addlastname('American','Villarreal')
addlastname('American','Heath')
addlastname('American','Dalton')
addlastname('American','Valencia')
addlastname('American','Callahan')
addlastname('American','Hensley')
addlastname('American','Atkins')
addlastname('American','Huffman')
addlastname('American','Roy')
addlastname('American','Boyer')
addlastname('American','Shields')
addlastname('American','Lin')
addlastname('American','Hancock')
addlastname('American','Grimes')
addlastname('American','Glenn')
addlastname('American','Cline')
addlastname('American','Delacruz')
addlastname('American','Camacho')
addlastname('American','Dillon')
addlastname('American','Parrish')
addlastname('American','Oneill')
addlastname('American','Melton')
addlastname('American','Booth')
addlastname('American','Kane')
addlastname('American','Berg')
addlastname('American','Harrell')
addlastname('American','Pitts')
addlastname('American','Savage')
addlastname('American','Wiggins')
addlastname('American','Brennan')
addlastname('American','Salas')
addlastname('American','Marks')
addlastname('American','Russo')
addlastname('American','Sawyer')
addlastname('American','Baxter')
addlastname('American','Golden')
addlastname('American','Hutchinson')
addlastname('American','Liu')
addlastname('American','Walter')
addlastname('American','Mcdowell')
addlastname('American','Wiley')
addlastname('American','Rich')
addlastname('American','Humphrey')
addlastname('American','Johns')
addlastname('American','Koch')
addlastname('American','Suarez')
addlastname('American','Hobbs')
addlastname('American','Beard')
addlastname('American','Gilmore')
addlastname('American','Ibarra')
addlastname('American','Keith')
addlastname('American','Macias')
addlastname('American','Khan')
addlastname('American','Andrade')
addlastname('American','Ware')
addlastname('American','Stephenson')
addlastname('American','Henson')
addlastname('American','Wilkerson')
addlastname('American','Dyer')
addlastname('American','Mcclure')
addlastname('American','Blackwell')
addlastname('American','Mercado')
addlastname('American','Tanner')
addlastname('American','Eaton')
addlastname('American','Clay')
addlastname('American','Barron')
addlastname('American','Beasley')
addlastname('American','Oneal')
addlastname('American','Preston')
addlastname('American','Small')
addlastname('American','Wu')
addlastname('American','Zamora')
addlastname('American','Macdonald')
addlastname('American','Vance')
addlastname('American','Snow')
addlastname('American','Mcclain')
addlastname('American','Stafford')
addlastname('American','Orozco')
addlastname('American','Barry')
addlastname('American','English')
addlastname('American','Shannon')
addlastname('American','Kline')
addlastname('American','Jacobson')
addlastname('American','Woodard')
addlastname('American','Huang')
addlastname('American','Kemp')
addlastname('American','Mosley')
addlastname('American','Prince')
addlastname('American','Merritt')
addlastname('American','Hurst')
addlastname('American','Villanueva')
addlastname('American','Roach')
addlastname('American','Nolan')
addlastname('American','Lam')
addlastname('American','Yoder')
addlastname('American','Mccullough')
addlastname('American','Lester')
addlastname('American','Santana')
addlastname('American','Valenzuela')
addlastname('American','Winters')
addlastname('American','Barrera')
addlastname('American','Leach')
addlastname('American','Orr')
addlastname('American','Berger')
addlastname('American','Mckee')
addlastname('American','Strong')
addlastname('American','Conway')
addlastname('American','Stein')
addlastname('American','Whitehead')
addlastname('American','Bullock')
addlastname('American','Escobar')
addlastname('American','Knox')
addlastname('American','Meadows')
addlastname('American','Solomon')
addlastname('American','Velez')
addlastname('American','Odonnell')
addlastname('American','Kerr')
addlastname('American','Stout')
addlastname('American','Blankenship')
addlastname('American','Browning')
addlastname('American','Kent')
addlastname('American','Lozano')
addlastname('American','Bartlett')
addlastname('American','Pruitt')
addlastname('American','Buck')
addlastname('American','Barr')
addlastname('American','Gaines')
addlastname('American','Durham')
addlastname('American','Gentry')
addlastname('American','Mcintyre')
addlastname('American','Sloan')
addlastname('American','Melendez')
addlastname('American','Rocha')
addlastname('American','Herman')
addlastname('American','Sexton')
addlastname('American','Moon')
addlastname('American','Hendricks')
addlastname('American','Rangel')
addlastname('American','Stark')
addlastname('American','Lowery')
addlastname('American','Hardin')
addlastname('American','Hull')
addlastname('American','Sellers')
addlastname('American','Ellison')
addlastname('American','Calhoun')
addlastname('American','Gillespie')
addlastname('American','Mora')
addlastname('American','Knapp')
addlastname('American','Mccall')
addlastname('American','Morse')
addlastname('American','Dorsey')
addlastname('American','Weeks')
addlastname('American','Nielsen')
addlastname('American','Livingston')
addlastname('American','Leblanc')
addlastname('American','Mclean')
addlastname('American','Bradshaw')
addlastname('American','Glass')
addlastname('American','Middleton')
addlastname('American','Buckley')
addlastname('American','Schaefer')
addlastname('American','Frost')
addlastname('American','Howe')
addlastname('American','House')
addlastname('American','Mcintosh')
addlastname('American','Ho')
addlastname('American','Pennington')
addlastname('American','Reilly')
addlastname('American','Hebert')
addlastname('American','Mcfarland')
addlastname('American','Hickman')
addlastname('American','Noble')
addlastname('American','Spears')
addlastname('American','Conrad')
addlastname('American','Arias')
addlastname('American','Galvan')
addlastname('American','Velazquez')
addlastname('American','Huynh')
addlastname('American','Frederick')
addlastname('American','Randolph')
addlastname('American','Cantu')
addlastname('American','Fitzpatrick')
addlastname('American','Mahoney')
addlastname('American','Peck')
addlastname('American','Villa')
addlastname('American','Michael')
addlastname('American','Donovan')
addlastname('American','Mcconnell')
addlastname('American','Walls')
addlastname('American','Boyle')
addlastname('American','Mayer')
addlastname('American','Zuniga')
addlastname('American','Giles')
addlastname('American','Pineda')
addlastname('American','Pace')
addlastname('American','Hurley')
addlastname('American','Mays')
addlastname('American','Mcmillan')
addlastname('American','Crosby')
addlastname('American','Ayers')
addlastname('American','Case')
addlastname('American','Bentley')
addlastname('American','Shepard')
addlastname('American','Everett')
addlastname('American','Pugh')
addlastname('American','David')
addlastname('American','Mcmahon')
addlastname('American','Dunlap')
addlastname('American','Bender')
addlastname('American','Hahn')
addlastname('American','Harding')
addlastname('American','Acevedo')
addlastname('American','Raymond')
addlastname('American','Blackburn')
addlastname('American','Duffy')
addlastname('American','Landry')
addlastname('American','Dougherty')
addlastname('American','Bautista')
addlastname('American','Shah')
addlastname('American','Potts')
addlastname('American','Arroyo')
addlastname('American','Valentine')
addlastname('American','Meza')
addlastname('American','Gould')
addlastname('American','Vaughan')
addlastname('American','Fry')
addlastname('American','Rush')
addlastname('American','Avery')
addlastname('American','Herring')
addlastname('American','Dodson')
addlastname('American','Clements')
addlastname('American','Sampson')
addlastname('American','Tapia')
addlastname('American','Bean')
addlastname('American','Lynn')
addlastname('American','Crane')
addlastname('American','Farley')
addlastname('American','Cisneros')
addlastname('American','Benton')
addlastname('American','Ashley')
addlastname('American','Mckay')
addlastname('American','Finley')
addlastname('American','Best')
addlastname('American','Blevins')
addlastname('American','Friedman')
addlastname('American','Moses')
addlastname('American','Sosa')
addlastname('American','Blanchard')
addlastname('American','Huber')
addlastname('American','Frye')
addlastname('American','Krueger')
addlastname('American','Bernard')
addlastname('American','Rosario')
addlastname('American','Rubio')
addlastname('American','Mullen')
addlastname('American','Benjamin')
addlastname('American','Haley')
addlastname('American','Chung')
addlastname('American','Moyer')
addlastname('American','Choi')
addlastname('American','Horne')
addlastname('American','Yu')
addlastname('American','Woodward')
addlastname('American','Ali')
addlastname('American','Nixon')
addlastname('American','Hayden')
addlastname('American','Rivers')
addlastname('American','Estes')
addlastname('American','Mccarty')
addlastname('American','Richmond')
addlastname('American','Stuart')
addlastname('American','Maynard')
addlastname('American','Brandt')
addlastname('American','Oconnell')
addlastname('American','Hanna')
addlastname('American','Sanford')
addlastname('American','Sheppard')
addlastname('American','Church')
addlastname('American','Burch')
addlastname('American','Levy')
addlastname('American','Rasmussen')
addlastname('American','Coffey')
addlastname('American','Ponce')
addlastname('American','Faulkner')
addlastname('American','Donaldson')
addlastname('American','Schmitt')
addlastname('American','Novak')
addlastname('American','Costa')
addlastname('American','Montes')
addlastname('American','Booker')
addlastname('American','Cordova')
addlastname('American','Waller')
addlastname('American','Arellano')
addlastname('American','Maddox')
addlastname('American','Mata')
addlastname('American','Bonilla')
addlastname('American','Stanton')
addlastname('American','Compton')
addlastname('American','Kaufman')
addlastname('American','Dudley')
addlastname('American','Mcpherson')
addlastname('American','Beltran')
addlastname('American','Dickson')
addlastname('American','Mccann')
addlastname('American','Villegas')
addlastname('American','Proctor')
addlastname('American','Hester')
addlastname('American','Cantrell')
addlastname('American','Daugherty')
addlastname('American','Cherry')
addlastname('American','Bray')
addlastname('American','Davila')
addlastname('American','Rowland')
addlastname('American','Levine')
addlastname('American','Madden')
addlastname('American','Spence')
addlastname('American','Good')
addlastname('American','Irwin')
addlastname('American','Werner')
addlastname('American','Krause')
addlastname('American','Petty')
addlastname('American','Whitney')
addlastname('American','Baird')
addlastname('American','Hooper')
addlastname('American','Pollard')
addlastname('American','Zavala')
addlastname('American','Jarvis')
addlastname('American','Holden')
addlastname('American','Haas')
addlastname('American','Hendrix')
addlastname('American','Mcgrath')
addlastname('American','Bird')
addlastname('American','Lucero')
addlastname('American','Terrell')
addlastname('American','Riggs')
addlastname('American','Joyce')
addlastname('American','Mercer')
addlastname('American','Rollins')
addlastname('American','Galloway')
addlastname('American','Duke')
addlastname('American','Odom')
addlastname('American','Andersen')
addlastname('American','Downs')
addlastname('American','Hatfield')
addlastname('American','Benitez')
addlastname('American','Archer')
addlastname('American','Huerta')
addlastname('American','Travis')
addlastname('American','Mcneil')
addlastname('American','Hinton')
addlastname('American','Zhang')
addlastname('American','Hays')
addlastname('American','Mayo')
addlastname('American','Fritz')
addlastname('American','Branch')
addlastname('American','Mooney')
addlastname('American','Ewing')
addlastname('American','Ritter')
addlastname('American','Esparza')
addlastname('American','Frey')
addlastname('American','Braun')
addlastname('American','Gay')
addlastname('American','Riddle')
addlastname('American','Haney')
addlastname('American','Kaiser')
addlastname('American','Holder')
addlastname('American','Chaney')
addlastname('American','Mcknight')
addlastname('American','Gamble')
addlastname('American','Vang')
addlastname('American','Cooley')
addlastname('American','Carney')
addlastname('American','Cowan')
addlastname('American','Forbes')
addlastname('American','Ferrell')
addlastname('American','Davies')
addlastname('American','Barajas')
addlastname('American','Shea')
addlastname('American','Osborn')
addlastname('American','Bright')
addlastname('American','Cuevas')
addlastname('American','Bolton')
addlastname('American','Murillo')
addlastname('American','Lutz')
addlastname('American','Duarte')
addlastname('American','Kidd')
addlastname('American','Key')
addlastname('American','Cooke')
# Register common Brazilian given names with the name tables.
# The first argument appears to be a gender flag (0 on feminine names,
# 1 on masculine names throughout this run) — TODO confirm against the
# definition of addfirstname() earlier in this file.
# Entries alternate female/male and may include compound given names
# (e.g. 'João Pedro', 'Maria Eduarda').
addfirstname(0,'Brazilian','Amanda')
addfirstname(1,'Brazilian','Lucas')
addfirstname(0,'Brazilian','Mariana')
addfirstname(1,'Brazilian','Gabriel')
addfirstname(0,'Brazilian','Julia')
addfirstname(1,'Brazilian','Matheus')
addfirstname(0,'Brazilian','Gabriela')
addfirstname(1,'Brazilian','Vinicius')
addfirstname(0,'Brazilian','Ana')
addfirstname(1,'Brazilian','Luiz')
addfirstname(0,'Brazilian','Larissa')
addfirstname(1,'Brazilian','João')
addfirstname(0,'Brazilian','Thaís')
addfirstname(1,'Brazilian','Rafael')
addfirstname(0,'Brazilian','Juliana')
addfirstname(1,'Brazilian','Pedro')
addfirstname(0,'Brazilian','Jessica')
addfirstname(1,'Brazilian','Leonardo')
addfirstname(0,'Brazilian','Fernanda')
addfirstname(1,'Brazilian','Bruno')
addfirstname(0,'Brazilian','Leticia')
addfirstname(1,'Brazilian','Daniel')
addfirstname(0,'Brazilian','Bruna')
addfirstname(1,'Brazilian','Felipe')
addfirstname(0,'Brazilian','Camila')
addfirstname(1,'Brazilian','Guilherme')
addfirstname(0,'Brazilian','Natália')
addfirstname(1,'Brazilian','Paulo')
addfirstname(0,'Brazilian','Beatriz')
addfirstname(1,'Brazilian','Gustavo')
addfirstname(0,'Brazilian','Maria')
addfirstname(1,'Brazilian','Mateus')
addfirstname(0,'Brazilian','Rafaela')
addfirstname(1,'Brazilian','Douglas')
addfirstname(0,'Brazilian','Vitoria')
addfirstname(1,'Brazilian','Fernando')
addfirstname(0,'Brazilian','Luana')
addfirstname(1,'Brazilian','Igor')
addfirstname(0,'Brazilian','Laura')
addfirstname(1,'Brazilian','Thiago')
addfirstname(0,'Brazilian','Caroline')
addfirstname(1,'Brazilian','Anderson')
addfirstname(0,'Brazilian','Carolina')
addfirstname(1,'Brazilian','José')
addfirstname(0,'Brazilian','Bárbara')
addfirstname(1,'Brazilian','Marcos')
addfirstname(0,'Brazilian','Milena')
addfirstname(1,'Brazilian','João Pedro')
addfirstname(0,'Brazilian','Raquel')
addfirstname(1,'Brazilian','Vitor')
addfirstname(0,'Brazilian','Clara')
addfirstname(1,'Brazilian','Leandro')
addfirstname(0,'Brazilian','Luiza')
addfirstname(1,'Brazilian','Eduardo')
addfirstname(0,'Brazilian','Isabela')
addfirstname(1,'Brazilian','Caio')
addfirstname(0,'Brazilian','Gabrielle')
addfirstname(1,'Brazilian','Arthur')
addfirstname(0,'Brazilian','Sarah')
addfirstname(1,'Brazilian','Flavio')
addfirstname(0,'Brazilian','Debora')
addfirstname(1,'Brazilian','Diego')
addfirstname(0,'Brazilian','Bianca')
addfirstname(1,'Brazilian','Alex')
addfirstname(0,'Brazilian','Daniela')
addfirstname(1,'Brazilian','Carlos')
addfirstname(0,'Brazilian','Anna')
addfirstname(1,'Brazilian','Willian')
addfirstname(0,'Brazilian','Alessandra')
addfirstname(1,'Brazilian','Fábio')
addfirstname(0,'Brazilian','Maria Eduarda')
addfirstname(1,'Brazilian','Luan')
addfirstname(0,'Brazilian','Aline')
addfirstname(1,'Brazilian','Murilo')
addfirstname(0,'Brazilian','Lívia')
addfirstname(1,'Brazilian','Henrique')
addfirstname(0,'Brazilian','Lara')
addfirstname(1,'Brazilian','Andre')
addfirstname(0,'Brazilian','Andressa')
addfirstname(1,'Brazilian','João Vitor')
addfirstname(0,'Brazilian','Vanessa')
addfirstname(1,'Brazilian','Jeferson')
addfirstname(0,'Brazilian','Patricia')
addfirstname(1,'Brazilian','Luciano')
addfirstname(0,'Brazilian','Brenda')
addfirstname(1,'Brazilian','Raphael')
addfirstname(0,'Brazilian','Cristina')
addfirstname(1,'Brazilian','Renan')
addfirstname(0,'Brazilian','Flavia')
addfirstname(1,'Brazilian','Raul')
addfirstname(0,'Brazilian','Ana Luiza')
addfirstname(1,'Brazilian','Alexandre')
addfirstname(0,'Brazilian','Aléxia')
addfirstname(1,'Brazilian','Jonathan')
addfirstname(0,'Brazilian','Ana Clara')
addfirstname(1,'Brazilian','Pablo')
addfirstname(0,'Brazilian','Daiane')
addfirstname(1,'Brazilian','Jean')
addfirstname(0,'Brazilian','Maria Clara')
addfirstname(1,'Brazilian','Erick')
addfirstname(0,'Brazilian','Sabrina')
addfirstname(1,'Brazilian','Hugo')
addfirstname(0,'Brazilian','Giovanna')
addfirstname(1,'Brazilian','Wesley')
addfirstname(0,'Brazilian','Mary')
addfirstname(1,'Brazilian','Adriano')
addfirstname(0,'Brazilian','Tainara')
addfirstname(1,'Brazilian','Fabrício')
addfirstname(0,'Brazilian','Mayara')
addfirstname(1,'Brazilian','Ricardo')
addfirstname(0,'Brazilian','Ana Carolina')
addfirstname(1,'Brazilian','Victor')
addfirstname(0,'Brazilian','Carol')
addfirstname(1,'Brazilian','Marcelo')
addfirstname(0,'Brazilian','Fabiana')
addfirstname(1,'Brazilian','Ramon')
addfirstname(0,'Brazilian','Nathalia')
addfirstname(1,'Brazilian','Diogo')
addfirstname(0,'Brazilian','Michele')
addfirstname(1,'Brazilian','Jefferson')
addfirstname(0,'Brazilian','Monique')
addfirstname(1,'Brazilian','Italo')
addfirstname(0,'Brazilian','Sofia')
addfirstname(1,'Brazilian','Luis')
addfirstname(0,'Brazilian','Gabi')
addfirstname(1,'Brazilian','Luiz Henrique')
addfirstname(0,'Brazilian','Paula')
addfirstname(1,'Brazilian','Tiago')
addfirstname(0,'Brazilian','Pamela')
addfirstname(1,'Brazilian','Geovanne')
addfirstname(0,'Brazilian','Luciana')
addfirstname(1,'Brazilian','Junior')
addfirstname(0,'Brazilian','Marcela')
addfirstname(1,'Brazilian','Vagner')
addfirstname(0,'Brazilian','Luisa')
addfirstname(1,'Brazilian','Patrick')
addfirstname(0,'Brazilian','Raissa')
addfirstname(1,'Brazilian','Maicon')
addfirstname(0,'Brazilian','Izabella')
addfirstname(1,'Brazilian','Nícolas')
addfirstname(0,'Brazilian','Lais')
addfirstname(1,'Brazilian','Dhiogo')
addfirstname(0,'Brazilian','Carla')
addfirstname(1,'Brazilian','Rodrigo')
addfirstname(0,'Brazilian','Sara')
addfirstname(1,'Brazilian','Kaue')
addfirstname(0,'Brazilian','Stephanie')
addfirstname(1,'Brazilian','Sidney')
addfirstname(0,'Brazilian','Lêe Mendonça')  # NOTE(review): looks like a given name + surname rather than a first name — verify source data
addfirstname(1,'Brazilian','Francisco')
addfirstname(0,'Brazilian','Yumi')
addfirstname(1,'Brazilian','Robson')
addfirstname(0,'Brazilian','Mariane')
addfirstname(1,'Brazilian','Julio')
addfirstname(0,'Brazilian','Taynara')
addfirstname(1,'Brazilian','Danilo')
addfirstname(0,'Brazilian','Alice')
addfirstname(1,'Brazilian','Jose Matheus')
addfirstname(0,'Brazilian','Maisa')
addfirstname(1,'Brazilian','Aline')  # NOTE(review): 'Aline' is also registered with flag 0 above — confirm this duplicate/flag is intended
addfirstname(0,'Brazilian','Erika')
addfirstname(1,'Brazilian','Josue')
addfirstname(0,'Brazilian','Helena')
addfirstname(1,'Brazilian','Giliard')
addfirstname(0,'Brazilian','Nicole')
addfirstname(1,'Brazilian','Claudio')
addfirstname(0,'Brazilian','Karina')
addfirstname(1,'Brazilian','Jorge')
addfirstname(0,'Brazilian','Yasmin')
addfirstname(1,'Brazilian','Nathan')
addfirstname(0,'Brazilian','Veronica')
addfirstname(1,'Brazilian','Roberto')
addfirstname(0,'Brazilian','Alana')
addfirstname(1,'Brazilian','Alexsandro')
addfirstname(0,'Brazilian','Nayara')
addfirstname(1,'Brazilian','Davi')
addfirstname(0,'Brazilian','Ana Paula')
addfirstname(1,'Brazilian','Augusto')
addfirstname(0,'Brazilian','Tatiane')
addfirstname(1,'Brazilian','Alef')
addfirstname(0,'Brazilian','Layla')
addfirstname(1,'Brazilian','Breno')
addfirstname(0,'Brazilian','Babi')
addfirstname(1,'Brazilian','Robert')
addfirstname(0,'Brazilian','Kelly')
addfirstname(1,'Brazilian','Luis Felipe')
addfirstname(0,'Brazilian','Isabelle')
addfirstname(1,'Brazilian','João Eduardo')
addfirstname(0,'Brazilian','Gabriele')
addfirstname(1,'Brazilian','Jacques')
addfirstname(0,'Brazilian','Talita')
addfirstname(1,'Brazilian','Natan')
addfirstname(0,'Brazilian','Naiara')
addfirstname(1,'Brazilian','Braian')
addfirstname(0,'Brazilian','Angelica')
addfirstname(1,'Brazilian','Luiz Felipe')
addfirstname(0,'Brazilian','Evelyn')
addfirstname(1,'Brazilian','Murillo')
addfirstname(0,'Brazilian','Geovanna')
# Register the common Brazilian surnames with the name tables.
# The list mixes Portuguese spellings with the Spanish-influenced
# variants that also occur in the data (e.g. Rodrigues/Rodriguez,
# Gomes/Gomez, Dias/Díaz); registration order is preserved.
for _surname in ('Silva', 'Santos', 'Souza', 'Sousa', 'Oliveira',
                 'Pereira', 'Lima', 'Carvalho', 'Ferreira', 'Rodrigues',
                 'Rodriguez', 'Almeida', 'Costa', 'Gomes', 'Gomez',
                 'Martins', 'Araújo', 'Melo', 'Mello', 'Barbosa',
                 'Barboza', 'Ribeiro', 'Alves', 'Alvaréz', 'Cardoso',
                 'Schmitz', 'Schmidt', 'Rocha', 'Correia', 'Correa',
                 'Dias', 'Díaz', 'Teixeira', 'Fernandes', 'Fernandez',
                 'Azevedo', 'Cavalcante', 'Cavalcanti', 'Montes',
                 'Morais', 'Moraes', 'Gonçalves', 'Gonzáles', 'Gónzalez'):
    addlastname('Brazilian', _surname)
del _surname  # don't leave the loop variable in the module namespace
# British first names.  The original data lists all male (flag 1) entries
# first, then all female (flag 0) entries; two loops preserve that exact
# registration order.  Flag semantics are defined by addfirstname,
# declared earlier in this file.
for _name in (
    'Christopher', 'James', 'David', 'Daniel', 'Michael', 'Matthew',
    'Andrew', 'Richard', 'Paul', 'Mark', 'Thomas', 'Adam',
    'Robert', 'John', 'Lee', 'Benjamin', 'Steven', 'Jonathan',
    'Craig', 'Stephen', 'Simon', 'Nicholas', 'Peter', 'Anthony',
    'Alexander', 'Gary', 'Ian', 'Ryan', 'Luke', 'Jamie',
    'Stuart', 'Philip', 'Darren', 'William', 'Gareth', 'Martin',
    'Kevin', 'Scott', 'Dean', 'Joseph', 'Jason', 'Neil',
    'Samuel', 'Carl', 'Ben', 'Sean', 'Timothy', 'Oliver',
    'Ashley', 'Wayne', 'Edward', 'Shaun', 'Aaron', 'Mohammed',
    'Gavin', 'Liam', 'Nathan', 'Alan', 'Graham', 'Ross',
    'Karl', 'Marc', 'Adrian', 'Phillip', 'Patrick', 'Lewis',
    'Colin', 'Russell', 'Charles', 'Shane', 'George', 'Sam',
    'Mathew', 'Jack', 'Ricky', 'Dale', 'Tony', 'Joshua',
    'Alex', 'Dominic', 'Barry', 'Leon', 'Mohammad', 'Terry',
    'Gregory', 'Danny', 'Brian', 'Keith', 'Antony', 'Kieran',
    'Justin', 'Bradley', 'Jordan', 'Martyn', 'Leigh', 'Abdul',
    'Damien', 'Stewart', 'Robin', 'Iain',
):
    addfirstname(1, 'British', _name)
for _name in (
    'Sarah', 'Laura', 'Gemma', 'Emma', 'Rebecca', 'Claire',
    'Victoria', 'Samantha', 'Rachel', 'Amy', 'Jennifer', 'Nicola',
    'Katie', 'Lisa', 'Kelly', 'Natalie', 'Louise', 'Michelle',
    'Hayley', 'Hannah', 'Helen', 'Charlotte', 'Joanne', 'Lucy',
    'Elizabeth', 'Leanne', 'Danielle', 'Donna', 'Katherine', 'Clare',
    'Stephanie', 'Stacey', 'Lauren', 'Joanna', 'Kerry', 'Emily',
    'Catherine', 'Sophie', 'Anna', 'Jessica', 'Zoe', 'Kirsty',
    'Kimberley', 'Kate', 'Jenna', 'Caroline', 'Natasha', 'Rachael',
    'Amanda', 'Kathryn', 'Karen', 'Alexandra', 'Jodie', 'Alison',
    'Sara', 'Jemma', 'Carly', 'Heather', 'Holly', 'Ruth',
    'Fiona', 'Melissa', 'Angela', 'Suzanne', 'Katy', 'Marie',
    'Naomi', 'Cheryl', 'Melanie', 'Sally', 'Julie', 'Charlene',
    'Jade', 'Sian', 'Tracey', 'Eleanor', 'Deborah', 'Maria',
    'Lindsey', 'Abigail', 'Lindsay', 'Susan', 'Alice', 'Georgina',
    'Aimee', 'Jane', 'Kim', 'Carla', 'Christine', 'Dawn',
    'Tanya', 'Jenny', 'Andrea', 'Lyndsey', 'Jacqueline', 'Lynsey',
    'Chloe', 'Mary', 'Leah', 'Toni',
):
    addfirstname(0, 'British', _name)
# British last names, registered in the original order via a single
# data-driven loop instead of one call statement per name.
for _lastname in (
    'Smith', 'Jones', 'Williams', 'Brown', 'Taylor', 'Davies',
    'Wilson', 'Evans', 'Thomas', 'Johnson', 'Roberts', 'Walker',
    'Wright', 'Robinson', 'Thompson', 'White', 'Hughes', 'Edwards',
    'Green', 'Hall', 'Wood', 'Harris', 'Lewis', 'Martin',
    'Jackson', 'Clarke', 'Clark', 'Turner', 'Hill', 'Scott',
    'Cooper', 'Morris', 'Ward', 'Moore', 'King', 'Watson',
    'Baker', 'Harrison', 'Morgan', 'Patel', 'Young', 'Allen',
    'Mitchell', 'James', 'Anderson', 'Phillips', 'Lee', 'Bell',
    'Parker', 'Davis',
):
    addlastname('British', _lastname)
# Mexican first names with sex flag 1 (paired with male names in this
# data set; flag semantics are defined by addfirstname, declared earlier
# in this file).  Registered in the original order.
for _name in (
    'Abelardo', 'Alejándro', 'Alejandro', 'Aleta', 'Alonso', 'Anton',
    'Antonin', 'Arlo', 'Arrio', 'Artemio', 'Badilón', 'Bebe',
    'Beinvenido', 'Bembé', 'Bertín', 'Buinton', 'Carlito', 'Carlos',
    'Cedro', 'Ché', 'Chago', 'Chale', 'Charro', 'Chavez',
    'Checha', 'Cheche', 'Chenche', 'Chencho', 'Chicho', 'Chilo',
    'Chuminga', 'Chumo', 'Cid', 'Cidro', 'Cisco', 'Clodoveo',
    'Cortez', 'Dacey', 'Dalmacio', 'Dantel', 'Dario', 'Dasio',
    'Dato', 'Deanté', 'Decarlos', 'Decimus', 'Decio', 'Decoroso',
    'Delmar', 'Demián', 'Deocaro', 'Deodato', 'Deseado', 'Desiderio',
    'Desierto', 'Deuce', 'Dexter', 'Diego', 'Dominic', 'Donaldo',
    'Duardo', 'Duran', 'Edgardo', 'Edmundo', 'Efrén', 'Elonzo',
    'Elvío', 'Emerenciano', 'Emilien', 'Emilio', 'Erón', 'Ermano',
    'Ermino', 'Ernesto', 'Estévan', 'Esteban', 'Evarado', 'Ezio',
    'Fabián', 'Farón', 'Faroh', 'Farruco', 'Favian', 'Federico',
    'Felipe', 'Felisardo', 'Fermín', 'Fico', 'Fidel', 'Fidencio',
    'Filiberto', 'Floriano', 'Focio', 'Fraco', 'Francisco', 'Fraterno',
    'Frederico', 'Fredo', 'Galeaso', 'Galeno', 'Gallan', 'Ganimedes',
    'García', 'Genaro', 'Gencio', 'Geraldo', 'Gerardo', 'Gilberto',
    'Gonzalo', 'Graciano', 'Grau', 'Grimoaldo', 'Guillermo', 'Gustavo',
    'Gutierre', 'Hadrián', 'Herculano', 'Hilario', 'Hipacio', 'Hipolito',
    'Horacio', 'Horado', 'Huberto', 'Iggy', 'Ilidio', 'Incencio',
    'Incendio', 'Indíbil', 'Isadoro', 'Jade', 'Jaguar', 'Jaime',
    'Jair', 'Jando', 'Jenaro', 'Joaquín', 'Jorgeluis', 'José',
    'Josealfredo', 'Joseantonio', 'Josemanuel', 'Josué', 'Juan', 'Juanjose',
    'Julían', 'Justino', 'Karlos', 'Líbano', 'Laurencio', 'Lelio',
    'Lerenzo', 'Libanio', 'Libio', 'Lisandro', 'Lobo', 'Lorenzo',
    'Luís', 'Lucío', 'Luis', 'Luisenrique', 'Mío', 'Macario',
    'Maceo', 'Macerio', 'Maciel', 'Mancio', 'Mango', 'Marceliano',
    'Marius', 'Martínez', 'Martese', 'Mateo', 'Mattéo', 'Mauricio',
    'Merlino', 'Miguel', 'Mique', 'Mirco', 'Moises', 'Montego',
    'Montez', 'Naldo', 'Naolin', 'Napier', 'Natal', 'Nelo',
    'Nemesio', 'Neopolo', 'Nero', 'Neron', 'Nesto', 'Nestor',
    'Neto', 'Nigel', 'Niguel', 'Noelino', 'Norberto', 'Oalo',
    'Octavio', 'Oleos', 'Olivero', 'Oratio', 'Ostiano', 'Oswaldo',
    'Othello', 'Ovidio', 'Pancho', 'Paquito', 'Pascual', 'Patricio',
    'Paz', 'Pedro', 'Pepe', 'Perico', 'Peyo', 'Pilar',
    'Pilato', 'Pio', 'Pitin', 'Plácido', 'Porcio', 'Porfirio',
    'Prudencio', 'Pueblo', 'Quintiliano', 'Quito', 'Rafael', 'Ramírez',
    'Raymón', 'Real', 'Remedio', 'Remegio', 'Renzo', 'Respicio',
    'Rey', 'Reynaldo', 'Ricardo', 'Rio', 'Riqui', 'Rodolfo',
    'Rodrigo', 'Rodriquez', 'Rogelio', 'Rolon', 'Roman', 'Romulus',
    'Salamon', 'Salviano', 'Santino', 'Sarito', 'Secundus', 'Segundino',
    'Selesio', 'Senon', 'Sereno', 'Servando', 'Sidonio', 'Siervo',
    'Silverio', 'Simeón', 'Socorro', 'Stancio', 'Tácito', 'Taciano',
    'Tajo', 'Tarsicio', 'Tatius', 'Tauro', 'Tavió', 'Teyo',
    'Tiago', 'Ticiano', 'Tino', 'Topo', 'Toro', 'Turi',
    'Tutu', 'Valentin', 'Valien', 'Valiente', 'Valor', 'Varian',
    'Ventura', 'Vergil', 'Victor', 'Vidal', 'Vidan', 'Vincente',
    'Vito', 'Viviano', 'Vladimiro', 'Waterio', 'Xenaro', 'Xeneroso',
    'Ximén', 'Ximenes', 'Xusto', 'Yago', 'Yobanis', 'Zacarís',
):
    addfirstname(1, 'Mexican', _name)
# Mexican first names with sex flag 0 (paired with female names in this
# data set; flag semantics are defined by addfirstname, declared earlier
# in this file).  Registered in the original order; the duplicate
# 'Marisol' and 'Paloma' entries appear twice in the original data and
# are preserved here.
for _name in (
    'Abrienda', 'Adán', 'Adana', 'Agalia', 'Alameda', 'Alandra',
    'Aldana', 'Aldonsa', 'Alejandra', 'Alejandrina', 'Aleta', 'Alma',
    'Almunda', 'Aloise', 'Alondra', 'Alonzia', 'Altamira', 'Alva',
    'Alvarita', 'Alvera', 'Amaranta', 'Amedia', 'Anaclara', 'Anatalia',
    'Ancarla', 'Andeana', 'Angelita', 'Aniceta', 'Antigua', 'Antoliana',
    'Apolinaria', 'Aracela', 'Aracelis', 'Arnalda', 'Aryiola', 'Ascensción',
    'Asela', 'Atanasia', 'Aurquena', 'Basiana', 'Bebe', 'Belinda',
    'Benicio', 'Benjamina', 'Bienvenida', 'Bonita', 'Briselda', 'Buena',
    'Camelia', 'Camino', 'Camiria', 'Cancia', 'Candeleria', 'Carem',
    'Castana', 'Catalin', 'Catalina', 'Celinda', 'Chadelaria', 'Chalina',
    'Chiquita', 'Ciprina', 'Cira', 'Clemencia', 'Clodina', 'Cochiti',
    'Coco', 'Conceta', 'Conchita', 'Corazon', 'Corisande', 'Cruz',
    'Cruzita', 'Damacia', 'Damiana', 'Damicia', 'Damita', 'Danas',
    'Danita', 'Delfina', 'Delicias', 'Delma', 'Delmira', 'Destina',
    'Devera', 'Diamantina', 'Diosa', 'Dolores', 'Doncia', 'Donetta',
    'Donia', 'Donila', 'Donita', 'Dorbeta', 'Duella', 'Duena',
    'Dulcina', 'Eldora', 'Eleadora', 'Elenita', 'Elysa', 'Emeralda',
    'Eneida', 'Epifana', 'Eremita', 'Erendira', 'Especial', 'Estebana',
    'Estefani', 'Estella', 'Etel', 'Eumelia', 'Eutimia', 'Faqueza',
    'Farona', 'Faustina', 'Felica', 'Finura', 'Flores', 'Florida',
    'Fuensanta', 'Gaudencia', 'Generosa', 'Gimena', 'Ginesa', 'Gotzone',
    'Gracia', 'Guillermina', 'Guioma', 'Heriberto', 'Hermenegilda', 'Honoratas',
    'Iberia', 'Idoia', 'Iluminada', 'Immaculada', 'Inocencia', 'Inoceneia',
    'Isabel', 'Isabelle', 'Isona', 'Itsel', 'Izabella', 'Izazcun',
    'Jacinta', 'Jaden', 'Jadie', 'Jaedyn', 'Jaimica', 'Jaione',
    'Jaira', 'Jameika', 'Jamica', 'Jayden', 'Jaydon', 'Josune',
    'Joyceta', 'Juandalyn', 'Juanita', 'Julita', 'Klarita', 'Lali',
    'LaNeva', 'Laurinda', 'Laurita', 'Lavinia', 'Lindeza', 'Lindy',
    'Lita', 'Lorda', 'Loreto', 'Lucita', 'Luisa', 'Luvenia',
    'Lynda', 'Lyndee', 'Madra', 'Madrona', 'Maitana', 'Maitane',
    'Maria del Mar', 'Maricelia', 'Marilin', 'Marilu', 'Mariquilla', 'Marisel',
    'Marisol', 'Marisol', 'Marquilla', 'Marquita', 'Mejorana', 'Melaida',
    'Milagros', 'Miquela', 'Monita', 'Montana', 'Nachine', 'Naiara',
    'Naolin', 'Natividad', 'Neena', 'Nelia', 'Neva', 'Nevada',
    'Nevara', 'Neves', 'Nila', 'Nilda', 'Nunila', 'Octaviana',
    'Orieta', 'Osane', 'Osita', 'Paca', 'Paladia', 'Paloma',
    'Paloma', 'Pancha', 'Paz', 'Piedad', 'Pilar', 'Presta',
    'Prima', 'Providencia', 'Purisima', 'Querida', 'Raa', 'Raeca',
    'Ramona', 'Real', 'Reseda', 'Reya', 'Richa', 'Rigel',
    'Roja', 'Roldana', 'Rosalba', 'Rosalin', 'Rosalina', 'Rosamira',
    'Rosamund', 'Roseline', 'Rosetta', 'Rosilla', 'Rosmaria', 'Rozolia',
    'Ruana', 'Sabria', 'Sagrario', 'Salbatora', 'Sancia', 'Sandia',
    'Sarita', 'Sata', 'Saturnina', 'Senona', 'Sephira', 'Serafia',
    'Serrana', 'Silveria', 'Simona', 'Sirena', 'Solana', 'Soledad',
    'Soledada', 'Somona', 'Sonora', 'Sonsoles', 'Suela', 'Susanita',
    'Teena', 'Teodomira', 'Tercera', 'Terciera', 'Tobiana', 'Toya',
    'Trena', 'Trenita', 'Trevia', 'Tsivya', 'Unida', 'Usoa',
    'Valencia', 'Vanda', 'Verdad', 'Vianca', 'Vida', 'Vittoria',
    'Xalbadora', 'Xaviera', 'Xeveria', 'Ximena', 'Yomaris', 'Ysabel',
    'Zana', 'Zaneta', 'Zaviera', 'Zobeida', 'Zoila', 'Zulmara',
):
    addfirstname(0, 'Mexican', _name)
# Mexican last names, registered in the original order via a single
# data-driven loop instead of one call statement per name.
for _lastname in (
    'Aguilar', 'Flores', 'Lopez', 'Ortiz', 'Ruiz', 'Alvarez',
    'Garcia', 'Martinez', 'Pena', 'Salazar', 'Castillo', 'Garza',
    'Medina', 'Perez', 'Sanchez', 'Castro', 'Gomez', 'Mendez',
    'Ramirez', 'Santiago', 'Chavez', 'Gonzales', 'Mendoza', 'Ramos',
    'Soto', 'Cruz', 'Gutierrez', 'Morales', 'Reyes', 'Torres',
    'Delgado', 'Guzman', 'Moreno', 'Rios', 'Valdez', 'Diaz',
    'Hernandez', 'Munoz', 'Rivera', 'Vargas', 'Espinoza', 'Hierra',
    'Nunez', 'Rodriguez', 'Vasquez', 'Fernandez', 'Jiminez', 'Ortega',
    'Romero', 'Vega',
):
    addlastname('Mexican', _lastname)
# German surnames.  Some spelling variants (e.g. Meier/Meyer/Maier,
# Scholz/Schulze, Werfel/Wurfel) appear more than once in the original
# data; the duplicates are kept so the same registrations are made.
for _name in (
    'Aachen', 'Ackerman', 'Ackermann', 'Adler', 'Amsel', 'Austerlitz',
    'Bach', 'Bachmeier', 'Bader', 'Baader', 'Baecker', 'Becker',
    'Baer', 'Bar', 'Barth', 'Bauer', 'Baum', 'Baumgaertner',
    'Baumgartner', 'Bumgarner', 'Bayer', 'Baier', 'Beyer', 'Beckenbauer',
    'Beich', 'Beike', 'Berg', 'Bergmann', 'Bieber', 'Biermann',
    'Blau', 'Boehm', 'Bohm', 'Brandt', 'Brauer', 'Braun',
    'Bürger', 'Burger', 'Busch', 'Bosch', 'Daecher', 'Decker',
    'Diederich', 'Dietrich', 'Drechsler', 'Dreher', 'Dresdner', 'Dresner',
    'Drescher', 'Duerr', 'Durr', 'Ebersbach', 'Ebersbacher', 'Eberhardt',
    'Eberhart', 'Eichel', 'Eichelberger', 'Eichmann', 'Ehrlichmann', 'Eiffel',
    'Eisenberg', 'Eisenhauer', 'Egger', 'Eggers', 'Engel', 'Faber',
    'Faerber', 'Farber', 'Fassbinder', 'Faust', 'Feierabend', 'Fenstermacher',
    'Fiedler', 'Fink', 'Finkel', 'Fischer', 'Fisher', 'Fleischer',
    'Foerster', 'Frankfurter', 'Frei', 'Frey', 'Freitag', 'Freytag',
    'Freud', 'Fried', 'Friedmann', 'Friedman', 'Frueh', 'Freeh',
    'Fruehauf', 'Fuchs', 'Fuerst', 'Furst', 'Fuhrmann', 'Gaertner',
    'Gärtner', 'Gerber', 'Gerste', 'Gersten', 'Gloeckner', 'Glockner',
    'Goldschmidt', 'Gottlieb', 'Gottschalk', 'Gruenewald', 'Grunewald', 'Grunwald',
    'Hahn', 'Herrmann', 'Herman', 'Hertz', 'Herz', 'Hertzog',
    'Herzog', 'Hirsch', 'Hoch', 'Hoffmann', 'Hofmann', 'Holtzmann',
    'Holzman', 'Hueber', 'Huber', 'Hoover', 'Jaeger', 'Jager',
    'Jung', 'Junker', 'Kaiser', 'Kalb', 'Kaestner', 'Kastner',
    'Kappel', 'Kaufmann', 'Keller', 'Kirsch', 'Klein', 'Klug',
    'Kluge', 'Koch', 'Kohl', 'Kohler', 'Koehler', 'Koenig',
    'Konig', 'Krause', 'Krueger', 'Kruger', 'Kuefer', 'Kuester',
    'Kuster', 'Kuhn', 'Kunze', 'Koertig', 'Kortig', 'Lang',
    'Lehmann', 'Lehrer', 'Loewe', 'Lowe', 'Luft', 'Mahler',
    'Mehler', 'Maier', 'Meier', 'Meyer', 'Mauer', 'Maurer',
    'Meister', 'Metzger', 'Meier', 'Meyer', 'Maier', 'Mueller',
    'Muller', 'Moench', 'Muench', 'Nacht', 'Nadel', 'Nagel',
    'Naumann', 'Neumann', 'Neudorf', 'Neustadt', 'Nussbaum', 'Oster',
    'Osterhagen', 'Ostermann', 'Pabst', 'Papst', 'Pfaff', 'Pfeffer',
    'Pfeifer', 'Pfeiffer', 'Probst', 'Propst', 'Reinhard', 'Reinhardt',
    'Reiniger', 'Richter', 'Ritter', 'Roth', 'Rothschild', 'Rothstein',
    'Saenger', 'Sanger', 'Sankt', 'Schäfer', 'Schaefer', 'Scherer',
    'Schiffer', 'Schmidt', 'Schmitt', 'Schneider', 'Scholz', 'Schulze',
    'Schreiber', 'Schreiner', 'Schroeder', 'Schroder', 'Schuhmacher', 'Schultheiss',
    'Schultz', 'Schulz', 'Schulze', 'Scholz', 'Schuster', 'Shuster',
    'Schwab', 'Schwartz', 'Schwarz', 'Schweitzer', 'Schweizer', 'Seiler',
    'Sommer', 'Strauss', 'Thalberg', 'Theiss', 'Theissen', 'Traugott',
    'Trommler', 'Unger', 'Urner', 'Vogel', 'Vogler', 'Vogt',
    'Waechter', 'Wagner', 'Wannemaker', 'Weber', 'Wechsler', 'Wexler',
    'Weiss', 'Weisz', 'Weissmuller', 'Werfel', 'Wurfel', 'Winkel',
    'Wirth', 'Wirtz', 'Wolf', 'Wulf', 'Wurfel', 'Werfel',
    'Ziegler', 'Zimmer', 'Zimmermann', 'Zimmerman', 'Zweig',
):
    addlastname('German', _name)
# German male first names (first argument 1 = male).
for _name in (
    'Alexander', 'André', 'Andreas', 'Benjamin', 'Björn', 'Christian',
    'Christoph', 'Christopher', 'Daniel', 'David', 'Dennis', 'Dirk',
    'Dominik', 'Erich', 'Fabian', 'Felix', 'Florian', 'Frank',
    'Georg', 'Gerd', 'Gert', 'Hans', 'Harald', 'Heiko',
    'Heinz', 'Helmut', 'Herbert', 'Holger', 'Jakob', 'Jan',
    'Jannik', 'Jens', 'Joachim', 'Johannes', 'Jonas', 'Jonathan',
    'Jörg', 'Julian', 'Jürgen', 'Kai', 'Karl', 'Karsten',
    'Klaus', 'Kurt', 'Lars', 'Lennart', 'Leon', 'Lukas',
    'Manfred', 'Marcel', 'Marc', 'Marco', 'Markus', 'Martin',
    'Marvin', 'Matthias', 'Max', 'Maximilian', 'Michael', 'Moritz',
    'Nico', 'Niklas', 'Nils', 'Norbert', 'Olaf', 'Oliver',
    'Oskar', 'Pascal', 'Patrick', 'Paul', 'Peter', 'Philipp',
    'Rainer', 'Ralf', 'Reinhard', 'René', 'Richard', 'Robert',
    'Robin', 'Rolf', 'Rüdiger', 'Rudolf', 'Sascha', 'Sebastian',
    'Siegfried', 'Simon', 'Stefan', 'Sven', 'Theodor', 'Thomas',
    'Tim', 'Timo', 'Tobias', 'Torsten', 'Ulrich', 'Uwe',
    'Volker', 'Walter', 'Werner', 'Wilfried', 'Wilhelm', 'Willi',
    'Wolfgang',
):
    addfirstname(1, 'German', _name)
# German female first names (first argument 0 = female).
for _name in (
    'Alexandra', 'Alina', 'Andrea', 'Angela', 'Angelika', 'Anja',
    'Anke', 'Ann', 'Anna', 'Anne', 'Annemarie', 'Annika',
    'Antonia', 'Astrid', 'Barbara', 'Bärbel', 'Beate', 'Berta',
    'Bettina', 'Bianca', 'Birgit', 'Brigitte', 'Britta', 'Carla',
    'Caroline', 'Celina', 'Celine', 'Charlotte', 'Chiara', 'Christa',
    'Christel', 'Christiane', 'Christina', 'Christine', 'Claudia', 'Cornelia',
    'Dagmar', 'Daniela', 'Dora', 'Doris', 'Dorothea', 'Edith',
    'Elfriede', 'Elisabeth', 'Elise', 'Elke', 'Ella', 'Elli',
    'Elsa', 'Else', 'Emilie', 'Emma', 'Emmi', 'Erika',
    'Erna', 'Eva', 'Franziska', 'Frieda', 'Gabriele', 'Gerda',
    'Gertrud', 'Gisela', 'Grete', 'Gudrun', 'Hannah', 'Hannelore',
    'Hedwig', 'Heidemarie', 'Heidi', 'Heike', 'Helene', 'Helga',
    'Herta', 'Hildegard', 'Ida', 'Ilse', 'Inge', 'Ingeborg',
    'Ingrid', 'Irma', 'Irmgard', 'Isabel', 'Jacqueline', 'Jana',
    'Janina', 'Janine', 'Jasmin', 'Jennifer', 'Jessica', 'Johanna',
    'Jule', 'Julia', 'Jutta', 'Karin', 'Katharina', 'Käthe',
    'Katja', 'Katrin', 'Kerstin', 'Kim', 'Kirsten', 'Klara',
    'Kristin', 'Lara', 'Laura', 'Lea', 'Lena', 'Leonie',
    'Lieselotte', 'Lilli', 'Lina', 'Lisa', 'Luisa', 'Luise',
    'Maike', 'Maja', 'Manuela', 'Margareta', 'Margarete', 'Margot',
    'Margret', 'Margrit', 'Maria', 'Marianne', 'Marie', 'Marion',
    'Marlies', 'Martha', 'Martina', 'Melanie', 'Melina', 'Merle',
    'Meta', 'Mia', 'Michaela', 'Michelle', 'Minna', 'Monika',
    'Nadine', 'Natalie', 'Nele', 'Nicole', 'Nina', 'Olga',
    'Paula', 'Petra', 'Pia', 'Regina', 'Renate', 'Rita',
    'Rosemarie', 'Ruth', 'Sabine', 'Sabrina', 'Sandra', 'Sarah',
    'Saskia', 'Sigrid', 'Silke', 'Silvia', 'Simone', 'Sonja',
    'Sophia', 'Sophie', 'Stefanie', 'Susanne', 'Svenja', 'Tanja',
    'Ulrike', 'Ursula', 'Ute', 'Vanessa', 'Vera', 'Waltraud',
    'Yvonne',
):
    addfirstname(0, 'German', _name)
# French male first names (first argument 1 = male).
for _name in (
    'Thomas', 'Quentin', 'Alexandre', 'Maxime', 'Nicolas', 'Lucas',
    'Antoine', 'Clément', 'Hugo', 'Valentin', 'Julien', 'Alexis',
    'Théo', 'Romain', 'Dylan', 'Florian', 'Kevin', 'Guillaume',
    'Pierre', 'Anthony', 'Benjamin', 'Corentin', 'Louis', 'Adrien',
    'Paul', 'Vincent', 'Baptiste', 'Mathieu', 'Nathan', 'Jérémy',
    'Axel', 'Arthur', 'Victor', 'Léo', 'Jordan', 'Bastien',
    'Rémi', 'Loïc', 'Damien', 'Simon', 'Raphaël', 'Enzo',
    'Thibault', 'Matthieu', 'Tom', 'Sébastien', 'Aurélien', 'Maxence',
    'Steven', 'Dorian', 'Tristan', 'Fabien', 'William', 'Arnaud',
    'David', 'Samuel', 'Jonathan', 'Martin', 'Robin', 'Yann',
    'Jules', 'Bryan', 'Gaëtan', 'François', 'Benoît', 'Charles',
    'Mickaël', 'Allan', 'Tanguy', 'Florent', 'Christopher', 'Mohamed',
    'Gabriel', 'Thibaut', 'Cédric', 'Mehdi', 'Killian', 'Rémy',
    'Mathis', 'Yanis', 'Jason', 'Erwan', 'Jean', 'Ludovic',
    'Etienne', 'Grégory', 'Tony', 'Alex', 'Dimitri', 'Lilian',
    'Cyril', 'Alan', 'Sylvain', 'Brandon', 'Jean-Baptiste', 'Grégoire',
    'Mathias', 'Antonin', 'Yoann', 'Morgan',
):
    addfirstname(1, 'French', _name)
# French female first names (first argument 0 = female).
for _name in (
    'Léa', 'Manon', 'Camille', 'Marie', 'Chloé', 'Océane',
    'Laura', 'Sarah', 'Pauline', 'Marine', 'Mathilde', 'Julie',
    'Emma', 'Lucie', 'Anaïs', 'Justine', 'Marion', 'Morgane',
    'Ines', 'Charlotte', 'Mélanie', 'Mélissa', 'Clara', 'Juliette',
    'Emilie', 'Lisa', 'Amélie', 'Amandine', 'Margaux', 'Elodie',
    'Clémence', 'Maeva', 'Noémie', 'Eva', 'Louise', 'Elisa',
    'Claire', 'Audrey', 'Célia', 'Alice', 'Romane', 'Margot',
    'Estelle', 'Coralie', 'Elise', 'Valentine', 'Alexia', 'Fanny',
    'Céline', 'Laurine', 'Caroline', 'Solène', 'Alicia', 'Agathe',
    'Ophélie', 'Coline', 'Sophie', 'Aurélie', 'Emeline', 'Laurie',
    'Alexandra', 'Jeanne', 'Andréa', 'Jade', 'Victoria', 'Carla',
    'Laetitia', 'Axelle', 'Cassandra', 'Zoe', 'Jessica', 'Julia',
    'Lola', 'Ludivine', 'Aurore', 'Salomé', 'Marina', 'Charlène',
    'Laure', 'Lena', 'Maelle', 'Cécile', 'Myriam', 'Clémentine',
    'Kelly', 'Elsa', 'Eloïse', 'Lucile', 'Cindy', 'Tiffany',
    'Adeline', 'Ambre', 'Alison', 'Olivia', 'Cloé', 'Gwendoline',
    'Gaëlle', 'Kenza', 'Johanna', 'Charline',
):
    addfirstname(0, 'French', _name)
# French surnames.
for _name in (
    'Martin', 'Bernard', 'Dubois', 'Thomas', 'Robert', 'Richard',
    'Petit', 'Durand', 'Leroy', 'Moreau', 'Simon', 'Laurent',
    'Lefebvre', 'Michel', 'Garcia', 'David', 'Bertrand', 'Roux',
    'Vincent', 'Fournier', 'Morel', 'Girard', 'Andre', 'Lefevre',
    'Mercier', 'Dupont', 'Lambert', 'Bonnet', 'Francois', 'Martinez',
    'Legrand', 'Garnier', 'Faure', 'Rousseau', 'Blanc', 'Guerin',
    'Muller', 'Henry', 'Roussel', 'Nicolas', 'Perrin', 'Morin',
    'Mathieu', 'Clement', 'Gauthier', 'Dumont', 'Lopez', 'Fontaine',
    'Chevalier', 'Robin', 'Masson', 'Sanchez', 'Gerard', 'Nguyen',
    'Boyer', 'Denis', 'Lemaire', 'Duval', 'Joly', 'Gautier',
    'Roger', 'Roche', 'Roy', 'Noel', 'Meyer', 'Lucas',
    'Meunier', 'Jean', 'Perez', 'Marchand', 'Dufour', 'Blanchard',
    'Marie', 'Barbier', 'Brun', 'Dumas', 'Brunet', 'Schmitt',
    'Leroux', 'Colin', 'Fernandez', 'Pierre', 'Renard', 'Arnaud',
    'Rolland', 'Caron', 'Aubert', 'Giraud', 'Leclerc', 'Vidal',
    'Bourgeois', 'Renaud', 'Lemoine', 'Picard', 'Gaillard', 'Philippe',
    'Leclercq', 'Lacroix', 'Fabre', 'Dupuis', 'Olivier', 'Rodriguez',
    'Da Silva', 'Hubert', 'Louis', 'Charles', 'Guillot', 'Riviere',
    'Le Gall', 'Guillaume', 'Adam', 'Rey', 'Moulin', 'Gonzalez',
    'Berger', 'Lecomte', 'Menard', 'Fleury', 'Deschamps', 'Carpentier',
    'Julien', 'Benoit', 'Paris', 'Maillard', 'Marchal', 'Aubry',
    'Vasseur', 'Le Roux', 'Renault', 'Jacquet', 'Collet', 'Prevost',
    'Poirier', 'Charpentier', 'Royer', 'Huet', 'Baron', 'Dupuy',
    'Pons', 'Paul', 'Laine', 'Carre', 'Breton', 'Remy',
    'Schneider', 'Perrot', 'Guyot', 'Barre', 'Marty', 'Cousin',
    'Le Goff', 'Boucher', 'Bailly', 'Boulanger', 'Collin', 'Herve',
    'Evrard', 'Poulain', 'Etienne', 'Lebrun', 'Daniel', 'Pereira',
    'Pasquier', 'Cordier', 'Humbert', 'Gillet', 'Bouvier', 'Leveque',
    'Albert', 'Ferreira', 'Jacob', 'Germain', 'Klein', 'Millet',
    'Weber', 'Gomez', 'Marechal', 'Gay', 'Chevallier', 'Mallet',
    'Lesage', 'Bertin', 'Leblanc', 'Alexandre', 'Gonçalves', 'Perrier',
    'Hamon', 'Dos Santos', 'Rodrigues', 'Pelletier', 'Bouchet', 'Monnier',
    'Leger', 'Marin', 'Lemaitre', 'Reynaud', 'Pichon', 'Lamy',
    'Antoine', 'Camus', 'Georges', 'Perret', 'Coulon', 'Gros',
    'Devaux', 'Langlois', 'Gilbert', 'Tessier', 'Chauvin', 'Ollivier',
    'Levy', 'Marion', 'Dupond', 'Joubert', 'Jacques', 'Rossi',
    'Besson', 'Legros', 'Guichard', 'Fernandes', 'Carlier', 'Delattre',
    'Maury', 'Cohen', 'Hernandez', 'Guillon', 'Coste', 'Verdier',
    'Sauvage', 'Lejeune', 'Martins', 'Ferrand', 'Blanchet', 'Ruiz',
    'Bousquet', 'Didier', 'Tanguy', 'Marques', 'Michaud', 'Gregoire',
    'Barthelemy', 'Charrier', 'Briand', 'Guillou', 'Maurice', 'Navarro',
    'Leduc', 'Pascal', 'Delorme', 'Delaunay', 'Thibault', 'Bodin',
    'Valentin', 'Gaudin', 'Allard', 'Mahe', 'Chauvet', 'Masse',
    'Tran', 'Vallee', 'Barbe', 'Buisson', 'Lebreton', 'Benard',
    'Blondel', 'Laporte', 'Hebert', 'Courtois', 'Riou', 'Legendre',
    'Fischer', 'Delannoy', 'Vaillant',
):
    addlastname('French', _name)
addlastname('French','Lefort')
addlastname('French','Regnier')
addlastname('French','Guillet')
addlastname('French','Couturier')
addlastname('French','Raynaud')
addlastname('French','Bazin')
addlastname('French','Bigot')
addlastname('French','Peltier')
addlastname('French','Bourdon')
addlastname('French','Allain')
addlastname('French','Descamps')
addlastname('French','Duhamel')
addlastname('French','Dupre')
addlastname('French','Bruneau')
addlastname('French','Besnard')
addlastname('French','Lenoir')
addlastname('French','Lacombe')
addlastname('French','Laroche')
addlastname('French','Launay')
addlastname('French','Loiseau')
addlastname('French','Morvan')
addlastname('French','Jacquot')
addlastname('French','Raymond')
addlastname('French','Rossignol')
addlastname('French','Auger')
addlastname('French','Brunel')
addlastname('French','Thierry')
addlastname('French','Jourdan')
addlastname('French','Voisin')
addlastname('French','Godard')
addlastname('French','Blin')
addlastname('French','Baudry')
addlastname('French','Pages')
addlastname('French','Martel')
addlastname('French','Martineau')
addlastname('French','Faivre')
addlastname('French','Berthelot')
addlastname('French','Pineau')
addlastname('French','Texier')
addlastname('French','Girault')
addlastname('French','Normand')
addlastname('French','Petitjean')
addlastname('French','Seguin')
addlastname('French','Blot')
addlastname('French','Delmas')
addlastname('French','Fouquet')
addlastname('French','Guilbert')
addlastname('French','Colas')
addlastname('French','Merle')
addlastname('French','Pruvost')
addlastname('French','Labbe')
addlastname('French','Imbert')
addlastname('French','Toussaint')
addlastname('French','Maillet')
addlastname('French','Bonneau')
addlastname('French','Tournier')
addlastname('French','Salaun')
addlastname('French','Vallet')
addlastname('French','Favre')
addlastname('French','Delage')
addlastname('French','Wagner')
addlastname('French','Hardy')
addlastname('French','Gervais')
addlastname('French','Chretien')
addlastname('French','Grandjean')
addlastname('French','Parent')
addlastname('French','Gomes')
addlastname('French','Peron')
addlastname('French','Guyon')
addlastname('French','Lombard')
addlastname('French','Claude')
addlastname('French','Clerc')
addlastname('French','Chartier')
addlastname('French','Leblond')
addlastname('French','Da Costa')
addlastname('French','Lagarde')
addlastname('French','Guibert')
addlastname('French','Mace')
addlastname('French','Chauveau')
addlastname('French','Leconte')
addlastname('French','Hamel')
addlastname('French','Prevot')
addlastname('French','Cornu')
addlastname('French','Lelievre')
addlastname('French','Flament')
addlastname('French','Merlin')
addlastname('French','Vial')
addlastname('French','Boulay')
addlastname('French','Mary')
addlastname('French','Parmentier')
addlastname('French','Valette')
addlastname('French','Chapuis')
addlastname('French','Lecoq')
addlastname('French','Mouton')
addlastname('French','Geoffroy')
addlastname('French','Alves')
addlastname('French','Ribeiro')
addlastname('French','Lopes')
addlastname('French','Laborde')
addlastname('French','Besse')
addlastname('French','Marc')
addlastname('French','Picot')
addlastname('French','Boutin')
addlastname('French','Lacoste')
addlastname('French','Salmon')
addlastname('French','Prigent')
addlastname('French','Gilles')
addlastname('French','Poisson')
addlastname('French','Pujol')
addlastname('French','Gallet')
addlastname('French','Gueguen')
addlastname('French','Thiery')
addlastname('French','Lemonnier')
addlastname('French','Costa')
addlastname('French','Serre')
addlastname('French','Bouvet')
addlastname('French','Foucher')
addlastname('French','Pottier')
addlastname('French','Mas')
addlastname('French','Grenier')
addlastname('French','Leonard')
addlastname('French','Durant')
addlastname('French','Doucet')
addlastname('French','Potier')
addlastname('French','Torres')
addlastname('French','Le Corre')
addlastname('French','Brault')
addlastname('French','Charbonnier')
addlastname('French','Bouchard')
addlastname('French','Gras')
addlastname('French','Bayle')
addlastname('French','Delahaye')
addlastname('French','Ferry')
addlastname('French','Berthier')
addlastname('French','Maurin')
addlastname('French','Bonhomme')
addlastname('French','Bataille')
addlastname('French','Bouquet')
addlastname('French','Dubreuil')
addlastname('French','Lelong')
addlastname('French','Rault')
addlastname('French','Prost')
addlastname('French','Duchemin')
addlastname('French','Jourdain')
addlastname('French','Grand')
addlastname('French','Moreno')
addlastname('French','Bocquet')
addlastname('French','Lebon')
addlastname('French','Jacquemin')
addlastname('French','Neveu')
addlastname('French','Becker')
addlastname('French','Husson')
addlastname('French','Combes')
addlastname('French','Marquet')
addlastname('French','Benoist')
addlastname('French','Guy')
addlastname('French','Maire')
addlastname('French','Dumoulin')
addlastname('French','Huguet')
addlastname('French','Bernier')
addlastname('French','Lafon')
addlastname('French','Sabatier')
addlastname('French','Rocher')
addlastname('French','Arnould')
addlastname('French','Boulet')
addlastname('French','Lecocq')
addlastname('French','Morand')
addlastname('French','Ferre')
addlastname('French','Comte')
addlastname('French','Monier')
addlastname('French','Le Roy')
addlastname('French','Thiebaut')
addlastname('French','Bourdin')
addlastname('French','Guillemin')
addlastname('French','Leleu')
addlastname('French','Millot')
addlastname('French','Forestier')
addlastname('French','Mangin')
addlastname('French','Fortin')
addlastname('French','Ricard')
addlastname('French','Billard')
addlastname('French','Le Guen')
addlastname('French','Rousset')
addlastname('French','Jamet')
addlastname('French','Roques')
addlastname('French','Chambon')
addlastname('French','Jung')
addlastname('French','Dujardin')
addlastname('French','Turpin')
addlastname('French','Diaz')
addlastname('French','Prat')
addlastname('French','Jolivet')
addlastname('French','Favier')
addlastname('French','Andrieux')
addlastname('French','Castel')
addlastname('French','Bonnin')
addlastname('French','Ferrer')
addlastname('French','Grosjean')
addlastname('French','Maurel')
addlastname('French','Dias')
addlastname('French','Munoz')
addlastname('French','Chatelain')
addlastname('French','Rose')
addlastname('French','Blondeau')
addlastname('French','Guignard')
addlastname('French','Tellier')
addlastname('French','Cros')
addlastname('French','Le')
addlastname('French','Tardy')
addlastname('French','Combe')
addlastname('French','Cochet')
addlastname('French','Schmidt')
addlastname('French','Magnier')
addlastname('French','Sellier')
addlastname('French','Barreau')
addlastname('French','Monnet')
addlastname('French','Guiraud')
addlastname('French','Zimmermann')
addlastname('French','Granger')
addlastname('French','Leon')
addlastname('French','Godin')
addlastname('French','Andrieu')
addlastname('French','Walter')
addlastname('French','Granier')
addlastname('French','Gosselin')
addlastname('French','Drouet')
addlastname('French','Villain')
# Spanish male first names (gender flag 1), registered in the original
# listing order via one addfirstname call per entry.
for _given in (
    'Zacarías', 'Xavier', 'Victor', 'Vicente', 'Valentín', 'Umberto',
    'Tomás', 'Teodoro', 'Stefano', 'Sócrates', 'Silvestre', 'Sergio',
    'Set', 'Sebastián', 'Santiago', 'Salvador', 'Roque', 'Rolando',
    'Roberto', 'Rigoberto', 'Ricardo', 'René', 'Raúl', 'Ramón',
    'Rafael', 'Pablo', 'Oscar', 'Orlando', 'Octavio', 'Noé',
    'Nicolás', 'Neptuno', 'Nataniel', 'Moisés', 'Miguel', 'Maximiliano',
    'Mateo', 'Martín', 'Marcos', 'Marco', 'Manuel', 'Luperco',
    'Luis', 'Lucas', 'Luca', 'Leonardo', 'Leo', 'Julián',
    'Juan', 'José', 'Jorge', 'Joel', 'Joaquín', 'Jesús',
    'Javier', 'Jaime', 'Jacobo', 'Jacob', 'Jacinto', 'Iván',
    'Isaías', 'Isaac', 'Hugo', 'Guillermo', 'Gerardo', 'Gabriel',
    'Francisco', 'Fortunato', 'Fidel', 'Fernando', 'Felix', 'Felipe',
    'Fabio', 'Eustacio', 'Esteban', 'Ernesto', 'Emanuel', 'Enrique',
    'Eliseo', 'Elías', 'Efrain', 'Efraim', 'Eduardo', 'Dión',
    'Diógenes', 'Dino', 'Diego', 'David', 'Claudio', 'Celestino',
    'Carlos', 'Ceferino', 'Caleb', 'Benjamín', 'Benedicto', 'Bartolomé',
    'Augusto', 'Arsenio', 'Antonio', 'Ángelo', 'Ángel', 'Andrés',
    'Anastacio', 'Amadeo', 'Alfonso', 'Alejandro', 'Alberto', 'Agnano',
    'Adán', 'Absalón', 'Abrán', 'Abraham', 'Aarón',
):
    addfirstname(1, 'Spanish', _given)
# Spanish female first names (gender flag 0), registered in the original
# listing order via one addfirstname call per entry.
# NOTE(review): 'Selena' appears twice consecutively and 'Marisa' and
# 'Margarita' each appear twice in the original data. The duplicates are
# preserved here — presumably accidental, but they may intentionally weight
# frequency depending on how addfirstname stores entries; confirm before
# deduplicating.
for _given in (
    'Zoe', 'Zarina', 'Yesenia', 'Yara', 'Ximena', 'Yvonne',
    'Yolanda', 'Wendy', 'Viviana', 'Virginia', 'Violeta', 'Victoria',
    'Verónica', 'Venus', 'Vanesa', 'Valentina', 'Uma', 'Trinidad',
    'Tina', 'Tía', 'Thalia', 'Teresa', 'Tatiana', 'Talia',
    'Tiara', 'Susana', 'Sonia', 'Solana', 'Sol', 'Sofía',
    'Socorro', 'Sierra', 'Shakira', 'Serena', 'Selena', 'Selena',
    'Segunda', 'Samanta', 'Sabrina', 'Rubí', 'Rosana', 'Rosa',
    'Roberta', 'Rita', 'Reyna', 'Renee', 'Reina', 'Regina',
    'Rebeca', 'Raquel', 'Perla', 'Paz', 'Paulina', 'Patricia',
    'Pamela', 'Paloma', 'Olivia', 'Octavia', 'Nuria', 'Norma',
    'Noemí', 'Nina', 'Nevada', 'Nazarena', 'Natalia', 'Nancy',
    'Nadia', 'Mónica', 'Miriam', 'Miranda', 'Milagros', 'Mía',
    'Mercedes', 'Maya', 'Marta', 'Marisol', 'Marisa', 'Maricela',
    'Mariana', 'Marisa', 'Marina', 'Marilú', 'Mariangeles', 'Marianela',
    'María', 'Margarita', 'Marcia', 'Marcela', 'Mara', 'Manuela',
    'Margarita', 'Malvina', 'Malena', 'Magdalena', 'Mabel', 'Luz',
    'Luna', 'Luisa', 'Luana', 'Lorena', 'Lola', 'Lisa',
    'Linda', 'Liliana', 'Lidia', 'Libertad', 'Liana', 'Leandra',
    'Laura', 'Lara', 'Lana', 'Juliana', 'Juana', 'Joana',
    'Jimena', 'Jéssica', 'Jennifer', 'Jasmín', 'Jade', 'Jacinta',
    'Ivana', 'Isadora', 'Isabella', 'Isabel', 'Iris', 'Irene',
    'Ines', 'Ilana', 'Hermosa', 'Helena', 'Guadalupe', 'Graciana',
    'Gracia', 'Gloria', 'Gladis', 'Giuliana', 'Gisela', 'Gina',
    'Gianina', 'Gema', 'Gabriela', 'Frida', 'Fortunata', 'Florencia',
    'Flora', 'Flor', 'Fiona', 'Felipa', 'Felicitas', 'Felicia',
    'Fe', 'Fátima', 'Evelina', 'Eva', 'Eugenia', 'Estrella',
    'Estela', 'Estafania', 'Esperanza', 'Esmerelda', 'Encarnición', 'Emilia',
    'Emanuela', 'Ema', 'Elvira', 'Elizabeth', 'Eliana', 'Elena',
    'Elaina', 'Eduarda', 'Dulce', 'Doria', 'Dora', 'Dolores',
    'Desirée', 'Déborah', 'Diana', 'Dalia', 'Daira', 'Dafne',
    'Dana', 'Cristina', 'Cristal', 'Clorinda', 'Cleopatra', 'Claudia',
    'Cintia', 'Celina', 'Celeste', 'Cecelia', 'Catarina', 'Catalina',
    'Casia', 'Casandra', 'Carolina', 'Carlota', 'Carla', 'Carisa',
    'Carina', 'Caridad', 'Candelaria', 'Candela', 'Camila', 'Calíope',
    'Brunela', 'Brita', 'Brisa', 'Brenda', 'Blanca', 'Bibiana',
    'Bianca', 'Betania', 'Bella', 'Belinda', 'Belén', 'Bárbara',
    'Azura', 'Azalea', 'Avril', 'Ava', 'Aurora', 'Alison',
    'Ariel', 'Antonella', 'Anna', 'Angelina', 'Angélica', 'Ángela',
    'Andrea', 'Anastacia', 'Analía', 'Ana', 'Amara', 'Amanda',
    'Amalia', 'Alma', 'Alexia', 'Alejandrina', 'Alejandra', 'Albina',
    'Alana', 'Aida', 'Agustina', 'Adriana', 'Adoración', 'Ada',
    'Abril', 'Abra', 'Abigail',
):
    addfirstname(0, 'Spanish', _given)
# Spanish surnames: one addlastname('Spanish', ...) registration per entry,
# preserving the original listing order.
for _surname in (
    'García', 'Fernández', 'González', 'Rodríguez', 'López', 'Martínez',
    'Sánchez', 'Pérez', 'Martín', 'Gómez', 'Ruiz', 'Hernández',
    'Jiménez', 'Díaz', 'Álvarez', 'Moreno', 'Muñoz', 'Alonso',
    'Gutiérrez', 'Romero', 'Navarro', 'Torres', 'Domínguez', 'Gil',
    'Vázquez', 'Serrano', 'Ramos', 'Blanco', 'Sanz', 'Castro',
    'Suárez', 'Ortega', 'Rubio', 'Molina', 'Delgado', 'Ramírez',
    'Morales', 'Ortiz', 'Marín', 'Iglesias',
):
    addlastname('Spanish', _surname)
# Polish male first names (gender flag 1), registered in the original
# listing order.
for _given in (
    'Jan', 'Andrzej', 'Piotr', 'Krzysztof', 'Stanislaw', 'Tomasz',
    'Pawel', 'Józef', 'Marcin', 'Marek', 'Michal', 'Grzegorz',
    'Jerzy', 'Tadeusz', 'Adam', 'Lukasz', 'Zbigniew', 'Ryszard',
    'Dariusz', 'Henryk', 'Mariusz', 'Kazimierz', 'Wojciech', 'Robert',
    'Mateusz', 'Marian', 'Rafal', 'Jacek', 'Janusz', 'Miroslaw',
    'Maciej', 'Slawomir', 'Jaroslaw', 'Kamil', 'Wieslaw', 'Roman',
    'Wladyslaw', 'Jakub', 'Artur', 'Zdzislaw', 'Edward', 'Mieczyslaw',
    'Damian', 'Dawid', 'Przemyslaw', 'Sebastian', 'Czeslaw', 'Leszek',
    'Daniel', 'Waldemar',
):
    addfirstname(1, 'Polish', _given)
# Polish female first names (gender flag 0), registered in the original
# listing order.
for _given in (
    'Anna', 'Maria', 'Katarzyna', 'Malgorzata', 'Agnieszka', 'Krystyna',
    'Barbara', 'Ewa', 'Elzbieta', 'Zofia', 'Janina', 'Teresa',
    'Joanna', 'Magdalena', 'Monika', 'Jadwiga', 'Danuta', 'Irena',
    'Halina', 'Helena', 'Beata', 'Aleksandra', 'Marta', 'Dorota',
    'Marianna', 'Grazyna', 'Jolanta', 'Stanislawa', 'Iwona', 'Karolina',
    'Bozena', 'Urszula', 'Justyna', 'Renata', 'Alicja', 'Paulina',
    'Sylwia', 'Natalia', 'Wanda', 'Agata', 'Aneta', 'Izabela',
    'Ewelina', 'Marzena', 'Wieslawa', 'Genowefa', 'Patrycja', 'Kazimiera',
    'Edyta', 'Stefania',
):
    addfirstname(0, 'Polish', _given)
# Polish surnames: one addlastname('Polish', ...) registration per entry,
# preserving the original listing order.
for _surname in (
    'Nowak', 'Kowalski', 'Wiśniewski', 'Wójcik', 'Kowalczyk', 'Kamiński',
    'Lewandowski', 'Zieliński', 'Szymański', 'Woźniak', 'Dąbrowski', 'Kozłowski',
    'Jankowski', 'Mazur', 'Kwiatkowski', 'Wojciechowski', 'Krawczyk', 'Kaczmarek',
    'Piotrowski', 'Grabowski',
):
    addlastname('Polish', _surname)
# Austrian male first names (gender flag 1), registered in the original
# listing order.
for _given in (
    'Lukas', 'Tobias', 'Maximilian', 'Alexander', 'David', 'Jonas',
    'Simon', 'Jakob', 'Elias', 'Florian', 'Sebastian', 'Julian',
    'Paul', 'Felix', 'Fabian', 'Leon', 'Raphael', 'Philipp',
    'Moritz', 'Daniel', 'Matthias', 'Luca', 'Nico', 'Niklas',
    'Michael', 'Samuel', 'Noah', 'Dominik', 'Gabriel', 'Benjamin',
    'Johannes', 'Marcel', 'Lorenz', 'Thomas', 'Manuel', 'Valentin',
    'Jan', 'Stefan', 'Konstantin', 'Christoph', 'Matteo', 'Leo',
    'Kilian', 'Marco', 'Clemens', 'Andreas', 'Markus', 'Adrian',
    'Martin', 'Fabio', 'Max', 'Oliver', 'Jonathan', 'Christian',
    'Luis', 'Nicolas', 'Emil', 'Yannick', 'Tim', 'Ben',
):
    addfirstname(1, 'Austrian', _given)
# Austrian female first names (gender flag 0), registered in the original
# listing order. Note 'Anna Lena' is a single two-word entry in the data.
for _given in (
    'Anna', 'Sarah', 'Lena', 'Hannah', 'Julia', 'Sophie',
    'Laura', 'Marie', 'Lea', 'Katharina', 'Johanna', 'Emma',
    'Lara', 'Lisa', 'Sophia', 'Mia', 'Viktoria', 'Valentina',
    'Jana', 'Magdalena', 'Emily', 'Nina', 'Elena', 'Amelie',
    'Clara', 'Emilia', 'Selina', 'Marlene', 'Alina', 'Theresa',
    'Vanessa', 'Lilly', 'Luisa', 'Maja', 'Lina', 'Isabella',
    'Helena', 'Emely', 'Jasmin', 'Miriam', 'Christina', 'Pia',
    'Larissa', 'Franziska', 'Anja', 'Eva', 'Nora', 'Annika',
    'Anna Lena', 'Valerie', 'Maria', 'Paula', 'Melanie', 'Chiara',
    'Elisa', 'Isabel', 'Natalie', 'Angelina', 'Carina',
):
    addfirstname(0, 'Austrian', _given)
# Austrian surnames: one addlastname('Austrian', ...) registration per entry,
# preserving the original listing order.
for _surname in (
    'Gruber', 'Huber', 'Bauer', 'Wagner', 'Pichler', 'Steiner',
    'Moser', 'Mayer', 'Hofer', 'Leitner', 'Berger', 'Fuchs',
    'Eder', 'Fischer', 'Schmid', 'Winkler', 'Weber',
):
    addlastname('Austrian', _surname)
addfirstname(1,'Australian','William')
addfirstname(1,'Australian','Jack')
addfirstname(1,'Australian','Oliver')
addfirstname(1,'Australian','Joshua')
addfirstname(1,'Australian','Thomas')
addfirstname(1,'Australian','Lachlan')
addfirstname(1,'Australian','Cooper')
addfirstname(1,'Australian','Noah')
addfirstname(1,'Australian','Ethan')
addfirstname(1,'Australian','Lucas')
addfirstname(1,'Australian','James')
addfirstname(1,'Australian','Samuel')
addfirstname(1,'Australian','Jacob')
addfirstname(1,'Australian','Liam')
addfirstname(1,'Australian','Alexander')
addfirstname(1,'Australian','Benjamin')
addfirstname(1,'Australian','Max')
addfirstname(1,'Australian','Isaac')
addfirstname(1,'Australian','Daniel')
addfirstname(1,'Australian','Riley')
addfirstname(1,'Australian','Ryan')
addfirstname(1,'Australian','Charlie')
addfirstname(1,'Australian','Tyler')
addfirstname(1,'Australian','Jake')
addfirstname(1,'Australian','Matthew')
addfirstname(1,'Australian','Xavier')
addfirstname(1,'Australian','Harry')
addfirstname(1,'Australian','Jayden')
addfirstname(1,'Australian','Nicholas')
addfirstname(1,'Australian','Harrison')
addfirstname(1,'Australian','Levi')
addfirstname(1,'Australian','Luke')
addfirstname(1,'Australian','Adam')
addfirstname(1,'Australian','Henry')
addfirstname(1,'Australian','Aiden')
addfirstname(1,'Australian','Dylan')
addfirstname(1,'Australian','Oscar')
addfirstname(1,'Australian','Michael')
addfirstname(1,'Australian','Jackson')
addfirstname(1,'Australian','Logan')
addfirstname(1,'Australian','Joseph')
addfirstname(1,'Australian','Blake')
addfirstname(1,'Australian','Nathan')
addfirstname(1,'Australian','Connor')
addfirstname(1,'Australian','Elijah')
addfirstname(1,'Australian','Nate')
addfirstname(1,'Australian','Archie')
addfirstname(1,'Australian','Bailey')
addfirstname(1,'Australian','Marcus')
addfirstname(1,'Australian','Cameron')
addfirstname(1,'Australian','Jordan')
addfirstname(1,'Australian','Zachary')
addfirstname(1,'Australian','Caleb')
addfirstname(1,'Australian','Hunter')
addfirstname(1,'Australian','Ashton')
addfirstname(1,'Australian','Toby')
addfirstname(1,'Australian','Aidan')
addfirstname(1,'Australian','Hayden')
addfirstname(1,'Australian','Mason')
addfirstname(1,'Australian','Hamish')
addfirstname(1,'Australian','Edward')
addfirstname(1,'Australian','Angus')
addfirstname(1,'Australian','Eli')
addfirstname(1,'Australian','Sebastian')
addfirstname(1,'Australian','Christian')
addfirstname(1,'Australian','Patrick')
addfirstname(1,'Australian','Andrew')
addfirstname(1,'Australian','Anthony')
addfirstname(1,'Australian','Luca')
addfirstname(1,'Australian','Kai')
addfirstname(1,'Australian','Beau')
addfirstname(1,'Australian','Alex')
addfirstname(1,'Australian','George')
addfirstname(1,'Australian','Callum')
addfirstname(1,'Australian','Finn')
addfirstname(1,'Australian','Zac')
addfirstname(1,'Australian','Mitchell')
addfirstname(1,'Australian','Jett')
addfirstname(1,'Australian','Jesse')
addfirstname(1,'Australian','Gabriel')
addfirstname(1,'Australian','Leo')
addfirstname(1,'Australian','Declan')
addfirstname(1,'Australian','Charles')
addfirstname(1,'Australian','Jasper')
addfirstname(1,'Australian','Jonathan')
addfirstname(1,'Australian','Aaron')
addfirstname(1,'Australian','Hugo')
addfirstname(1,'Australian','David')
addfirstname(1,'Australian','Christopher')
addfirstname(1,'Australian','Chase')
addfirstname(1,'Australian','Owen')
addfirstname(1,'Australian','Justin')
addfirstname(1,'Australian','Ali')
addfirstname(1,'Australian','Darcy')
addfirstname(1,'Australian','Lincoln')
addfirstname(1,'Australian','Cody')
addfirstname(1,'Australian','Phoenix')
addfirstname(1,'Australian','Sam')
addfirstname(1,'Australian','John')
addfirstname(1,'Australian','Joel')
addfirstname(0,'Australian','Isabella')
addfirstname(0,'Australian','Ruby')
addfirstname(0,'Australian','Chloe')
addfirstname(0,'Australian','Olivia')
addfirstname(0,'Australian','Charlotte')
addfirstname(0,'Australian','Mia')
addfirstname(0,'Australian','Lily')
addfirstname(0,'Australian','Emily')
addfirstname(0,'Australian','Ella')
addfirstname(0,'Australian','Sienna')
addfirstname(0,'Australian','Sophie')
addfirstname(0,'Australian','Amelia')
addfirstname(0,'Australian','Grace')
addfirstname(0,'Australian','Ava')
addfirstname(0,'Australian','Zoe')
addfirstname(0,'Australian','Emma')
addfirstname(0,'Australian','Sophia')
addfirstname(0,'Australian','Matilda')
addfirstname(0,'Australian','Hannah')
addfirstname(0,'Australian','Jessica')
addfirstname(0,'Australian','Lucy')
addfirstname(0,'Australian','Georgia')
addfirstname(0,'Australian','Sarah')
addfirstname(0,'Australian','Abigail')
addfirstname(0,'Australian','Zara')
addfirstname(0,'Australian','Eva')
addfirstname(0,'Australian','Scarlett')
addfirstname(0,'Australian','Jasmine')
addfirstname(0,'Australian','Chelsea')
addfirstname(0,'Australian','Lilly')
addfirstname(0,'Australian','Ivy')
addfirstname(0,'Australian','Isla')
addfirstname(0,'Australian','Evie')
addfirstname(0,'Australian','Isabelle')
addfirstname(0,'Australian','Maddison')
addfirstname(0,'Australian','Layla')
addfirstname(0,'Australian','Summer')
addfirstname(0,'Australian','Annabelle')
addfirstname(0,'Australian','Alexis')
addfirstname(0,'Australian','Elizabeth')
addfirstname(0,'Australian','Bella')
addfirstname(0,'Australian','Holly')
addfirstname(0,'Australian','Lara')
addfirstname(0,'Australian','Madison')
addfirstname(0,'Australian','Alyssa')
addfirstname(0,'Australian','Maya')
addfirstname(0,'Australian','Tahlia')
addfirstname(0,'Australian','Claire')
addfirstname(0,'Australian','Hayley')
addfirstname(0,'Australian','Imogen')
addfirstname(0,'Australian','Jade')
addfirstname(0,'Australian','Ellie')
addfirstname(0,'Australian','Sofia')
addfirstname(0,'Australian','Addison')
addfirstname(0,'Australian','Molly')
addfirstname(0,'Australian','Phoebe')
addfirstname(0,'Australian','Alice')
addfirstname(0,'Australian','Savannah')
addfirstname(0,'Australian','Gabriella')
addfirstname(0,'Australian','Kayla')
addfirstname(0,'Australian','Mikayla')
addfirstname(0,'Australian','Abbey')
addfirstname(0,'Australian','Eliza')
addfirstname(0,'Australian','Willow')
addfirstname(0,'Australian','Alexandra')
addfirstname(0,'Australian','Poppy')
addfirstname(0,'Australian','Samantha')
addfirstname(0,'Australian','Stella')
addfirstname(0,'Australian','Amy')
addfirstname(0,'Australian','Amelie')
addfirstname(0,'Australian','Anna')
addfirstname(0,'Australian','Piper')
addfirstname(0,'Australian','Gemma')
addfirstname(0,'Australian','Isabel')
addfirstname(0,'Australian','Victoria')
addfirstname(0,'Australian','Stephanie')
addfirstname(0,'Australian','Caitlin')
addfirstname(0,'Australian','Heidi')
addfirstname(0,'Australian','Paige')
addfirstname(0,'Australian','Rose')
addfirstname(0,'Australian','Amber')
addfirstname(0,'Australian','Audrey')
addfirstname(0,'Australian','Claudia')
addfirstname(0,'Australian','Taylor')
addfirstname(0,'Australian','Madeline')
addfirstname(0,'Australian','Angelina')
addfirstname(0,'Australian','Natalie')
addfirstname(0,'Australian','Charli')
addfirstname(0,'Australian','Lauren')
addfirstname(0,'Australian','Ashley')
addfirstname(0,'Australian','Violet')
addfirstname(0,'Australian','Mackenzie')
addfirstname(0,'Australian','Abby')
addfirstname(0,'Australian','Skye')
addfirstname(0,'Australian','Lillian')
addfirstname(0,'Australian','Alana')
addfirstname(0,'Australian','Lola')
addfirstname(0,'Australian','Leah')
addfirstname(0,'Australian','Eve')
addfirstname(0,'Australian','Kiara')
addlastname('Australian','Smith')
addlastname('Australian','Jones')
addlastname('Australian','Williams')
addlastname('Australian','Brown')
addlastname('Australian','Wilson')
addlastname('Australian','Taylor')
addlastname('Australian','Johnson')
addlastname('Australian','White')
addlastname('Australian','Martin')
addlastname('Australian','Anderson')
addlastname('Australian','Thompson')
addlastname('Australian','Nguyen')
addlastname('Australian','Thomas')
addlastname('Australian','Walker')
addlastname('Australian','Harris')
addlastname('Australian','Lee')
addlastname('Australian','Ryan')
addlastname('Australian','Robinson')
addlastname('Australian','Kelly')
addlastname('Australian','King')
addfirstname(1,'SwissGerman','Luca')
addfirstname(1,'SwissGerman','Noah')
addfirstname(1,'SwissGerman','David')
addfirstname(1,'SwissGerman','Jan')
addfirstname(1,'SwissGerman','Simon')
addfirstname(1,'SwissGerman','Tim')
addfirstname(1,'SwissGerman','Nico')
addfirstname(1,'SwissGerman','Jonas')
addfirstname(1,'SwissGerman','Lukas')
addfirstname(1,'SwissGerman','Fabian')
addfirstname(1,'SwissGerman','Gian')
addfirstname(1,'SwissGerman','Robin')
addfirstname(1,'SwissGerman','Manuel')
addfirstname(1,'SwissGerman','Marco')
addfirstname(1,'SwissGerman','Silvan')
addfirstname(1,'SwissGerman','Fabio')
addfirstname(1,'SwissGerman','Florian')
addfirstname(1,'SwissGerman','Nicolas')
addfirstname(1,'SwissGerman','Dario')
addfirstname(1,'SwissGerman','Lars')
addfirstname(1,'SwissGerman','Pascal')
addfirstname(1,'SwissGerman','Samuel')
addfirstname(1,'SwissGerman','Michael')
addfirstname(1,'SwissGerman','Daniel')
addfirstname(1,'SwissGerman','Kevin')
addfirstname(1,'SwissGerman','Sven')
addfirstname(1,'SwissGerman','Tobias')
addfirstname(1,'SwissGerman','Elias')
addfirstname(1,'SwissGerman','Julian')
addfirstname(1,'SwissGerman','Janis')
addfirstname(1,'SwissGerman','Benjamin')
addfirstname(1,'SwissGerman','Nils')
addfirstname(1,'SwissGerman','Sandro')
addfirstname(1,'SwissGerman','Leandro')
addfirstname(1,'SwissGerman','Marc')
addfirstname(1,'SwissGerman','Loris')
addfirstname(1,'SwissGerman','Raphaël')
addfirstname(1,'SwissGerman','Timo')
addfirstname(1,'SwissGerman','Elia')
addfirstname(1,'SwissGerman','Alexander')
addfirstname(1,'SwissGerman','Levin')
addfirstname(1,'SwissGerman','Matteo')
addfirstname(1,'SwissGerman','Livio')
addfirstname(1,'SwissGerman','Silas')
addfirstname(0,'SwissGerman','Lea')
addfirstname(0,'SwissGerman','Laura')
addfirstname(0,'SwissGerman','Julia')
addfirstname(0,'SwissGerman','Sarah')
addfirstname(0,'SwissGerman','Chiara')
addfirstname(0,'SwissGerman','Lara')
addfirstname(0,'SwissGerman','Anna')
addfirstname(0,'SwissGerman','Michelle')
addfirstname(0,'SwissGerman','Vanessa')
addfirstname(0,'SwissGerman','Sara')
addfirstname(0,'SwissGerman','Leonie')
addfirstname(0,'SwissGerman','Nina')
addfirstname(0,'SwissGerman','Jasmin')
addfirstname(0,'SwissGerman','Alina')
addfirstname(0,'SwissGerman','Elena')
addfirstname(0,'SwissGerman','Lena')
addfirstname(0,'SwissGerman','Jana')
addfirstname(0,'SwissGerman','Céline')
addfirstname(0,'SwissGerman','Melanie')
addfirstname(0,'SwissGerman','Selina')
addfirstname(0,'SwissGerman','Jessica')
addfirstname(0,'SwissGerman','Lisa')
addfirstname(0,'SwissGerman','Livia')
addfirstname(0,'SwissGerman','Larissa')
addfirstname(0,'SwissGerman','Luana')
addfirstname(0,'SwissGerman','Fabienne')
addfirstname(0,'SwissGerman','Nadine')
addfirstname(0,'SwissGerman','Lorena')
addfirstname(0,'SwissGerman','Anja')
addfirstname(0,'SwissGerman','Noemi')
addfirstname(0,'SwissGerman','Alessia')
addfirstname(0,'SwissGerman','Rahel')
addfirstname(0,'SwissGerman','Sophie')
addfirstname(0,'SwissGerman','Samira')
addfirstname(0,'SwissGerman','Aline')
addfirstname(0,'SwissGerman','Zoé')
addfirstname(0,'SwissGerman','Mia')
addfirstname(0,'SwissGerman','Lia')
addfirstname(0,'SwissGerman','Sarina')
addfirstname(0,'SwissGerman','Salome')
addfirstname(0,'SwissGerman','Svenja')
addfirstname(0,'SwissGerman','Giulia')
addfirstname(0,'SwissGerman','Fiona')
addfirstname(0,'SwissGerman','Sophia')
addfirstname(0,'SwissGerman','Nora')
addfirstname(0,'SwissGerman','Lynn')
addfirstname(0,'SwissGerman','Lina')
addfirstname(0,'SwissGerman','Anina')
addfirstname(0,'SwissGerman','Angelina')
addlastname('SwissGerman','Müller')
addlastname('SwissGerman','Meier')
addlastname('SwissGerman','Schmid')
addlastname('SwissGerman','Keller')
addlastname('SwissGerman','Weber')
addlastname('SwissGerman','Huber')
addlastname('SwissGerman','Meyer')
addlastname('SwissGerman','Schneider')
addlastname('SwissGerman','Steiner')
addlastname('SwissGerman','Fischer')
addlastname('SwissGerman','Brunner')
addlastname('SwissGerman','Baumann')
addlastname('SwissGerman','Gerber')
addlastname('SwissGerman','Frei')
addlastname('SwissGerman','Moser')
addfirstname(1,'Canadian','Ethan')
addfirstname(1,'Canadian','Liam')
addfirstname(1,'Canadian','Lucas')
addfirstname(1,'Canadian','Mason')
addfirstname(1,'Canadian','Logan')
addfirstname(1,'Canadian','Noah')
addfirstname(1,'Canadian','Alexander')
addfirstname(1,'Canadian','Benjamin')
addfirstname(1,'Canadian','Jacob')
addfirstname(1,'Canadian','Jack')
addfirstname(1,'Canadian','Owen')
addfirstname(1,'Canadian','Ryan')
addfirstname(1,'Canadian','Daniel')
addfirstname(1,'Canadian','Oliver')
addfirstname(1,'Canadian','James')
addfirstname(1,'Canadian','Nathan')
addfirstname(1,'Canadian','Jayden')
addfirstname(1,'Canadian','Samuel')
addfirstname(1,'Canadian','Matthew')
addfirstname(1,'Canadian','William')
addfirstname(0,'Canadian','Olivia')
addfirstname(0,'Canadian','Emma')
addfirstname(0,'Canadian','Sophia')
addfirstname(0,'Canadian','Emily')
addfirstname(0,'Canadian','Ava')
addfirstname(0,'Canadian','Ella')
addfirstname(0,'Canadian','Chloe')
addfirstname(0,'Canadian','Isabella')
addfirstname(0,'Canadian','Avery')
addfirstname(0,'Canadian','Hannah')
addfirstname(0,'Canadian','Sophie')
addfirstname(0,'Canadian','Abigail')
addfirstname(0,'Canadian','Charlotte')
addfirstname(0,'Canadian','Lily')
addfirstname(0,'Canadian','Brooklyn')
addfirstname(0,'Canadian','Madison')
addfirstname(0,'Canadian','Isla')
addfirstname(0,'Canadian','Grace')
addfirstname(0,'Canadian','Maya')
addfirstname(0,'Canadian','Amelia')
addlastname('Canadian','Brown')
addlastname('Canadian','Lee')
addlastname('Canadian','Wilson')
addlastname('Canadian','Martin')
addlastname('Canadian','Patel')
addlastname('Canadian','Taylor')
addlastname('Canadian','Wong')
addlastname('Canadian','Campbell')
addlastname('Canadian','Williams')
addlastname('Canadian','Thompson')
addlastname('Canadian','Jones')
addfirstname(1,'FrenchCanadian','William')
addfirstname(1,'FrenchCanadian','Nathan')
addfirstname(1,'FrenchCanadian','Olivier')
addfirstname(1,'FrenchCanadian','Alexis')
addfirstname(1,'FrenchCanadian','Samuel')
addfirstname(1,'FrenchCanadian','Gabriel')
addfirstname(1,'FrenchCanadian','Thomas')
addfirstname(1,'FrenchCanadian','Jacob')
addfirstname(1,'FrenchCanadian','Felix')
addfirstname(1,'FrenchCanadian','Raphael')
addfirstname(1,'FrenchCanadian','Antoine')
addfirstname(1,'FrenchCanadian','Liam')
addfirstname(1,'FrenchCanadian','Noah')
addfirstname(1,'FrenchCanadian','Benjamin')
addfirstname(1,'FrenchCanadian','Xavier')
addfirstname(1,'FrenchCanadian','Emile')
addfirstname(1,'FrenchCanadian','Mathis')
addfirstname(1,'FrenchCanadian','Adam')
addfirstname(1,'FrenchCanadian','Justin')
addfirstname(1,'FrenchCanadian','Zachary')
addfirstname(1,'FrenchCanadian','Charles')
addfirstname(1,'FrenchCanadian','Lucas')
addfirstname(1,'FrenchCanadian','Alexandre')
addfirstname(1,'FrenchCanadian','Logan')
addfirstname(1,'FrenchCanadian','Loïc')
addfirstname(1,'FrenchCanadian','Édouard')
addfirstname(1,'FrenchCanadian','Zack')
addfirstname(1,'FrenchCanadian','Nicolas')
addfirstname(1,'FrenchCanadian','Theo')
addfirstname(1,'FrenchCanadian','Leo')
addfirstname(1,'FrenchCanadian','Anthony')
addfirstname(1,'FrenchCanadian','Etienne')
addfirstname(1,'FrenchCanadian','Victor')
addfirstname(1,'FrenchCanadian','Elliot')
addfirstname(1,'FrenchCanadian','Tristan')
addfirstname(1,'FrenchCanadian','Jeremy')
addfirstname(1,'FrenchCanadian','Vincent')
addfirstname(1,'FrenchCanadian','Jayden')
addfirstname(1,'FrenchCanadian','Louis')
addfirstname(1,'FrenchCanadian','Maxime')
addfirstname(1,'FrenchCanadian','Malik')
addfirstname(1,'FrenchCanadian','Ethan')
addfirstname(1,'FrenchCanadian','Philippe')
addfirstname(1,'FrenchCanadian','Arthur')
addfirstname(1,'FrenchCanadian','Simon')
addfirstname(1,'FrenchCanadian','James')
addfirstname(1,'FrenchCanadian','Zackary')
addfirstname(1,'FrenchCanadian','Arnaud')
addfirstname(1,'FrenchCanadian','Dylan')
addfirstname(1,'FrenchCanadian','David')
addfirstname(1,'FrenchCanadian','Isaac')
addfirstname(1,'FrenchCanadian','Laurent')
addfirstname(1,'FrenchCanadian','Alex')
addfirstname(1,'FrenchCanadian','Mathieu')
addfirstname(1,'FrenchCanadian','Julien')
addfirstname(1,'FrenchCanadian','Eliot')
addfirstname(1,'FrenchCanadian','Damien')
addfirstname(1,'FrenchCanadian','Ludovic')
addfirstname(1,'FrenchCanadian','Hugo')
addfirstname(1,'FrenchCanadian','Loik')
addfirstname(1,'FrenchCanadian','Eli')
addfirstname(1,'FrenchCanadian','Mathéo')
addfirstname(1,'FrenchCanadian','Derek')
addfirstname(1,'FrenchCanadian','Ryan')
addfirstname(1,'FrenchCanadian','Matteo')
addfirstname(1,'FrenchCanadian','Eloi')
addfirstname(1,'FrenchCanadian','Rafael')
addfirstname(1,'FrenchCanadian','Michael')
addfirstname(1,'FrenchCanadian','Mikael')
addfirstname(1,'FrenchCanadian','Henri')
addfirstname(1,'FrenchCanadian','Eliott')
addfirstname(1,'FrenchCanadian','Daniel')
addfirstname(1,'FrenchCanadian','Louka')
addfirstname(1,'FrenchCanadian','Rayan')
addfirstname(1,'FrenchCanadian','Caleb')
addfirstname(1,'FrenchCanadian','Joshua')
addfirstname(1,'FrenchCanadian','Cedric')
addfirstname(1,'FrenchCanadian','Tommy')
addfirstname(1,'FrenchCanadian','Evan')
addfirstname(1,'FrenchCanadian','Luca')
addfirstname(1,'FrenchCanadian','Nolan')
addfirstname(1,'FrenchCanadian','Guillaume')
addfirstname(1,'FrenchCanadian','Emrick')
addfirstname(1,'FrenchCanadian','Mathias')
addfirstname(1,'FrenchCanadian','Charles-Antoine')
addfirstname(1,'FrenchCanadian','Rémi')
addfirstname(1,'FrenchCanadian','Jordan')
addfirstname(1,'FrenchCanadian','Christopher')
addfirstname(1,'FrenchCanadian','Jules')
addfirstname(1,'FrenchCanadian','Mathys')
addfirstname(1,'FrenchCanadian','Nathaniel')
addfirstname(1,'FrenchCanadian','Milan')
addfirstname(1,'FrenchCanadian','Hayden')
addfirstname(1,'FrenchCanadian','Luka')
addfirstname(1,'FrenchCanadian','Matthew')
addfirstname(1,'FrenchCanadian','Maxim')
addfirstname(1,'FrenchCanadian','Jonathan')
addfirstname(1,'FrenchCanadian','Christophe')
addfirstname(1,'FrenchCanadian','Enzo')
addfirstname(1,'FrenchCanadian','Leonard')
addfirstname(0,'FrenchCanadian','Emma')
addfirstname(0,'FrenchCanadian','Lea')
addfirstname(0,'FrenchCanadian','Olivia')
addfirstname(0,'FrenchCanadian','Florence')
addfirstname(0,'FrenchCanadian','Alice')
addfirstname(0,'FrenchCanadian','Zoe')
addfirstname(0,'FrenchCanadian','Rosalie')
addfirstname(0,'FrenchCanadian','Juliette')
addfirstname(0,'FrenchCanadian','Camille')
addfirstname(0,'FrenchCanadian','Mia')
addfirstname(0,'FrenchCanadian','Laurence')
addfirstname(0,'FrenchCanadian','Charlie')
addfirstname(0,'FrenchCanadian','Jade')
addfirstname(0,'FrenchCanadian','Alicia')
addfirstname(0,'FrenchCanadian','Anaïs')
addfirstname(0,'FrenchCanadian','Victoria')
addfirstname(0,'FrenchCanadian','Maelie')
addfirstname(0,'FrenchCanadian','Beatrice')
addfirstname(0,'FrenchCanadian','Eva')
addfirstname(0,'FrenchCanadian','Chloe')
addfirstname(0,'FrenchCanadian','Maeva')
addfirstname(0,'FrenchCanadian','Noémie')
addfirstname(0,'FrenchCanadian','Sarah')
addfirstname(0,'FrenchCanadian','Charlotte')
addfirstname(0,'FrenchCanadian','Annabelle')
addfirstname(0,'FrenchCanadian','Gabrielle')
addfirstname(0,'FrenchCanadian','Sofia')
addfirstname(0,'FrenchCanadian','Coralie')
addfirstname(0,'FrenchCanadian','Elodie')
addfirstname(0,'FrenchCanadian','Eve')
addfirstname(0,'FrenchCanadian','Megane')
addfirstname(0,'FrenchCanadian','Emy')
addfirstname(0,'FrenchCanadian','Elizabeth')
addfirstname(0,'FrenchCanadian','Maika')
addfirstname(0,'FrenchCanadian','Rose')
addfirstname(0,'FrenchCanadian','Alexia')
addfirstname(0,'FrenchCanadian','Emilie')
addfirstname(0,'FrenchCanadian','Léonie')
addfirstname(0,'FrenchCanadian','Raphaëlle')
addfirstname(0,'FrenchCanadian','Aurelie')
addfirstname(0,'FrenchCanadian','Laurie')
addfirstname(0,'FrenchCanadian','Clara')
addfirstname(0,'FrenchCanadian','Jasmine')
addfirstname(0,'FrenchCanadian','Marianne')
addfirstname(0,'FrenchCanadian','Megan')
addfirstname(0,'FrenchCanadian','Amelia')
addfirstname(0,'FrenchCanadian','Delphine')
addfirstname(0,'FrenchCanadian','Melodie')
addfirstname(0,'FrenchCanadian','Sophia')
addfirstname(0,'FrenchCanadian','Maya')
addfirstname(0,'FrenchCanadian','Ariane')
addfirstname(0,'FrenchCanadian','Magalie')
addfirstname(0,'FrenchCanadian','Justine')
addfirstname(0,'FrenchCanadian','Julia')
addfirstname(0,'FrenchCanadian','Leanne')
addfirstname(0,'FrenchCanadian','Marilou')
addfirstname(0,'FrenchCanadian','Alycia')
addfirstname(0,'FrenchCanadian','Jeanne')
addfirstname(0,'FrenchCanadian','Mila')
addfirstname(0,'FrenchCanadian','Sophie')
addfirstname(0,'FrenchCanadian','Lily')
addfirstname(0,'FrenchCanadian','Océane')
addfirstname(0,'FrenchCanadian','Lily-Rose')
addfirstname(0,'FrenchCanadian','Adele')
addfirstname(0,'FrenchCanadian','Daphnée')
addfirstname(0,'FrenchCanadian','Eloise')
addfirstname(0,'FrenchCanadian','Sara')
addfirstname(0,'FrenchCanadian','Livia')
addfirstname(0,'FrenchCanadian','Eliane')
addfirstname(0,'FrenchCanadian','Romy')
addfirstname(0,'FrenchCanadian','Kayla')
addfirstname(0,'FrenchCanadian','Naomie')
addfirstname(0,'FrenchCanadian','Mathilde')
addfirstname(0,'FrenchCanadian','Maely')
addfirstname(0,'FrenchCanadian','Mya')
addfirstname(0,'FrenchCanadian','Emily')
addfirstname(0,'FrenchCanadian','Oceanne')
addfirstname(0,'FrenchCanadian','Yasmine')
addfirstname(0,'FrenchCanadian','Maude')
addfirstname(0,'FrenchCanadian','Lea-Rose')
addfirstname(0,'FrenchCanadian','Amelie')
addfirstname(0,'FrenchCanadian','Elisabeth')
addfirstname(0,'FrenchCanadian','Frèdèrique')
addfirstname(0,'FrenchCanadian','Julianne')
addfirstname(0,'FrenchCanadian','Alyssa')
addfirstname(0,'FrenchCanadian','Stella')
addfirstname(0,'FrenchCanadian','Alexandra')
addfirstname(0,'FrenchCanadian','Lina')
addfirstname(0,'FrenchCanadian','Flavie')
addfirstname(0,'FrenchCanadian','Anabelle')
addfirstname(0,'FrenchCanadian','Arielle')
addfirstname(0,'FrenchCanadian','Amy')
addfirstname(0,'FrenchCanadian','Ophélie')
addfirstname(0,'FrenchCanadian','Alyson')
addfirstname(0,'FrenchCanadian','Ellie')
addfirstname(0,'FrenchCanadian','Simone')
addfirstname(0,'FrenchCanadian','Maelle')
addfirstname(0,'FrenchCanadian','Lydia')
addfirstname(0,'FrenchCanadian','Melody')
addfirstname(0,'FrenchCanadian','Evelyne')
addlastname('FrenchCanadian','Tremblay')
addlastname('FrenchCanadian','Roy')
addlastname('FrenchCanadian','Gagnon')
addlastname('FrenchCanadian','Côté')
addlastname('FrenchCanadian','Bouchard')
addlastname('FrenchCanadian','Gauthier')
addlastname('FrenchCanadian','Morin')
addlastname('FrenchCanadian','Lavoie')
addlastname('FrenchCanadian','Fortin')
addlastname('FrenchCanadian','Gagné')
| cjbauer/idmas | names.py | Python | agpl-3.0 | 198,168 | [
"Amber",
"Brian",
"CRYSTAL",
"Dalton",
"Desmond",
"Jaguar"
] | b98cdcce74ce8e2c7d69a3ebe011dfaaa377ecb36d7a373d2499614cc3bb56f7 |
# -*- coding: utf-8 -*-
"""INI simulator with time-to-first-spike code and a dynamic threshold.
@author: rbodo
"""
import sys
from tensorflow import keras
import numpy as np
from snntoolbox.simulation.target_simulators.\
INI_temporal_mean_rate_target_sim import SNN as SNN_
remove_classifier = False
class SNN(SNN_):
    """
    The compiled spiking neural network, using layers derived from
    Keras base classes (see
    `snntoolbox.simulation.backends.inisim.ttfs_dyn_thresh`).

    Aims at simulating the network on a self-implemented Integrate-and-Fire
    simulator using a timestepped approach.

    NOTE(review): indentation of this class was lost in transit; the block
    structure below is reconstructed from the statement order and from the
    parent class `INI_temporal_mean_rate_target_sim.SNN` — confirm against
    upstream snntoolbox.
    """

    def __init__(self, config, queue=None):
        # Delegate all setup (batch size, logging keys, counters, ...) to
        # the temporal-mean-rate base simulator.
        SNN_.__init__(self, config, queue)

    def simulate(self, **kwargs):
        """Run the timestepped simulation on one batch.

        Expected kwargs (as passed by the snntoolbox pipeline):
            x_b_l:   input batch (scaled by the timestep ``self._dt``).
            truth_b: ground-truth labels, used only for progress reporting.
            dvs_gen: event-frame generator, read only for AEDAT input.

        Returns the cumulative output spike counts, shape
        (batch, classes, timesteps).
        """

        from snntoolbox.utils.utils import echo
        from snntoolbox.simulation.utils import get_layer_synaptic_operations

        input_b_l = kwargs[str('x_b_l')] * self._dt

        output_b_l_t = np.zeros((self.batch_size, self.num_classes,
                                 self._num_timesteps))

        print("Current accuracy of batch:")

        # Loop through simulation time.
        self.avg_rate = 0
        self._input_spikecount = 0
        add_threshold_ops = False  # Todo: Add option in config file.
        spike_flags_b_l = None
        if add_threshold_ops:
            # One buffer per spiking layer; the second half of each layer's
            # spiketrain tensor holds "prospective" (threshold-crossing)
            # state whose changes are counted as extra synaptic operations.
            prospective_spikes = [
                np.zeros(l.output_shape)[:self.batch_size] for l in
                self.snn.layers if hasattr(l, 'spiketrain')
                and l.spiketrain is not None]
        else:
            prospective_spikes = []
        for sim_step_int in range(self._num_timesteps):
            sim_step = (sim_step_int + 1) * self._dt
            self.set_time(sim_step)

            # Generate new input in case it changes with each simulation
            # step.
            if self._poisson_input:
                input_b_l = self.get_poisson_frame_batch(kwargs[str('x_b_l')])
            elif self._is_aedat_input:
                input_b_l = kwargs[str('dvs_gen')].next_eventframe_batch()

            # The network runs on a doubled batch; the second half carries
            # auxiliary state (see the ttfs_dyn_thresh backend).
            new_input = np.concatenate([input_b_l, np.zeros_like(input_b_l)])

            # Main step: Propagate input through network and record output
            # spikes.
            out_spikes = self.snn.predict_on_batch(new_input)[:self.batch_size]

            # Add current spikes to previous spikes.
            if remove_classifier:  # Need to flatten output.
                output_b_l_t[:, :, sim_step_int] = np.argmax(np.reshape(
                    out_spikes > 0, (out_spikes.shape[0], -1)), 1)
            else:
                output_b_l_t[:, :, sim_step_int] = out_spikes > 0

            # Record neuron variables.
            i = j = 0
            for layer in self.snn.layers:
                # Excludes Input, Flatten, Concatenate, etc:
                if hasattr(layer, 'spiketrain') \
                        and layer.spiketrain is not None:
                    tmp = keras.backend.get_value(layer.spiketrain)
                    spiketrains_b_l = tmp[:self.batch_size]
                    if add_threshold_ops:
                        spike_flags_b_l = np.abs(tmp[self.batch_size:] -
                                                 prospective_spikes[i])
                        prospective_spikes[i] = tmp[self.batch_size:]
                    self.avg_rate += np.count_nonzero(spiketrains_b_l)
                    if self.spiketrains_n_b_l_t is not None:
                        self.spiketrains_n_b_l_t[i][0][
                            Ellipsis, sim_step_int] = spiketrains_b_l
                    if self.synaptic_operations_b_t is not None:
                        self.synaptic_operations_b_t[:, sim_step_int] += \
                            get_layer_synaptic_operations(spiketrains_b_l,
                                                          self.fanout[i + 1])
                        if add_threshold_ops:
                            # Count the threshold-update traffic as well.
                            self.synaptic_operations_b_t[:, sim_step_int] += \
                                get_layer_synaptic_operations(
                                    spike_flags_b_l, self.fanout[i + 1])
                    if self.neuron_operations_b_t is not None:
                        self.neuron_operations_b_t[:, sim_step_int] += \
                            self.num_neurons_with_bias[i + 1]
                    i += 1
                if hasattr(layer, 'mem') and self.mem_n_b_l_t is not None:
                    self.mem_n_b_l_t[j][0][Ellipsis, sim_step_int] = \
                        keras.backend.get_value(layer.mem)[self.batch_size:]
                    j += 1

            if 'input_b_l_t' in self._log_keys:
                self.input_b_l_t[Ellipsis, sim_step_int] = input_b_l
            if self._poisson_input or self._is_aedat_input:
                if self.synaptic_operations_b_t is not None:
                    self.synaptic_operations_b_t[:, sim_step_int] += \
                        get_layer_synaptic_operations(input_b_l,
                                                      self.fanout[0])
            else:
                if self.neuron_operations_b_t is not None:
                    if sim_step_int == 0:
                        # Constant input: the first layer's MACs occur only
                        # once, at the first timestep.
                        self.neuron_operations_b_t[:, 0] += self.fanin[1] * \
                            self.num_neurons[1] * np.ones(self.batch_size) * 2

            # Progress report: classify by accumulated spike counts;
            # samples with no output spikes yet count as class -1.
            spike_sums_b_l = np.sum(output_b_l_t, 2)
            undecided_b = np.sum(spike_sums_b_l, 1) == 0
            guesses_b = np.argmax(spike_sums_b_l, 1)
            none_class_b = -1 * np.ones(self.batch_size)
            clean_guesses_b = np.where(undecided_b, none_class_b, guesses_b)
            current_acc = np.mean(kwargs[str('truth_b')] == clean_guesses_b)
            if self.config.getint('output', 'verbose') > 0:
                if sim_step % 1 == 0:
                    echo('{:.2%}_'.format(current_acc))
            else:
                sys.stdout.write('\r{:>7.2%}'.format(current_acc))
                sys.stdout.flush()

            if self.config.getboolean('conversion', 'softmax_to_relu') and \
                    all(np.count_nonzero(output_b_l_t, (1, 2)) >= self.top_k):
                # With TTFS coding the top-k decision is fixed once every
                # sample has emitted enough output spikes.
                print("Finished early.")
                break

        self.avg_rate /= self.batch_size * np.sum(self.num_neurons) * \
            self._num_timesteps

        if self.spiketrains_n_b_l_t is None:
            print("Average spike rate: {} spikes per simulation time step."
                  "".format(self.avg_rate))

        return np.cumsum(output_b_l_t, 2)

    def load(self, path, filename):
        """Load a stored SNN; behavior is inherited unchanged."""
        SNN_.load(self, path, filename)
| NeuromorphicProcessorProject/snn_toolbox | snntoolbox/simulation/target_simulators/INI_ttfs_dyn_thresh_target_sim.py | Python | mit | 6,562 | [
"NEURON"
] | dc9a6a527dfce3cb22d6b09cc2332ead5ff361d3fab2f1317bd90cc16ade2f3c |
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File :    dirac-wms-job-peek
# Author :  Stuart Paterson
########################################################################
"""
Peek StdOut of the given DIRAC job
"""
__RCSID__ = "$Id$"

import DIRAC
from DIRAC.Core.Base import Script

# Standard DIRAC script preamble: usage string, CLI parsing, positional args.
Script.setUsageMessage('\n'.join([__doc__.split('\n')[1],
                                  'Usage:',
                                  '  %s [option|cfgfile] ... JobID ...' % Script.scriptName,
                                  'Arguments:',
                                  '  JobID:    DIRAC Job ID']))
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()

if len(args) < 1:
    Script.showHelp()

# Import after parseCommandLine, per DIRAC convention.
from DIRAC.Interfaces.API.Dirac import Dirac
dirac = Dirac()
exitCode = 0
errorList = []

# Peek each requested job; remember failures but keep going so one bad
# JobID does not hide the others.
for job in args:
    result = dirac.peek(job, printout=True)
    if not result['OK']:
        errorList.append((job, result['Message']))
        exitCode = 2

for error in errorList:
    print("ERROR %s: %s" % error)

DIRAC.exit(exitCode)
| avedaee/DIRAC | Interfaces/scripts/dirac-wms-job-peek.py | Python | gpl-3.0 | 1,156 | [
"DIRAC"
] | 8f89846ad1cf0fbfcc853267f954f5056711a0746b720ff87a106c8825035513 |
#!/usr/bin/python
'''
Created on Oct 23, 2013
@author: drnorris
'''
from Star import *
import random
import math
# Module-level tally of mass distributions plus a fixed RNG seed so repeated
# runs place stars identically.
massDistributions = []
random.seed(a=0)


def seq(start, stop, step=1):
    """Return [start, start+step, ..., stop] (inclusive), or [] when the
    range spans fewer than two steps — mirrors R's seq()."""
    n = int(round((stop - start) / float(step)))
    if n > 1:
        return [start + step * i for i in range(n + 1)]
    return []


class Galaxy():
    """A galaxy: a massive center plus a cloud of `Star` objects."""

    def __init__(self, position, velocity, centerMass):
        # (x, y) position/velocity of the galactic center and the mass
        # concentrated there. Stars are added later via createStars().
        self.position = position
        self.velocity = velocity
        self.centerMass = centerMass
        self.stars = []

    def getStat(self, what):
        """Return a summary tuple: 'static' omits velocity, 'dynamic'
        includes it. Any other value returns None."""
        if what == "static":
            return (self.__class__.__name__, self.position, self.centerMass)
        if what == "dynamic":
            return (self.__class__.__name__, self.position, self.centerMass,
                    self.velocity)

    def getDiameter(self):
        """Return the largest star coordinate seen (0 when there are no
        stars).

        NOTE(review): repaired from the original, which lacked `self`, the
        loop colon, and a return statement. Assumes Star.getPos() yields
        the star's coordinates — confirm against Star.py.
        """
        largest = 0
        for star in self.stars:
            for pos in star.getPos():
                if pos > largest:
                    largest = pos
        return largest

    def createStars(self, count):
        """Add `count` stars with positions drawn from a Gaussian centered
        at (1, 1), zero velocity and unit mass."""
        for c in range(count):
            xpos = random.gauss(1, 1)
            ypos = random.gauss(1, 1)
            self.stars.append(Star(c, (xpos, ypos), (0, 0), 1))

    def getStarStats(self, what):
        """Return the getStat(what) tuple of every star, in creation
        order."""
        stats = []
        for star in self.stars:
            stats.append(star.getStat(what))
        return stats

    def calcMassDist(self, dtype, granularity):
        """Compute a mass distribution of the stars.

        The original implementation was left unfinished (its inner loop
        body was empty and did not parse); this keeps the intended
        structure as a syntactically valid skeleton so the module imports.
        """
        extent = math.ceil(self.getDiameter())
        if dtype == "radial":
            pass  # TODO: radial binning was never implemented
        if dtype == "grid":
            gap = granularity / 2
            for xloc in seq(-extent, extent, granularity):
                for yloc in seq(-extent, extent, granularity):
                    # TODO(review): original read "for star in" and stopped;
                    # decide how star masses should be accumulated into the
                    # (xloc, yloc) cell (cell half-width is `gap`).
                    pass
"Galaxy"
] | 9636e91398d231b4e01cd4cd333223d0b8327f3ccedaef1929c8e84467348eba |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2011 Doug Blank <doug.blank@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Package providing filter rules for Gramps.
"""
from ._disconnected import Disconnected
from ._everyone import Everyone
from ._familywithincompleteevent import FamilyWithIncompleteEvent
from ._hasaddress import HasAddress
from ._hasaddresstext import HasAddressText
from ._hasalternatename import HasAlternateName
from ._hasassociation import HasAssociation
from ._hasattribute import HasAttribute
from ._hasbirth import HasBirth
from ._hascitation import HasCitation
from ._hascommonancestorwith import HasCommonAncestorWith
from ._hascommonancestorwithfiltermatch import HasCommonAncestorWithFilterMatch
from ._hasdeath import HasDeath
from ._hasevent import HasEvent
from ._hasfamilyattribute import HasFamilyAttribute
from ._hasfamilyevent import HasFamilyEvent
from ._hasgallery import HavePhotos
from ._hasidof import HasIdOf
from ._haslds import HasLDS
from ._hasnameof import HasNameOf
from ._hasnameorigintype import HasNameOriginType
from ._hasnametype import HasNameType
from ._hasnickname import HasNickname
from ._hasnote import HasNote
from ._hasnotematchingsubstringof import HasNoteMatchingSubstringOf
from ._hasnoteregexp import HasNoteRegexp
from ._hasrelationship import HasRelationship
from ._hassourcecount import HasSourceCount
from ._hassourceof import HasSourceOf
from ._hastag import HasTag
from ._hastextmatchingregexpof import HasTextMatchingRegexpOf
from ._hastextmatchingsubstringof import HasTextMatchingSubstringOf
from ._hasunknowngender import HasUnknownGender
from ._havealtfamilies import HaveAltFamilies
from ._havechildren import HaveChildren
from ._incompletenames import IncompleteNames
from ._isancestorof import IsAncestorOf
from ._isancestoroffiltermatch import IsAncestorOfFilterMatch
from ._isbookmarked import IsBookmarked
from ._ischildoffiltermatch import IsChildOfFilterMatch
from ._isdefaultperson import IsDefaultPerson
from ._isdescendantfamilyof import IsDescendantFamilyOf
from ._isdescendantfamilyoffiltermatch import IsDescendantFamilyOfFilterMatch
from ._isdescendantof import IsDescendantOf
from ._isdescendantoffiltermatch import IsDescendantOfFilterMatch
from ._isduplicatedancestorof import IsDuplicatedAncestorOf
from ._isfemale import IsFemale
from ._islessthannthgenerationancestorof import \
IsLessThanNthGenerationAncestorOf
from ._islessthannthgenerationancestorofbookmarked import \
IsLessThanNthGenerationAncestorOfBookmarked
from ._islessthannthgenerationancestorofdefaultperson import \
IsLessThanNthGenerationAncestorOfDefaultPerson
from ._islessthannthgenerationdescendantof import \
IsLessThanNthGenerationDescendantOf
from ._ismale import IsMale
from ._ismorethannthgenerationancestorof import \
IsMoreThanNthGenerationAncestorOf
from ._ismorethannthgenerationdescendantof import \
IsMoreThanNthGenerationDescendantOf
from ._isparentoffiltermatch import IsParentOfFilterMatch
from ._issiblingoffiltermatch import IsSiblingOfFilterMatch
from ._isspouseoffiltermatch import IsSpouseOfFilterMatch
from ._iswitness import IsWitness
from ._matchesfilter import MatchesFilter
from ._matcheseventfilter import MatchesEventFilter
from ._matchessourceconfidence import MatchesSourceConfidence
from ._missingparent import MissingParent
from ._multiplemarriages import MultipleMarriages
from ._nevermarried import NeverMarried
from ._nobirthdate import NoBirthdate
from ._nodeathdate import NoDeathdate
from ._peopleprivate import PeoplePrivate
from ._peoplepublic import PeoplePublic
from ._personwithincompleteevent import PersonWithIncompleteEvent
from ._probablyalive import ProbablyAlive
from ._relationshippathbetween import RelationshipPathBetween
from ._deeprelationshippathbetween import DeepRelationshipPathBetween
from ._relationshippathbetweenbookmarks import RelationshipPathBetweenBookmarks
from ._searchname import SearchName
from ._regexpname import RegExpName
from ._matchidof import MatchIdOf
from ._regexpidof import RegExpIdOf
from ._changedsince import ChangedSince
from ._isrelatedwith import IsRelatedWith
from ._hassoundexname import HasSoundexName
#-------------------------------------------------------------------------
#
# This is used by Custom Filter Editor tool
#
#-------------------------------------------------------------------------
editor_rule_list = [
Everyone,
IsFemale,
HasUnknownGender,
IsMale,
IsDefaultPerson,
IsBookmarked,
HasAlternateName,
HasAddress,
HasAddressText,
HasAssociation,
HasIdOf,
HasLDS,
HasNameOf,
HasNameOriginType,
HasNameType,
HasNickname,
HasRelationship,
HasDeath,
HasBirth,
HasCitation,
HasEvent,
HasFamilyEvent,
HasAttribute,
HasFamilyAttribute,
HasTag,
HasSourceCount,
HasSourceOf,
HaveAltFamilies,
HavePhotos,
HaveChildren,
IncompleteNames,
NeverMarried,
MultipleMarriages,
NoBirthdate,
NoDeathdate,
PersonWithIncompleteEvent,
FamilyWithIncompleteEvent,
ProbablyAlive,
PeoplePrivate,
PeoplePublic,
IsWitness,
IsDescendantOf,
IsDescendantFamilyOf,
IsDescendantFamilyOfFilterMatch,
IsLessThanNthGenerationAncestorOfDefaultPerson,
IsDescendantOfFilterMatch,
IsDuplicatedAncestorOf,
IsLessThanNthGenerationDescendantOf,
IsMoreThanNthGenerationDescendantOf,
IsAncestorOf,
IsAncestorOfFilterMatch,
IsLessThanNthGenerationAncestorOf,
IsLessThanNthGenerationAncestorOfBookmarked,
IsMoreThanNthGenerationAncestorOf,
HasCommonAncestorWith,
HasCommonAncestorWithFilterMatch,
MatchesFilter,
MatchesEventFilter,
MatchesSourceConfidence,
MissingParent,
IsChildOfFilterMatch,
IsParentOfFilterMatch,
IsSpouseOfFilterMatch,
IsSiblingOfFilterMatch,
RelationshipPathBetween,
DeepRelationshipPathBetween,
RelationshipPathBetweenBookmarks,
HasTextMatchingSubstringOf,
HasNote,
HasNoteRegexp,
RegExpIdOf,
RegExpName,
Disconnected,
ChangedSince,
IsRelatedWith,
HasSoundexName,
]
| SNoiraud/gramps | gramps/gen/filters/rules/person/__init__.py | Python | gpl-2.0 | 6,985 | [
"Brian"
] | 1ba4ad9c395f84bb01fa93f3e54774c1ab5aca38b947334795cce69cecf7d4be |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Proxy class for the GRAMPS databases. Filter out all living people.
"""
#-------------------------------------------------------------------------
#
# Python libraries
#
#-------------------------------------------------------------------------
from itertools import ifilter
#-------------------------------------------------------------------------
#
# GRAMPS libraries
#
#-------------------------------------------------------------------------
from proxybase import ProxyDbBase
from ..lib import Date, Person, Name, Surname, NameOriginType
from ..utils.alive import probably_alive
from ..config import config
#-------------------------------------------------------------------------
#
# LivingProxyDb
#
#-------------------------------------------------------------------------
class LivingProxyDb(ProxyDbBase):
"""
A proxy to a Gramps database. This proxy will act like a Gramps database,
but all living people will be hidden from the user.
"""
MODE_EXCLUDE_ALL = 0
MODE_INCLUDE_LAST_NAME_ONLY = 1
MODE_INCLUDE_FULL_NAME_ONLY = 2
def __init__(self, dbase, mode, current_year=None, years_after_death=0):
"""
Create a new LivingProxyDb instance.
@param dbase: The database to be a proxy for
@type dbase: DbBase
@param mode: The method for handling living people.
LivingProxyDb.MODE_EXCLUDE_ALL will remove living people altogether.
LivingProxyDb.MODE_INCLUDE_LAST_NAME_ONLY will remove all information
and change their given name to "[Living]" or what has been set in
Preferences -> Text.
LivingProxyDb.MODE_INCLUDE_FULL_NAME_ONLY will remove all information
but leave the entire name intact.
@type mode: int
@param current_year: The current year to use for living determination.
If None is supplied, the current year will be found from the system.
@type current_year: int or None
@param years_after_death: The number of years after a person's death to
still consider them living.
@type years_after_death: int
"""
ProxyDbBase.__init__(self, dbase)
self.mode = mode
if current_year is not None:
self.current_date = Date()
self.current_date.set_year(current_year)
else:
self.current_date = None
self.years_after_death = years_after_death
def get_person_from_handle(self, handle):
"""
Finds a Person in the database from the passed gramps ID.
If no such Person exists, None is returned.
"""
person = self.db.get_person_from_handle(handle)
if person and self.__is_living(person):
if self.mode == self.MODE_EXCLUDE_ALL:
person = None
else:
person = self.__restrict_person(person)
return person
def get_family_from_handle(self, handle):
"""
Finds a Family in the database from the passed handle.
If no such Family exists, None is returned.
"""
family = self.db.get_family_from_handle(handle)
family = self.__remove_living_from_family(family)
return family
def iter_people(self):
"""
Protected version of iter_people
"""
for person in ifilter(None, self.db.iter_people()):
if not(self.__is_living(person) and
self.mode == self.MODE_EXCLUDE_ALL):
yield self.__restrict_person(person)
else:
yield person
def get_person_from_gramps_id(self, val):
"""
Finds a Person in the database from the passed GRAMPS ID.
If no such Person exists, None is returned.
"""
person = self.db.get_person_from_gramps_id(val)
if person and self.__is_living(person):
if self.mode == self.MODE_EXCLUDE_ALL:
return None
else:
return self.__restrict_person(person)
else:
return person
def get_family_from_gramps_id(self, val):
"""
Finds a Family in the database from the passed GRAMPS ID.
If no such Family exists, None is returned.
"""
family = self.db.get_family_from_gramps_id(val)
family = self.__remove_living_from_family(family)
return family
def include_person(self, handle):
if self.mode == self.MODE_EXCLUDE_ALL:
person = self.get_unfiltered_person(handle)
if person and self.__is_living(person):
return False
return True
def get_default_person(self):
"""returns the default Person of the database"""
person_handle = self.db.get_default_handle()
return self.get_person_from_handle(person_handle)
def get_default_handle(self):
"""returns the default Person of the database"""
person_handle = self.db.get_default_handle()
if self.get_person_from_handle(person_handle):
return person_handle
return None
def has_person_handle(self, handle):
"""
returns True if the handle exists in the current Person database.
"""
if self.get_person_from_handle(handle):
return True
return False
def find_backlink_handles(self, handle, include_classes=None):
"""
Find all objects that hold a reference to the object handle.
Returns an iterator over a list of (class_name, handle) tuples.
@param handle: handle of the object to search for.
@type handle: database handle
@param include_classes: list of class names to include in the results.
Default: None means include all classes.
@type include_classes: list of class names
This default implementation does a sequential scan through all
the primary object databases and is very slow. Backends can
override this method to provide much faster implementations that
make use of additional capabilities of the backend.
Note that this is a generator function, it returns a iterator for
use in loops. If you want a list of the results use:
> result_list = list(find_backlink_handles(handle))
"""
handle_itr = self.db.find_backlink_handles(handle, include_classes)
for (class_name, handle) in handle_itr:
if class_name == 'Person':
if not self.get_person_from_handle(handle):
continue
yield (class_name, handle)
return
def __is_living(self, person):
"""
Check if a person is considered living.
Returns True if the person is considered living.
Returns False if the person is not considered living.
"""
person_handle = person.get_handle()
unfil_person = self.get_unfiltered_person(person_handle)
return probably_alive( unfil_person,
self.db,
self.current_date,
self.years_after_death )
def __remove_living_from_family(self, family):
"""
Remove information from a family that pertains to living people.
Returns a family instance with information about living people removed.
Returns None if family is None.
"""
if family is None:
return None
parent_is_living = False
father_handle = family.get_father_handle()
if father_handle:
father = self.db.get_person_from_handle(father_handle)
if father and self.__is_living(father):
parent_is_living = True
if self.mode == self.MODE_EXCLUDE_ALL:
family.set_father_handle(None)
mother_handle = family.get_mother_handle()
if mother_handle:
mother = self.db.get_person_from_handle(mother_handle)
if mother and self.__is_living(mother):
parent_is_living = True
if self.mode == self.MODE_EXCLUDE_ALL:
family.set_mother_handle(None)
if parent_is_living:
# Clear all events for families where a parent is living.
family.set_event_ref_list([])
if self.mode == self.MODE_EXCLUDE_ALL:
for child_ref in family.get_child_ref_list():
child_handle = child_ref.get_reference_handle()
child = self.db.get_person_from_handle(child_handle)
if child and self.__is_living(child):
family.remove_child_ref(child_ref)
return family
def __restrict_person(self, person):
"""
Remove information from a person and replace the first name with
"[Living]" or what has been set in Preferences -> Text.
"""
new_person = Person()
new_name = Name()
old_name = person.get_primary_name()
new_name.set_group_as(old_name.get_group_as())
new_name.set_sort_as(old_name.get_sort_as())
new_name.set_display_as(old_name.get_display_as())
new_name.set_type(old_name.get_type())
if self.mode == self.MODE_INCLUDE_LAST_NAME_ONLY:
new_name.set_first_name(config.get('preferences.private-given-text'))
new_name.set_title("")
else: # self.mode == self.MODE_INCLUDE_FULL_NAME_ONLY
new_name.set_first_name(old_name.get_first_name())
new_name.set_suffix(old_name.get_suffix())
new_name.set_title(old_name.get_title())
surnlst = []
for surn in old_name.get_surname_list():
surname = Surname(source=surn)
if int(surname.origintype) in [NameOriginType.PATRONYMIC,
NameOriginType.MATRONYMIC]:
surname.set_surname(config.get('preferences.private-surname-text'))
surnlst.append(surname)
new_name.set_surname_list(surnlst)
new_person.set_primary_name(new_name)
new_person.set_privacy(person.get_privacy())
new_person.set_gender(person.get_gender())
new_person.set_gramps_id(person.get_gramps_id())
new_person.set_handle(person.get_handle())
new_person.set_change_time(person.get_change_time())
new_person.set_family_handle_list(person.get_family_handle_list())
new_person.set_parent_family_handle_list(
person.get_parent_family_handle_list() )
new_person.set_tag_list(person.get_tag_list())
return new_person
| arunkgupta/gramps | gramps/gen/proxy/living.py | Python | gpl-2.0 | 11,584 | [
"Brian"
] | 5f3ee601f0261c36f6c3ccec744a9c7bde5045a2a4d63c1646d272ed121c8958 |
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MVN with covariance parameterized by a LinearOperatorLowRankUpdate."""
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import identity as identity_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensor_util
# Not part of public API since we're unsure if the base distribution should be
# this, or a more general MVNCovariance.
__all__ = []
_mvn_sample_note = """
`value` is a batch vector with compatible shape if `value` is a `Tensor` whose
shape can be broadcast up to either:
```python
self.batch_shape + self.event_shape
```
or
```python
[M1, ..., Mm] + self.batch_shape + self.event_shape
```
"""
class MultivariateNormalLowRankUpdateLinearOperatorCovariance(
distribution.AutoCompositeTensorDistribution):
"""The multivariate normal distribution on `R^k`.
This Multivariate Normal distribution is defined over `R^k` and parameterized
by a (batch of) length-`k` `loc` vector (the mean) and a (batch of) `k x k`
`covariance` matrix. The covariance matrix for this particular Normal is given
as a linear operator, `LinearOperatorLowRankUpdate`.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, covariance) = exp(-0.5 y^T @ inv(covariance) @ y) / Z,
y := x - loc
Z := (2 pi)**(0.5 k) |det(covariance)|**0.5,
```
where `^T` denotes matrix transpose and `@` matrix multiplication
The MultivariateNormal distribution can also be parameterized as a
[location-scale family](https://en.wikipedia.org/wiki/Location-scale_family),
i.e., it can be constructed using a matrix `scale` such that
`covariance = scale @ scale^T`, and then
```none
X ~ MultivariateNormal(loc=0, scale=I) # Identity scale, zero shift.
Y = scale @ X + loc
```
#### Examples
```python
tfd = tfp.distributions
# Initialize a single 2-variate Gaussian.
# The covariance is a rank 1 update of a diagonal matrix.
loc = [1., 2.]
diag = [1., 1.]
u = tf.ones((2, 1)) * np.sqrt(2) # Unit vector
cov_operator = tf.linalg.LinearOperatorLowRankUpdate(
base_operator=tf.linalg.LinearOperatorDiag(diag,
is_positive_definite=True),
u=u,
)
mvn = MultivariateNormalLowRankUpdateLinearOperatorCovariance(
loc=loc, cov_operator=cov_operator)
# Covariance agrees with the cov_operator.
mvn.covariance()
# ==> [[ 2., 1.],
# [ 1., 2.]]
# Compute the pdf of an`R^2` observation; return a scalar.
mvn.prob([-1., 0]) # shape: []
# Initialize a 2-batch of 2-variate Gaussians.
mu = [[1., 2],
[11, 22]] # shape: [2, 2]
diag = [[1., 2],
[0.5, 1]] # shape: [2, 2]
u = tf.ones((2, 1)) * np.sqrt(2) # Unit vector, will broadcast over batches!
cov_operator = tf.linalg.LinearOperatorLowRankUpdate(
base_operator=tf.linalg.LinearOperatorDiag(diag,
is_positive_definite=True),
u=u,
)
mvn = MultivariateNormalLowRankUpdateLinearOperatorCovariance(
loc=loc, cov_operator=cov_operator)
# Compute the pdf of two `R^2` observations; return a length-2 vector.
x = [[-0.9, 0],
[-10, 0]] # shape: [2, 2]
mvn.prob(x) # shape: [2]
```
"""
def __init__(self,
loc=None,
cov_operator=None,
validate_args=False,
allow_nan_stats=True,
name='MultivariateNormalLowRankUpdateLinearOperatorCovariance'):
"""Construct Multivariate Normal distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and
`cov_operator` arguments.
The `event_shape` is given by last dimension of the matrix implied by
`cov_operator`. The last dimension of `loc` (if provided) must
broadcast with this.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
cov_operator: Instance of `LinearOperatorLowRankUpdate` with same
`dtype` as `loc` and shape `[B1, ..., Bb, k, k]`. Must have structure
`A + UU^T` or `A + UDU^T`, where `A` and `D` (if provided) are
self-adjoint and positive definite.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
ValueError: if `cov_operator` is unspecified.
ValueError: if `cov_operator` does not specify the self-adjoint
positive definite conditions explained above.
TypeError: if not `cov_operator.dtype.is_floating`
"""
parameters = dict(locals())
if cov_operator is None:
raise ValueError('Missing required `cov_operator` parameter.')
if not dtype_util.is_floating(cov_operator.dtype):
raise TypeError(
'`cov_operator` parameter must have floating-point dtype.')
if not isinstance(cov_operator,
tf.linalg.LinearOperatorLowRankUpdate):
raise TypeError(
'`cov_operator` must be a LinearOperatorLowRankUpdate. '
'Found {}'.format(type(cov_operator)))
if cov_operator.u is not cov_operator.v:
raise ValueError('The `U` and `V` (typically low rank) matrices of '
'`cov_operator` must be the same, but were not.')
# For cov_operator, raise if the user explicitly set these to False,
# or if False was inferred by the LinearOperator. The default value is None,
# which will not trigger these raises.
# pylint: disable=g-bool-id-comparison
if cov_operator.is_self_adjoint is False:
raise ValueError('`cov_operator` must be self-adjoint.')
if cov_operator.is_positive_definite is False:
raise ValueError('`cov_operator` must be positive definite.')
# pylint: enable=g-bool-id-comparison
# For the base_operator, we require the user to explicity set
# is_self_adjoint and is_positive_definite.
if not cov_operator.base_operator.is_self_adjoint:
raise ValueError(
'The `base_operator` of `cov_operator` must be self-adjoint. '
'You may have to set the `is_self_adjoint` initialization hint.')
if not cov_operator.base_operator.is_positive_definite:
raise ValueError(
'The `base_operator` of `cov_operator` must be positive '
'definite. You may have to set the `is_positive_definite` '
'initialization hint.')
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([loc, cov_operator],
dtype_hint=tf.float32)
if loc is not None:
loc = tensor_util.convert_nonref_to_tensor(loc, dtype=dtype, name='loc')
# Get dynamic shapes (for self.*shape_tensor methods).
# shapes_from_loc_and_scale tries to return TensorShapes, but may return
# tensors. So we can only use it for the *shape_tensor methods.
# It is useful though, since it does lots of shape checks, and is a
# well-tested function.
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, cov_operator)
self._batch_shape_tensor_value = ps.convert_to_shape_tensor(
batch_shape, name='batch_shape')
self._event_shape_tensor_value = ps.convert_to_shape_tensor(
event_shape, name='event_shape')
# Get static shapes (for self.*shape methods).
self._batch_shape_value = cov_operator.batch_shape
if loc is not None:
self._batch_shape_value = tf.broadcast_static_shape(
self._batch_shape_value, loc.shape[:-1])
self._event_shape_value = cov_operator.shape[-1:]
if loc is not None:
self._event_shape_value = tf.broadcast_static_shape(
self._event_shape_value, loc.shape[-1:])
self._loc = loc
self._cov_operator = cov_operator
super(MultivariateNormalLowRankUpdateLinearOperatorCovariance,
self).__init__(
dtype=dtype,
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
self._parameters = parameters
@property
def loc(self):
"""The location arg for this distribution."""
return self._loc
@property
def cov_operator(self):
"""The linear operator providing the covariance of this distribution."""
return self._cov_operator
def _batch_shape_tensor(self, loc=None, scale=None):
return self._batch_shape_tensor_value
def _batch_shape(self):
return self._batch_shape_value
def _event_shape_tensor(self):
return self._event_shape_tensor_value
def _event_shape(self):
return self._event_shape_value
def _batch_plus_event_shape(self):
"""Prefer static version of self.batch_shape + self.event_shape."""
return ps.concat([
self._batch_shape_tensor(),
self._event_shape_tensor(),
],
axis=0)
def _entropy(self):
d = tf.cast(self._event_shape_tensor()[-1], self.dtype)
const = (d / 2.) * tf.cast(np.log(2. * np.pi * np.exp(1.)),
dtype=self.dtype)
entropy_value = const + 0.5 * self.cov_operator.log_abs_determinant()
return tf.broadcast_to(entropy_value, self._batch_shape_tensor())
def _sample_n(self, n, seed=None):
seed_1, seed_2 = samplers.split_seed(seed, n=2)
cov = self.cov_operator
# Convert, in case cov.u is a ref
u = tf.convert_to_tensor(cov.u, name='u')
full_shape = ps.concat(
[[n], self._batch_shape_tensor(),
self._event_shape_tensor()], axis=0)
low_rank_shape = ps.concat(
[[n], self._batch_shape_tensor(),
ps.shape(u)[-1:]], axis=0)
w1 = samplers.normal(
shape=full_shape,
mean=0.,
stddev=1.,
dtype=self.dtype,
seed=seed_1)
w2 = samplers.normal(
shape=low_rank_shape,
mean=0.,
stddev=1.,
dtype=self.dtype,
seed=seed_2)
# Important: cov.diag_operator is the diagonal part of the perturbation.
# More details: cov takes one of two forms,
# = B + U D U^T,
# with B = cov.base_operator, U = cov.u, D = cov.diag_operator
# or,
# = B + U U^T
# B^{1/2} @ w1
base_matvec_w1 = cov.base_operator.cholesky().matvec(w1)
# U @ D^{1/2} @ w2
d_one_half = cov.diag_operator.cholesky()
low_rank_matvec_w2 = tf.linalg.matvec(u, d_one_half.matvec(w2))
samples = base_matvec_w1 + low_rank_matvec_w2
if self.loc is not None:
samples += self.loc
return samples
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
return dict(
loc=parameter_properties.ParameterProperties(event_ndims=1),
cov_operator=parameter_properties.BatchedComponentProperties())
@distribution_util.AppendDocstring(_mvn_sample_note)
def _log_prob(self, x):
# This is how any MVN log prob could be written, assuming we had the
# covariance as a linear operator.
if self.loc is not None:
x = x - self.loc
quad_form = tf.reduce_sum(x * self.cov_operator.solvevec(x), axis=-1)
d = tf.cast(self._event_shape_tensor()[-1], self.dtype)
log_normalizer = (
(d / 2.) * np.log(2. * np.pi) +
(1. / 2.) * self.cov_operator.log_abs_determinant())
return -(1. / 2.) * quad_form - log_normalizer
@distribution_util.AppendDocstring(_mvn_sample_note)
def _prob(self, x):
return tf.math.exp(self.log_prob(x))
def _mean(self):
shape = self._batch_plus_event_shape()
if self.loc is None:
return tf.zeros(shape, self.dtype)
return tf.broadcast_to(self.loc, shape)
def _covariance(self):
cov = self.cov_operator.to_dense()
if self.loc is not None:
batch_plus_event_shape = self._batch_plus_event_shape()
shape = ps.concat([
batch_plus_event_shape,
batch_plus_event_shape[-1:],
],
axis=0)
cov = tf.broadcast_to(cov, shape)
return cov
def _variance(self):
variance = self.cov_operator.diag_part()
if self.loc is not None:
variance = tf.broadcast_to(variance, self._batch_plus_event_shape())
return variance
def _mode(self):
return self._mean()
def _default_event_space_bijector(self):
return identity_bijector.Identity(validate_args=self.validate_args)
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
if is_init != any(
tensor_util.is_ref(v) for v in self.cov_operator.variables):
return [
self.cov_operator.assert_self_adjoint(),
self.cov_operator.assert_positive_definite(),
]
return []
_composite_tensor_nonshape_params = ('loc', 'cov_operator')
| tensorflow/probability | tensorflow_probability/python/distributions/mvn_low_rank_update_linear_operator_covariance.py | Python | apache-2.0 | 14,579 | [
"Gaussian"
] | 5ce89038c585e3b677235633be81ab910109c23a23c536e547a5ccb54f8e72f5 |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
| espressopp/espressopp | src/main/__init__.py | Python | gpl-3.0 | 860 | [
"ESPResSo"
] | 66778dbba81c5f7280a103200a443c4f93c071147f18d2b2a24f8d31aca86cc2 |
# Copyright (c) 2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Four Panel Map
===============
By reading model output data from a netCDF file, we can create a four panel plot showing:
* 300 hPa heights and winds
* 500 hPa heights and absolute vorticity
* Surface temperatures
* Precipitable water
"""
###########################################
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage as ndimage
import xarray as xr
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo
###########################################
crs = ccrs.LambertConformal(central_longitude=-100.0, central_latitude=45.0)
###########################################
# Function used to create the map subplots
def plot_background(ax):
    """Draw the shared CONUS map background (extent, coastlines, states, borders) on *ax*."""
    # Continental-US window in plate-carree degrees: lon 235-290E, lat 20-55N.
    ax.set_extent([235., 290., 20., 55.])
    background_features = (cfeature.COASTLINE.with_scale('50m'),
                           cfeature.STATES,
                           cfeature.BORDERS)
    for feature in background_features:
        ax.add_feature(feature, linewidth=0.5)
    return ax
###########################################
# Open the example netCDF data
ds = xr.open_dataset(get_test_data('gfs_output.nc', False))
print(ds)

###########################################
# Combine 1D latitude and longitudes into a 2D grid of locations
lon_2d, lat_2d = np.meshgrid(ds['lon'], ds['lat'])

###########################################
# Pull out the data
vort_500 = ds['vort_500'][0]
surface_temp = ds['temp'][0]
precip_water = ds['precip_water'][0]
winds_300 = ds['winds_300'][0]

###########################################
# Do unit conversions to what we wish to plot
# Vorticity is rescaled so the colorbar reads in 10^-5 s^-1 units.
vort_500 = vort_500 * 1e5
surface_temp = surface_temp.metpy.convert_units('degF')
precip_water = precip_water.metpy.convert_units('inches')
winds_300 = winds_300.metpy.convert_units('knots')

###########################################
# Smooth the height data
heights_300 = ndimage.gaussian_filter(ds['heights_300'][0], sigma=1.5, order=0)
heights_500 = ndimage.gaussian_filter(ds['heights_500'][0], sigma=1.5, order=0)

###########################################
# Create the figure and plot background on different axes
fig, axarr = plt.subplots(nrows=2, ncols=2, figsize=(20, 13), constrained_layout=True,
                          subplot_kw={'projection': crs})
add_metpy_logo(fig, 140, 120, size='large')
axlist = axarr.flatten()
for ax in axlist:
    plot_background(ax)

# Upper left plot - 300-hPa winds and geopotential heights
cf1 = axlist[0].contourf(lon_2d, lat_2d, winds_300, cmap='cool', transform=ccrs.PlateCarree())
c1 = axlist[0].contour(lon_2d, lat_2d, heights_300, colors='black', linewidths=2,
                       transform=ccrs.PlateCarree())
axlist[0].clabel(c1, fontsize=10, inline=1, inline_spacing=1, fmt='%i', rightside_up=True)
axlist[0].set_title('300-hPa Wind Speeds and Heights', fontsize=16)
cb1 = fig.colorbar(cf1, ax=axlist[0], orientation='horizontal', shrink=0.74, pad=0)
cb1.set_label('knots', size='x-large')

# Upper right plot - 500mb absolute vorticity and geopotential heights
cf2 = axlist[1].contourf(lon_2d, lat_2d, vort_500, cmap='BrBG', transform=ccrs.PlateCarree(),
                         zorder=0, norm=plt.Normalize(-32, 32))
c2 = axlist[1].contour(lon_2d, lat_2d, heights_500, colors='k', linewidths=2,
                       transform=ccrs.PlateCarree())
axlist[1].clabel(c2, fontsize=10, inline=1, inline_spacing=1, fmt='%i', rightside_up=True)
axlist[1].set_title('500-hPa Absolute Vorticity and Heights', fontsize=16)
cb2 = fig.colorbar(cf2, ax=axlist[1], orientation='horizontal', shrink=0.74, pad=0)
cb2.set_label(r'$10^{-5}$ s$^{-1}$', size='x-large')

# Lower left plot - surface temperatures
cf3 = axlist[2].contourf(lon_2d, lat_2d, surface_temp, cmap='YlOrRd',
                         transform=ccrs.PlateCarree(), zorder=0)
axlist[2].set_title('Surface Temperatures', fontsize=16)
cb3 = fig.colorbar(cf3, ax=axlist[2], orientation='horizontal', shrink=0.74, pad=0)
cb3.set_label('\N{DEGREE FAHRENHEIT}', size='x-large')

# Lower right plot - precipitable water entire atmosphere
cf4 = axlist[3].contourf(lon_2d, lat_2d, precip_water, cmap='Greens',
                         transform=ccrs.PlateCarree(), zorder=0)
axlist[3].set_title('Precipitable Water', fontsize=16)
cb4 = fig.colorbar(cf4, ax=axlist[3], orientation='horizontal', shrink=0.74, pad=0)
cb4.set_label('in.', size='x-large')

# Set height padding for plots
fig.set_constrained_layout_pads(w_pad=0., h_pad=0.1, hspace=0., wspace=0.)

# Set figure title
fig.suptitle(ds['time'][0].dt.strftime('%d %B %Y %H:%MZ').values, fontsize=24)

# Display the plot
plt.show()
| metpy/MetPy | v1.1/_downloads/d506add257ad1ffd2447607d972a8e31/Four_Panel_Map.py | Python | bsd-3-clause | 4,731 | [
"NetCDF"
] | 2ba7a313031cff690d7b2c84507202d847052e2fc59ecaaf057a37b936aeb243 |
#
# Copyright (c) 2003-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" utility functionality for fingerprinting sets of molecules
includes a command line app for working with fingerprints
and databases
Sample Usage:
python FingerprintMols.py -d data.gdb \
-t 'raw_dop_data' --smilesName="Structure" --idName="Mol_ID" \
--outTable="daylight_sig"
"""
import getopt
import pickle
import sys
from rdkit import Chem, DataStructs
from rdkit.Chem import MACCSkeys
from rdkit.ML.Cluster import Murtagh
def error(msg):
  """Emit an error message on standard error (no newline is appended)."""
  print(msg, end='', file=sys.stderr)
def message(msg):
  """Emit a progress/status message on standard error (no newline is appended)."""
  print(msg, end='', file=sys.stderr)
def GetRDKFingerprint(mol):
  """Fingerprint *mol* using the default FingerprinterDetails parameters."""
  defaults = FingerprinterDetails().__dict__
  return FingerprintMol(mol, **defaults)
def FoldFingerprintToTargetDensity(fp, **fpArgs):
  """Repeatedly halve *fp* until its on-bit density reaches ``tgtDensity``.

  Folding stops early when another halving would bring the fingerprint
  size down to ``minSize`` bits or fewer.
  """
  target = fpArgs['tgtDensity']
  floor = fpArgs['minSize']
  while float(fp.GetNumOnBits()) / fp.GetNumBits() < target:
    if fp.GetNumBits() / 2 <= floor:
      break
    fp = DataStructs.FoldFingerprint(fp, 2)
  return fp
def FingerprintMol(mol, fingerprinter=Chem.RDKFingerprint, **fpArgs):
  """Fingerprint a single molecule.

  When no keyword arguments are supplied, the FingerprinterDetails
  defaults are used.  The RDK fingerprinter takes its parameters
  positionally; any other fingerprinter gets them as keywords and has
  its result folded to the target density.
  """
  args = fpArgs or FingerprinterDetails().__dict__

  if fingerprinter == Chem.RDKFingerprint:
    return fingerprinter(mol, args['minPath'], args['maxPath'], args['fpSize'],
                         args['bitsPerHash'], args['useHs'], args['tgtDensity'],
                         args['minSize'])

  return FoldFingerprintToTargetDensity(fingerprinter(mol, **args), **args)
def FingerprintsFromSmiles(dataSource, idCol, smiCol, fingerprinter=Chem.RDKFingerprint,
                           reportFreq=10, maxMols=-1, **fpArgs):
  """Fingerprint SMILES pulled from *dataSource*.

  Each entry supplies an ID (column *idCol*) and a SMILES string
  (column *smiCol*); unparseable SMILES are reported and skipped.
  *fpArgs* are passed as keyword arguments to the fingerprinter.

  Returns a list of 2-tuples: (ID, fp)
  """
  results = []
  count = 0
  for row in dataSource:
    molId, smiles = str(row[idCol]), str(row[smiCol])
    mol = Chem.MolFromSmiles(smiles)
    if mol is None:
      error(f'Problems parsing SMILES: {smiles}\n')
      continue
    results.append((molId, FingerprintMol(mol, fingerprinter, **fpArgs)))
    count += 1
    if reportFreq > 0 and not count % reportFreq:
      message(f'Done {count} molecules\n')
    if maxMols > 0 and count >= maxMols:
      break
  return results
def FingerprintsFromMols(mols, fingerprinter=Chem.RDKFingerprint, reportFreq=10, maxMols=-1,
                         **fpArgs):
  """Fingerprint a sequence of (ID, mol) pairs.

  Entries whose molecule is falsy (e.g. ``None``) are reported and
  skipped.  *fpArgs* are passed as keyword arguments to the
  fingerprinter.

  Returns a list of 2-tuples: (ID, fp)
  """
  res = []
  nDone = 0
  for ID, mol in mols:
    if mol:
      fp = FingerprintMol(mol, fingerprinter, **fpArgs)
      res.append((ID, fp))
      nDone += 1
      if reportFreq > 0 and not nDone % reportFreq:
        message(f'Done {nDone} molecules\n')
      if maxMols > 0 and nDone >= maxMols:
        break
    else:
      # Bug fix: the original referenced an undefined name ``smi`` here
      # (copy/paste from FingerprintsFromSmiles), raising NameError
      # instead of reporting the bad entry.  Report the molecule's ID.
      error(f'Problems fingerprinting molecule: {ID}\n')
  return res
def FingerprintsFromPickles(dataSource, idCol, pklCol, fingerprinter=Chem.RDKFingerprint,
                            reportFreq=10, maxMols=-1, **fpArgs):
  """Fingerprint pickled molecules pulled from *dataSource*.

  Each entry supplies an ID (column *idCol*) and a pickled molecule
  (column *pklCol*); entries that fail to rebuild are reported and
  skipped.  *fpArgs* are passed as keyword arguments to the
  fingerprinter.

  Returns a list of 2-tuples: (ID, fp)
  """
  results = []
  count = 0
  for row in dataSource:
    molId, pkl = str(row[idCol]), str(row[pklCol])
    mol = Chem.Mol(pkl)
    if mol is None:
      error(f'Problems parsing pickle for ID: {molId}\n')
      continue
    results.append((molId, FingerprintMol(mol, fingerprinter, **fpArgs)))
    count += 1
    if reportFreq > 0 and not count % reportFreq:
      message(f'Done {count} molecules\n')
    if maxMols > 0 and count >= maxMols:
      break
  return results
def FingerprintsFromDetails(details, reportFreq=10):
  """Drive a fingerprinting run described by a _FingerprinterDetails_ instance.

  Input molecules come from a database table, a delimited text file of
  SMILES, or an SD file (checked in that order).  Results are optionally
  pickled to ``details.outFileName`` and/or written to a database table.

  Returns the list of (ID, fingerprint) 2-tuples, or None when no data
  source is configured or no fingerprints were produced.
  """
  data = None
  if details.dbName and details.tableName:
    # Input source 1: a database table of ID + SMILES columns.
    from rdkit.Dbase import DbInfo
    from rdkit.Dbase.DbConnection import DbConnect
    from rdkit.ML.Data import DataUtils
    try:
      conn = DbConnect(details.dbName, details.tableName)
    except Exception:
      import traceback
      error(f'Problems establishing connection to database: {details.dbName}|{details.tableName}\n')
      traceback.print_exc()
    if not details.idName:
      # Default the ID column to the table's first column.
      details.idName = DbInfo.GetColumnNames(details.dbName, details.tableName)[0]
    dataSet = DataUtils.DBToData(details.dbName, details.tableName,
                                 what=f'{details.idName},{details.smilesName}')
    idCol = 0
    smiCol = 1
  elif details.inFileName and details.useSmiles:
    # Input source 2: a delimited text file of ID + SMILES.
    from rdkit.ML.Data import DataUtils
    conn = None
    if not details.idName:
      details.idName = 'ID'
    try:
      dataSet = DataUtils.TextFileToData(details.inFileName,
                                         onlyCols=[details.idName, details.smilesName])
    except IOError:
      import traceback
      error(f'Problems reading from file {details.inFileName}\n')
      traceback.print_exc()
    idCol = 0
    smiCol = 1
  elif details.inFileName and details.useSD:
    # Input source 3: an SD file; molecules are read eagerly into memory.
    conn = None
    if not details.idName:
      details.idName = 'ID'
    dataSet = []
    try:
      s = Chem.SDMolSupplier(details.inFileName)
    except Exception:
      import traceback
      error(f'Problems reading from file {details.inFileName}\n')
      traceback.print_exc()
    else:
      while 1:
        try:
          m = s.next()
        except StopIteration:
          break
        if m:
          dataSet.append(m)
          if reportFreq > 0 and not len(dataSet) % reportFreq:
            message(f'Read {len(dataSet)} molecules\n')
            # NOTE(review): the maxMols cut-off is only evaluated on
            # report boundaries, so slightly more than maxMols molecules
            # may be read when reportFreq does not divide maxMols.
            if 0 < details.maxMols <= len(dataSet):
              break
      # Attach an ID to each molecule: prefer the named property,
      # fall back to the molecule title (_Name).
      for i, mol in enumerate(dataSet):
        if mol.HasProp(details.idName):
          nm = mol.GetProp(details.idName)
        else:
          nm = mol.GetProp('_Name')
        dataSet[i] = (nm, mol)
  else:
    dataSet = None

  fps = None
  # Dispatch to the appropriate fingerprinting helper for the source type.
  if dataSet and not details.useSD:
    data = dataSet.GetNamedData()
    if not details.molPklName:
      fps = FingerprintsFromSmiles(data, idCol, smiCol, **details.__dict__)
    else:
      fps = FingerprintsFromPickles(data, idCol, smiCol, **details.__dict__)
  elif dataSet and details.useSD:
    fps = FingerprintsFromMols(dataSet, **details.__dict__)

  if fps:
    if details.outFileName:
      # Pickle each (ID, fp) tuple sequentially into the output file.
      outF = open(details.outFileName, 'wb+')
      for i in range(len(fps)):
        pickle.dump(fps[i], outF)
      outF.close()
    dbName = details.outDbName or details.dbName
    if details.outTableName and dbName:
      from rdkit.Dbase import DbModule, DbUtils
      from rdkit.Dbase.DbConnection import DbConnect
      conn = DbConnect(dbName)
      #
      # We don't have a db open already, so we'll need to figure out
      # the types of our columns...
      #
      colTypes = DbUtils.TypeFinder(data, len(data), len(data[0]))
      typeStrs = DbUtils.GetTypeStrings([details.idName, details.smilesName], colTypes,
                                        keyCol=details.idName)
      cols = f'{typeStrs[0]}, {details.fpColName} {DbModule.binaryTypeName}'

      # FIX: we should really check to see if the table
      #  is already there and, if so, add the appropriate
      #  column.

      #
      # create the new table
      #
      if details.replaceTable or \
         details.outTableName.upper() not in [x.upper() for x in conn.GetTableNames()]:
        conn.AddTable(details.outTableName, cols)

      #
      # And add the data
      #
      for ID, fp in fps:
        tpl = ID, DbModule.binaryHolder(fp.ToBinary())
        conn.InsertData(details.outTableName, tpl)
      conn.Commit()
  return fps
# ------------------------------------------------
#
# Command line parsing stuff
#
# ------------------------------------------------
class FingerprinterDetails(object):
  """Container for the settings of a fingerprinting run.

  Constructing an instance populates sensible defaults for the
  fingerprinter itself as well as for the screening and clustering
  command-line tools that share this options object.
  """

  def __init__(self):
    self._fingerprinterInit()
    self._screenerInit()
    self._clusterInit()

  def _fingerprinterInit(self):
    # --- fingerprint algorithm and its parameters ---
    self.fingerprinter = Chem.RDKFingerprint
    self.minPath, self.maxPath = 1, 7
    self.fpSize, self.minSize = 2048, 64
    self.bitsPerHash = 2
    self.tgtDensity = 0.3
    self.useHs = 0
    self.useValence = 0
    self.discrimHash = 0
    # --- input/output locations and column names ---
    self.dbName = self.outDbName = ''
    self.tableName = self.outTableName = ''
    self.inFileName = self.outFileName = ''
    self.idName = ''
    self.smilesName = 'SMILES'
    self.molPklName = ''
    self.fpColName = "AutoFragmentFP"
    self.replaceTable = True
    self.useSmiles = True
    self.useSD = False
    self.maxMols = -1

  def _screenerInit(self):
    # Defaults for similarity screening.
    self.metric = DataStructs.TanimotoSimilarity
    self.doScreen = ''
    self.topN = 10
    self.screenThresh = 0.75
    self.doThreshold = 0
    self.smilesTableName = ''
    self.probeSmiles = ''
    self.probeMol = None
    self.noPickle = 0

  def _clusterInit(self):
    # Defaults for clustering.
    self.clusterAlgo = Murtagh.WARDS
    self.actTableName = ''
    self.actName = ''

  def GetMetricName(self):
    """Return a human-readable name for the current similarity metric."""
    # DataStructs.TverskySimilarity: 'Tversky'
    names = {
      DataStructs.DiceSimilarity: 'Dice',
      DataStructs.TanimotoSimilarity: 'Tanimoto',
      DataStructs.CosineSimilarity: 'Cosine',
    }
    return names.get(self.metric, self.metric) or 'Unknown'

  def SetMetricFromName(self, name):
    """Set the similarity metric from its (case-insensitive) name; unknown names are ignored."""
    # 'TVERSKY': DataStructs.TverskySimilarity,
    lookup = {
      'DICE': DataStructs.DiceSimilarity,
      'TANIMOTO': DataStructs.TanimotoSimilarity,
      'COSINE': DataStructs.CosineSimilarity,
    }
    self.metric = lookup.get(name.upper(), self.metric)
def Usage():
  """Print the command-line usage message and exit with status -1."""
  print(_usageDoc)
  raise SystemExit(-1)
_usageDoc = """
Usage: FingerprintMols.py [args] <fName>
If <fName> is provided and no tableName is specified (see below),
data will be read from the text file <fName>. Text files delimited
with either commas (extension .csv) or tabs (extension .txt) are
supported.
Command line arguments are:
- -d _dbName_: set the name of the database from which
to pull input molecule information. If output is
going to a database, this will also be used for that
unless the --outDbName option is used.
- -t _tableName_: set the name of the database table
from which to pull input molecule information
- --smilesName=val: sets the name of the SMILES column
in the input database. Default is *SMILES*.
- --useSD: Assume that the input file is an SD file, not a SMILES
table.
- --idName=val: sets the name of the id column in the input
database. Defaults to be the name of the first db column
(or *ID* for text files).
- -o _outFileName_: name of the output file (output will
be a pickle file with one label,fingerprint entry for each
molecule).
- --outTable=val: name of the output db table used to store
fingerprints. If this table already exists, it will be
replaced.
- --outDbName: name of output database, if it's being used.
Defaults to be the same as the input db.
- --fpColName=val: name to use for the column which stores
fingerprints (in pickled format) in the output db table.
Default is *AutoFragmentFP*
- --maxSize=val: base size of the fingerprints to be generated
Default is *2048*
- --minSize=val: minimum size of the fingerprints to be generated
(limits the amount of folding that happens). Default is *64*
- --density=val: target bit density in the fingerprint. The
fingerprint will be folded until this density is
reached. Default is *0.3*
- --minPath=val: minimum path length to be included in
fragment-based fingerprints. Default is *1*.
- --maxPath=val: maximum path length to be included in
fragment-based fingerprints. Default is *7*.
- --nBitsPerHash: number of bits to be set in the output
fingerprint for each fragment. Default is *2*.
- --discrim: use of path-based discriminators to hash bits.
Default is *false*.
- -V: include valence information in the fingerprints
Default is *false*.
- -H: include Hs in the fingerprint
Default is *false*.
- --maxMols=val: sets the maximum number of molecules to be
fingerprinted.
- --useMACCS: use the public MACCS keys to do the fingerprinting
(instead of a daylight-type fingerprint)
"""
def ParseArgs(details=None):
  """ parses the command line arguments and returns a
   _FingerprinterDetails_ instance with the results.

   **Note**:

     - If you make modifications here, please update the global
       _usageDoc string so the Usage message is up to date.

     - This routine is used by both the fingerprinter, the clusterer and the
       screener; not all arguments make sense for all applications.

  """
  args = sys.argv[1:]
  try:
    args, extras = getopt.getopt(args,
                                 'HVs:d:t:o:h',
                                 [
                                   'minSize=',
                                   'maxSize=',
                                   'density=',
                                   'minPath=',
                                   'maxPath=',
                                   'bitsPerHash=',
                                   # Bug fix: register the spelling documented in
                                   # _usageDoc; previously getopt rejected
                                   # --nBitsPerHash and --bitsPerHash was parsed
                                   # but silently ignored by the handler below.
                                   'nBitsPerHash=',
                                   'smilesName=',
                                   'molPkl=',
                                   'useSD',
                                   'idName=',
                                   'discrim',
                                   'outTable=',
                                   'outDbName=',
                                   'fpColName=',
                                   'maxMols=',
                                   'useMACCS',
                                   'keepTable',
                                   # SCREENING:
                                   'smilesTable=',
                                   'doScreen=',
                                   'topN=',
                                   'thresh=',
                                   'smiles=',
                                   'dice',
                                   'cosine',
                                   # CLUSTERING:
                                   'actTable=',
                                   'actName=',
                                   'SLINK',
                                   'CLINK',
                                   'UPGMA',
                                 ])
  except Exception:
    import traceback
    traceback.print_exc()
    Usage()

  if details is None:
    details = FingerprinterDetails()
  if len(extras):
    details.inFileName = extras[0]

  for arg, val in args:
    if arg == '-H':
      details.useHs = 1
    elif arg == '-V':
      details.useValence = 1
    elif arg == '-d':
      details.dbName = val
    elif arg == '-t':
      details.tableName = val
    elif arg == '-o':
      details.outFileName = val
    elif arg == '--minSize':
      details.minSize = int(val)
    elif arg == '--maxSize':
      details.fpSize = int(val)
    elif arg == '--density':
      details.tgtDensity = float(val)
    elif arg == '--outTable':
      details.outTableName = val
    elif arg == '--outDbName':
      details.outDbName = val
    elif arg == '--fpColName':
      details.fpColName = val
    elif arg == '--minPath':
      details.minPath = int(val)
    elif arg == '--maxPath':
      details.maxPath = int(val)
    # Accept both spellings; getopt produces whichever the user typed.
    elif arg in ('--bitsPerHash', '--nBitsPerHash'):
      details.bitsPerHash = int(val)
    elif arg == '--discrim':
      details.discrimHash = 1
    elif arg == '--smilesName':
      details.smilesName = val
    elif arg == '--molPkl':
      details.molPklName = val
    elif arg == '--useSD':
      details.useSmiles = False
      details.useSD = True
    elif arg == '--idName':
      details.idName = val
    elif arg == '--maxMols':
      details.maxMols = int(val)
    elif arg == '--useMACCS':
      details.fingerprinter = MACCSkeys.GenMACCSKeys
    elif arg == '--keepTable':
      details.replaceTable = False

    # SCREENER:
    elif arg == '--smilesTable':
      details.smilesTableName = val
    elif arg == '--topN':
      details.doThreshold = 0
      details.topN = int(val)
    elif arg == '--thresh':
      details.doThreshold = 1
      details.screenThresh = float(val)
    elif arg == '--smiles':
      details.probeSmiles = val
    elif arg == '--dice':
      details.metric = DataStructs.DiceSimilarity
    elif arg == '--cosine':
      details.metric = DataStructs.CosineSimilarity

    # CLUSTERS:
    elif arg == '--SLINK':
      details.clusterAlgo = Murtagh.SLINK
    elif arg == '--CLINK':
      details.clusterAlgo = Murtagh.CLINK
    elif arg == '--UPGMA':
      details.clusterAlgo = Murtagh.UPGMA
    elif arg == '--actTable':
      details.actTableName = val
    elif arg == '--actName':
      details.actName = val
    elif arg == '-h':
      Usage()
  return details
if __name__ == '__main__':
  # Command-line entry point: parse options, then run the fingerprinting job.
  message("This is FingerprintMols\n\n")
  details = ParseArgs()
  FingerprintsFromDetails(details)
| ptosco/rdkit | rdkit/Chem/Fingerprints/FingerprintMols.py | Python | bsd-3-clause | 17,388 | [
"RDKit"
] | 84a5884ac53c59e6e2b992322e799484cc94c6ced3ea902ff23ef154235bd1e0 |
"""Metrics to assess performance on classification task given class prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Jatin Shah <jatindshah@gmail.com>
# Saurabh Jha <saurabh.jhaa@gmail.com>
# Bernardo Stein <bernardovstein@gmail.com>
# Shangwu Yao <shangwuyao@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer
from ..preprocessing import LabelEncoder
from ..utils import assert_all_finite
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..exceptions import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
    """Check that y_true and y_pred belong to the same classification task

    This converts multiclass or binary types to a common shape, and raises a
    ValueError for a mix of multilabel and multiclass targets, a mix of
    multilabel formats, for the presence of continuous-valued or multioutput
    targets, or for targets of different lengths.

    Column vectors are squeezed to 1d, while multilabel formats are returned
    as CSR sparse label indicators.

    Parameters
    ----------
    y_true : array-like

    y_pred : array-like

    Returns
    -------
    type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
        The type of the true target data, as output by
        ``utils.multiclass.type_of_target``

    y_true : array or indicator matrix

    y_pred : array or indicator matrix
    """
    check_consistent_length(y_true, y_pred)
    type_true = type_of_target(y_true)
    type_pred = type_of_target(y_pred)

    y_type = {type_true, type_pred}
    # A binary/multiclass mix is compatible: treat the pair as multiclass.
    if y_type == {"binary", "multiclass"}:
        y_type = {"multiclass"}

    if len(y_type) > 1:
        raise ValueError("Classification metrics can't handle a mix of {0} "
                         "and {1} targets".format(type_true, type_pred))

    # We can't have more than one value on y_type => The set is no more needed
    y_type = y_type.pop()

    # No metrics support "multiclass-multioutput" format
    if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
        raise ValueError("{0} is not supported".format(y_type))

    if y_type in ["binary", "multiclass"]:
        y_true = column_or_1d(y_true)
        y_pred = column_or_1d(y_pred)
        if y_type == "binary":
            # A "binary"/"binary" pair may still span >2 distinct values
            # between them; promote to multiclass in that case.
            unique_values = np.union1d(y_true, y_pred)
            if len(unique_values) > 2:
                y_type = "multiclass"

    if y_type.startswith('multilabel'):
        y_true = csr_matrix(y_true)
        y_pred = csr_matrix(y_pred)
        y_type = 'multilabel-indicator'

    return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
    """Accuracy classification score.

    In multilabel classification, this computes subset accuracy: the set
    of labels predicted for a sample must *exactly* match the
    corresponding set of labels in y_true.

    Read more in the :ref:`User Guide <accuracy_score>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    normalize : bool, optional (default=True)
        If ``False``, return the number of correctly classified samples.
        Otherwise, return the fraction of correctly classified samples.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    score : float
        The fraction of correctly classified samples when
        ``normalize == True`` (best value 1), otherwise their (possibly
        weighted) count (best value ``n_samples``).

    See also
    --------
    jaccard_score, hamming_loss, zero_one_loss

    Notes
    -----
    In binary and multiclass classification, this function is equal
    to the ``jaccard_score`` function.

    Examples
    --------
    >>> from sklearn.metrics import accuracy_score
    >>> y_pred = [0, 2, 1, 3]
    >>> y_true = [0, 1, 2, 3]
    >>> accuracy_score(y_true, y_pred)
    0.5
    >>> accuracy_score(y_true, y_pred, normalize=False)
    2

    In the multilabel case with binary label indicators:

    >>> import numpy as np
    >>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
    0.5
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    check_consistent_length(y_true, y_pred, sample_weight)

    if y_type.startswith('multilabel'):
        # Subset accuracy: a sample counts only when no label differs.
        n_differences = count_nonzero(y_true - y_pred, axis=1)
        per_sample_hits = n_differences == 0
    else:
        per_sample_hits = y_true == y_pred

    return _weighted_sum(per_sample_hits, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None):
    """Compute confusion matrix to evaluate the accuracy of a classification

    By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
    is equal to the number of observations known to be in group :math:`i` but
    predicted to be in group :math:`j`.

    Thus in binary classification, the count of true negatives is
    :math:`C_{0,0}`, false negatives is :math:`C_{1,0}`, true positives is
    :math:`C_{1,1}` and false positives is :math:`C_{0,1}`.

    Read more in the :ref:`User Guide <confusion_matrix>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        Ground truth (correct) target values.

    y_pred : array, shape = [n_samples]
        Estimated targets as returned by a classifier.

    labels : array, shape = [n_classes], optional
        List of labels to index the matrix. This may be used to reorder
        or select a subset of labels.
        If none is given, those that appear at least once
        in ``y_true`` or ``y_pred`` are used in sorted order.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    C : array, shape = [n_classes, n_classes]
        Confusion matrix

    References
    ----------
    .. [1] `Wikipedia entry for the Confusion matrix
           <https://en.wikipedia.org/wiki/Confusion_matrix>`_
           (Wikipedia and other references may use a different
           convention for axes)

    Examples
    --------
    >>> from sklearn.metrics import confusion_matrix
    >>> y_true = [2, 0, 2, 2, 0, 1]
    >>> y_pred = [0, 0, 2, 2, 0, 2]
    >>> confusion_matrix(y_true, y_pred)
    array([[2, 0, 0],
           [0, 0, 1],
           [1, 0, 2]])

    >>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
    >>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
    >>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
    array([[2, 0, 0],
           [0, 0, 1],
           [1, 0, 2]])

    In the binary case, we can extract true positives, etc as follows:

    >>> tn, fp, fn, tp = confusion_matrix([0, 1, 0, 1], [1, 1, 1, 0]).ravel()
    >>> (tn, fp, fn, tp)
    (0, 2, 1, 1)

    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    if y_type not in ("binary", "multiclass"):
        raise ValueError("%s is not supported" % y_type)

    if labels is None:
        labels = unique_labels(y_true, y_pred)
    else:
        labels = np.asarray(labels)
        if np.all([l not in y_true for l in labels]):
            raise ValueError("At least one label specified must be in y_true")

    if sample_weight is None:
        sample_weight = np.ones(y_true.shape[0], dtype=np.int64)
    else:
        sample_weight = np.asarray(sample_weight)

    check_consistent_length(y_true, y_pred, sample_weight)

    n_labels = labels.size
    label_to_ind = {y: x for x, y in enumerate(labels)}
    # convert yt, yp into index
    # Values not in ``labels`` map to the out-of-range sentinel
    # ``n_labels + 1`` and are filtered out just below.
    y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
    y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])

    # intersect y_pred, y_true with labels, eliminate items not in labels
    ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
    y_pred = y_pred[ind]
    y_true = y_true[ind]
    # also eliminate weights of eliminated items
    sample_weight = sample_weight[ind]

    # Choose the accumulator dtype to always have high precision
    if sample_weight.dtype.kind in {'i', 'u', 'b'}:
        dtype = np.int64
    else:
        dtype = np.float64

    # Duplicate (true, pred) coordinates are summed by the COO -> dense
    # conversion, which yields exactly the per-cell weighted counts.
    CM = coo_matrix((sample_weight, (y_true, y_pred)),
                    shape=(n_labels, n_labels), dtype=dtype,
                    ).toarray()

    return CM
def multilabel_confusion_matrix(y_true, y_pred, sample_weight=None,
                                labels=None, samplewise=False):
    """Compute a confusion matrix for each class or sample

    .. versionadded:: 0.21

    Compute class-wise (default) or sample-wise (samplewise=True) multilabel
    confusion matrix to evaluate the accuracy of a classification, and output
    confusion matrices for each class or sample.

    In multilabel confusion matrix :math:`MCM`, the count of true negatives
    is :math:`MCM_{:,0,0}`, false negatives is :math:`MCM_{:,1,0}`,
    true positives is :math:`MCM_{:,1,1}` and false positives is
    :math:`MCM_{:,0,1}`.

    Multiclass data will be treated as if binarized under a one-vs-rest
    transformation. Returned confusion matrices will be in the order of
    sorted unique labels in the union of (y_true, y_pred).

    Read more in the :ref:`User Guide <multilabel_confusion_matrix>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        of shape (n_samples, n_outputs) or (n_samples,)
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        of shape (n_samples, n_outputs) or (n_samples,)
        Estimated targets as returned by a classifier

    sample_weight : array-like of shape = (n_samples,), optional
        Sample weights

    labels : array-like
        A list of classes or column indices to select some (or to force
        inclusion of classes absent from the data)

    samplewise : bool, default=False
        In the multilabel case, this calculates a confusion matrix per sample

    Returns
    -------
    multi_confusion : array, shape (n_outputs, 2, 2)
        A 2x2 confusion matrix corresponding to each output in the input.
        When calculating class-wise multi_confusion (default), then
        n_outputs = n_labels; when calculating sample-wise multi_confusion
        (samplewise=True), n_outputs = n_samples. If ``labels`` is defined,
        the results will be returned in the order specified in ``labels``,
        otherwise the results will be returned in sorted order by default.

    See also
    --------
    confusion_matrix

    Notes
    -----
    The multilabel_confusion_matrix calculates class-wise or sample-wise
    multilabel confusion matrices, and in multiclass tasks, labels are
    binarized under a one-vs-rest way; while confusion_matrix calculates
    one confusion matrix for confusion between every two classes.

    Examples
    --------

    Multilabel-indicator case:

    >>> import numpy as np
    >>> from sklearn.metrics import multilabel_confusion_matrix
    >>> y_true = np.array([[1, 0, 1],
    ...                    [0, 1, 0]])
    >>> y_pred = np.array([[1, 0, 0],
    ...                    [0, 1, 1]])
    >>> multilabel_confusion_matrix(y_true, y_pred)
    array([[[1, 0],
            [0, 1]],
    <BLANKLINE>
           [[1, 0],
            [0, 1]],
    <BLANKLINE>
           [[0, 1],
            [1, 0]]])

    Multiclass case:

    >>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
    >>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
    >>> multilabel_confusion_matrix(y_true, y_pred,
    ...                             labels=["ant", "bird", "cat"])
    array([[[3, 1],
            [0, 2]],
    <BLANKLINE>
           [[5, 0],
            [1, 0]],
    <BLANKLINE>
           [[2, 1],
            [1, 2]]])

    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
    check_consistent_length(y_true, y_pred, sample_weight)

    if y_type not in ("binary", "multiclass", "multilabel-indicator"):
        raise ValueError("%s is not supported" % y_type)

    present_labels = unique_labels(y_true, y_pred)
    if labels is None:
        labels = present_labels
        n_labels = None
    else:
        # Requested labels first, then any other labels seen in the data;
        # only the first n_labels rows are kept in the output.
        n_labels = len(labels)
        labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
                                                 assume_unique=True)])

    if y_true.ndim == 1:
        # Binary / multiclass path: one-vs-rest counts via bincount.
        if samplewise:
            raise ValueError("Samplewise metrics are not available outside of "
                             "multilabel classification.")

        le = LabelEncoder()
        le.fit(labels)
        y_true = le.transform(y_true)
        y_pred = le.transform(y_pred)
        sorted_labels = le.classes_

        # labels are now from 0 to len(labels) - 1 -> use bincount
        tp = y_true == y_pred
        tp_bins = y_true[tp]
        if sample_weight is not None:
            tp_bins_weights = np.asarray(sample_weight)[tp]
        else:
            tp_bins_weights = None

        if len(tp_bins):
            tp_sum = np.bincount(tp_bins, weights=tp_bins_weights,
                                 minlength=len(labels))
        else:
            # Pathological case
            true_sum = pred_sum = tp_sum = np.zeros(len(labels))
        if len(y_pred):
            pred_sum = np.bincount(y_pred, weights=sample_weight,
                                   minlength=len(labels))
        if len(y_true):
            true_sum = np.bincount(y_true, weights=sample_weight,
                                   minlength=len(labels))

        # Retain only selected labels
        indices = np.searchsorted(sorted_labels, labels[:n_labels])
        tp_sum = tp_sum[indices]
        true_sum = true_sum[indices]
        pred_sum = pred_sum[indices]

    else:
        # Multilabel-indicator path: y_true/y_pred are CSR matrices here
        # (converted by _check_targets).
        sum_axis = 1 if samplewise else 0

        # All labels are index integers for multilabel.
        # Select labels:
        if not np.array_equal(labels, present_labels):
            if np.max(labels) > np.max(present_labels):
                raise ValueError('All labels must be in [0, n labels) for '
                                 'multilabel targets. '
                                 'Got %d > %d' %
                                 (np.max(labels), np.max(present_labels)))
            if np.min(labels) < 0:
                raise ValueError('All labels must be in [0, n labels) for '
                                 'multilabel targets. '
                                 'Got %d < 0' % np.min(labels))

        if n_labels is not None:
            y_true = y_true[:, labels[:n_labels]]
            y_pred = y_pred[:, labels[:n_labels]]

        # calculate weighted counts
        true_and_pred = y_true.multiply(y_pred)
        tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
                               sample_weight=sample_weight)
        pred_sum = count_nonzero(y_pred, axis=sum_axis,
                                 sample_weight=sample_weight)
        true_sum = count_nonzero(y_true, axis=sum_axis,
                                 sample_weight=sample_weight)

    fp = pred_sum - tp_sum
    fn = true_sum - tp_sum
    tp = tp_sum

    # True negatives are whatever remains of the (weighted) total after
    # removing tp, fp and fn for each output.
    if sample_weight is not None and samplewise:
        sample_weight = np.array(sample_weight)
        tp = np.array(tp)
        fp = np.array(fp)
        fn = np.array(fn)
        tn = sample_weight * y_true.shape[1] - tp - fp - fn
    elif sample_weight is not None:
        tn = sum(sample_weight) - tp - fp - fn
    elif samplewise:
        tn = y_true.shape[1] - tp - fp - fn
    else:
        tn = y_true.shape[0] - tp - fp - fn

    return np.array([tn, fp, fn, tp]).T.reshape(-1, 2, 2)
def cohen_kappa_score(y1, y2, labels=None, weights=None, sample_weight=None):
    r"""Cohen's kappa: a statistic that measures inter-annotator agreement.

    This function computes Cohen's kappa [1]_, a score that expresses the level
    of agreement between two annotators on a classification problem. It is
    defined as

    .. math::
        \kappa = (p_o - p_e) / (1 - p_e)

    where :math:`p_o` is the empirical probability of agreement on the label
    assigned to any sample (the observed agreement ratio), and :math:`p_e` is
    the expected agreement when both annotators assign labels randomly.
    :math:`p_e` is estimated using a per-annotator empirical prior over the
    class labels [2]_.

    Read more in the :ref:`User Guide <cohen_kappa>`.

    Parameters
    ----------
    y1 : array, shape = [n_samples]
        Labels assigned by the first annotator.

    y2 : array, shape = [n_samples]
        Labels assigned by the second annotator. The kappa statistic is
        symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.

    labels : array, shape = [n_classes], optional
        List of labels to index the matrix. This may be used to select a
        subset of labels. If None, all labels that appear at least once in
        ``y1`` or ``y2`` are used.

    weights : str, optional
        Weighting type to calculate the score. None means no weighted;
        "linear" means linear weighted; "quadratic" means quadratic weighted.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    kappa : float
        The kappa statistic, which is a number between -1 and 1. The maximum
        value means complete agreement; zero or lower means chance agreement.

    References
    ----------
    .. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
           Educational and Psychological Measurement 20(1):37-46.
           doi:10.1177/001316446002000104.
    .. [2] `R. Artstein and M. Poesio (2008). "Inter-coder agreement for
           computational linguistics". Computational Linguistics 34(4):555-596.
           <https://www.mitpressjournals.org/doi/pdf/10.1162/coli.07-034-R2>`_
    .. [3] `Wikipedia entry for the Cohen's kappa.
            <https://en.wikipedia.org/wiki/Cohen%27s_kappa>`_
    """
    confusion = confusion_matrix(y1, y2, labels=labels,
                                 sample_weight=sample_weight)
    n_classes = confusion.shape[0]
    # Marginal totals per annotator; chance agreement is the outer product
    # of the two empirical label distributions.
    sum0 = np.sum(confusion, axis=0)
    sum1 = np.sum(confusion, axis=1)
    expected = np.outer(sum0, sum1) / np.sum(sum0)

    # NOTE: use the builtin ``int`` dtype here. ``np.int`` was a deprecated
    # alias of ``int`` (NumPy >= 1.20) and was removed in NumPy 1.24, so
    # ``dtype=np.int`` raises AttributeError on current NumPy.
    if weights is None:
        # Unweighted kappa: every disagreement counts 1, diagonal counts 0.
        w_mat = np.ones([n_classes, n_classes], dtype=int)
        w_mat.flat[:: n_classes + 1] = 0
    elif weights == "linear" or weights == "quadratic":
        # Disagreement cost grows with (|i - j|) or (i - j)**2.
        w_mat = np.zeros([n_classes, n_classes], dtype=int)
        w_mat += np.arange(n_classes)
        if weights == "linear":
            w_mat = np.abs(w_mat - w_mat.T)
        else:
            w_mat = (w_mat - w_mat.T) ** 2
    else:
        raise ValueError("Unknown kappa weighting type.")

    k = np.sum(w_mat * confusion) / np.sum(w_mat * expected)
    return 1 - k
def jaccard_similarity_score(y_true, y_pred, normalize=True,
                             sample_weight=None):
    """Jaccard similarity coefficient score

    .. deprecated:: 0.21
       This is deprecated to be removed in 0.23, since its handling of
       binary and multiclass inputs was broken. `jaccard_score` has an API
       that is consistent with precision_score, f_score, etc.

    Read more in the :ref:`User Guide <jaccard_similarity_score>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    normalize : bool, optional (default=True)
        If ``False``, return the sum of the Jaccard similarity coefficient
        over the sample set. Otherwise, return the average of Jaccard
        similarity coefficient.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    score : float
        The average Jaccard similarity coefficient if ``normalize == True``,
        otherwise its sum over the sample set. The best performance is 1
        (average) or the number of samples (sum).

    See also
    --------
    accuracy_score, hamming_loss, zero_one_loss

    Notes
    -----
    For binary and multiclass targets this is identical to
    ``accuracy_score``; it only differs for multilabel targets.

    References
    ----------
    .. [1] `Wikipedia entry for the Jaccard index
           <https://en.wikipedia.org/wiki/Jaccard_index>`_
    """
    warnings.warn('jaccard_similarity_score has been deprecated and replaced '
                  'with jaccard_score. It will be removed in version 0.23. '
                  'This implementation has surprising behavior for binary '
                  'and multiclass classification tasks.', DeprecationWarning)
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    check_consistent_length(y_true, y_pred, sample_weight)
    if not y_type.startswith('multilabel'):
        # Non-multilabel targets reduce to per-sample exact match.
        score = y_true == y_pred
    else:
        with np.errstate(divide='ignore', invalid='ignore'):
            # oddly, we may get an "invalid" rather than a "divide" error here
            union = count_nonzero(y_true + y_pred, axis=1)
            intersection = count_nonzero(y_true.multiply(y_pred), axis=1)
            score = intersection / union
            # Empty union (no true and no predicted labels) counts as
            # perfect agreement.
            score[union == 0.0] = 1.0
    return _weighted_sum(score, sample_weight, normalize)
def jaccard_score(y_true, y_pred, labels=None, pos_label=1,
                  average='binary', sample_weight=None):
    """Jaccard similarity coefficient score

    The Jaccard index [1], or Jaccard similarity coefficient, defined as
    the size of the intersection divided by the size of the union of two label
    sets, is used to compare set of predicted labels for a sample to the
    corresponding set of labels in ``y_true``.

    Read more in the :ref:`User Guide <jaccard_score>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average. For multilabel targets,
        labels are column indices. By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'`` and the data is binary.
        If the data are multiclass or multilabel, this will be ignored;
        setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
        scores for that label only.

    average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
                       'weighted']
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:
        ``'binary'`` reports only the class given by ``pos_label`` (binary
        targets only); ``'micro'`` counts total true positives, false
        negatives and false positives globally; ``'macro'`` is the unweighted
        mean over labels; ``'weighted'`` weights the per-label scores by
        support; ``'samples'`` averages per-instance scores (multilabel only).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    score : float (if average is not None) or array of floats, shape =\
            [n_unique_labels]

    See also
    --------
    accuracy_score, f_score, multilabel_confusion_matrix

    Notes
    -----
    :func:`jaccard_score` may be a poor metric if there are no
    positives for some samples or classes. Jaccard is undefined if there are
    no true or predicted labels, and our implementation will return a score
    of 0 with a warning.

    References
    ----------
    .. [1] `Wikipedia entry for the Jaccard index
           <https://en.wikipedia.org/wiki/Jaccard_index>`_

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import jaccard_score
    >>> y_true = np.array([[0, 1, 1],
    ...                    [1, 1, 0]])
    >>> y_pred = np.array([[1, 1, 1],
    ...                    [1, 0, 0]])

    In the binary case:

    >>> jaccard_score(y_true[0], y_pred[0])
    0.6666...

    In the multilabel case:

    >>> jaccard_score(y_true, y_pred, average='samples')
    0.5833...
    >>> jaccard_score(y_true, y_pred, average='macro')
    0.6666...
    >>> jaccard_score(y_true, y_pred, average=None)
    array([0.5, 0.5, 1. ])

    In the multiclass case:

    >>> y_pred = [0, 2, 1, 2]
    >>> y_true = [0, 1, 2, 2]
    >>> jaccard_score(y_true, y_pred, average=None)
    array([1. , 0. , 0.33...])
    """
    labels = _check_set_wise_labels(y_true, y_pred, average, labels,
                                    pos_label)
    samplewise = (average == 'samples')
    MCM = multilabel_confusion_matrix(y_true, y_pred,
                                      sample_weight=sample_weight,
                                      labels=labels, samplewise=samplewise)
    # Per label/sample: intersection = tp; union = tp + fp + fn.
    tp = MCM[:, 1, 1]
    union = tp + MCM[:, 0, 1] + MCM[:, 1, 0]
    if average == 'micro':
        # Pool the counts globally before dividing.
        tp = np.array([tp.sum()])
        union = np.array([union.sum()])
    jaccard = _prf_divide(tp, union, 'jaccard',
                          'true or predicted', average, ('jaccard',))
    if average is None:
        return jaccard
    weights = None
    if average == 'weighted':
        support = MCM[:, 1, 0] + MCM[:, 1, 1]
        if np.any(support):
            weights = support
        # else: numerator is 0, and warning should have already been issued
    elif average == 'samples' and sample_weight is not None:
        weights = sample_weight
    return np.average(jaccard, weights=weights)
def matthews_corrcoef(y_true, y_pred, sample_weight=None):
    """Compute the Matthews correlation coefficient (MCC)

    The Matthews correlation coefficient is used in machine learning as a
    measure of the quality of binary and multiclass classifications. It takes
    into account true and false positives and negatives and is generally
    regarded as a balanced measure which can be used even if the classes are of
    very different sizes. The MCC is in essence a correlation coefficient value
    between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
    an average random prediction and -1 an inverse prediction. The statistic
    is also known as the phi coefficient. [source: Wikipedia]

    Binary and multiclass labels are supported. Only in the binary case does
    this relate to information about true and false positives and negatives.
    See references below.

    Read more in the :ref:`User Guide <matthews_corrcoef>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        Ground truth (correct) target values.

    y_pred : array, shape = [n_samples]
        Estimated targets as returned by a classifier.

    sample_weight : array-like of shape = [n_samples], default None
        Sample weights.

    Returns
    -------
    mcc : float
        The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 and inverse
        prediction).

    References
    ----------
    .. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
       accuracy of prediction algorithms for classification: an overview
       <https://doi.org/10.1093/bioinformatics/16.5.412>`_
    .. [2] `Wikipedia entry for the Matthews Correlation Coefficient
       <https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
    .. [3] `Gorodkin, (2004). Comparing two K-category assignments by a
        K-category correlation coefficient
        <https://www.sciencedirect.com/science/article/pii/S1476927104000799>`_
    .. [4] `Jurman, Riccadonna, Furlanello, (2012). A Comparison of MCC and CEN
        Error Measures in MultiClass Prediction
        <https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0041882>`_

    Examples
    --------
    >>> from sklearn.metrics import matthews_corrcoef
    >>> y_true = [+1, +1, +1, -1]
    >>> y_pred = [+1, -1, +1, +1]
    >>> matthews_corrcoef(y_true, y_pred)
    -0.33...
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    check_consistent_length(y_true, y_pred, sample_weight)
    if y_type not in {"binary", "multiclass"}:
        raise ValueError("%s is not supported" % y_type)

    # Map labels to consecutive integers so confusion_matrix is dense.
    encoder = LabelEncoder()
    encoder.fit(np.hstack([y_true, y_pred]))
    y_true = encoder.transform(y_true)
    y_pred = encoder.transform(y_pred)

    C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
    # Row/column marginals of the confusion matrix (float64 for stability).
    row_sums = C.sum(axis=1, dtype=np.float64)
    col_sums = C.sum(axis=0, dtype=np.float64)
    n_correct = np.trace(C, dtype=np.float64)
    n_samples = col_sums.sum()

    # Gorodkin's multiclass MCC expressed through covariances.
    cov_ytyp = n_correct * n_samples - np.dot(row_sums, col_sums)
    cov_ypyp = n_samples ** 2 - np.dot(col_sums, col_sums)
    cov_ytyt = n_samples ** 2 - np.dot(row_sums, row_sums)
    mcc = cov_ytyp / np.sqrt(cov_ytyt * cov_ypyp)

    # A degenerate (constant) prediction or truth yields 0/0; report 0.
    return 0. if np.isnan(mcc) else mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
    """Zero-one classification loss.

    If normalize is ``True``, return the fraction of misclassifications
    (float), else it returns the number of misclassifications (int). The best
    performance is 0.

    Read more in the :ref:`User Guide <zero_one_loss>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    normalize : bool, optional (default=True)
        If ``False``, return the number of misclassifications.
        Otherwise, return the fraction of misclassifications.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float or int,
        If ``normalize == True``, return the fraction of misclassifications
        (float), else it returns the number of misclassifications (int).

    Notes
    -----
    In multilabel classification, the zero_one_loss function corresponds to
    the subset zero-one loss: for each sample, the entire set of labels must be
    correctly predicted, otherwise the loss for that sample is equal to one.

    See also
    --------
    accuracy_score, hamming_loss, jaccard_score

    Examples
    --------
    >>> from sklearn.metrics import zero_one_loss
    >>> y_pred = [1, 2, 3, 4]
    >>> y_true = [2, 2, 3, 4]
    >>> zero_one_loss(y_true, y_pred)
    0.25
    >>> zero_one_loss(y_true, y_pred, normalize=False)
    1

    In the multilabel case with binary label indicators:

    >>> import numpy as np
    >>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
    0.5
    """
    # The zero-one loss is the complement of the accuracy.
    score = accuracy_score(y_true, y_pred,
                           normalize=normalize,
                           sample_weight=sample_weight)
    if normalize:
        return 1 - score
    # Un-normalized: accuracy_score returned the (weighted) number of hits,
    # so subtract it from the (weighted) sample count.
    if sample_weight is None:
        n_samples = _num_samples(y_true)
    else:
        n_samples = np.sum(sample_weight)
    return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
             sample_weight=None):
    """Compute the F1 score, also known as balanced F-score or F-measure

    The F1 score can be interpreted as a weighted average of the precision and
    recall, where an F1 score reaches its best value at 1 and worst score at 0.
    The relative contribution of precision and recall to the F1 score are
    equal. The formula for the F1 score is::

        F1 = 2 * (precision * recall) / (precision + recall)

    In the multi-class and multi-label case, this is the average of
    the F1 score of each class with weighting depending on the ``average``
    parameter.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average. For multilabel targets,
        labels are column indices. By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.

        .. versionchanged:: 0.17
           parameter *labels* improved for multiclass problem.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'`` and the data is binary.
        If the data are multiclass or multilabel, this will be ignored;
        setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
        scores for that label only.

    average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
                       'weighted']
        This parameter is required for multiclass/multilabel targets.
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:
        ``'binary'`` reports only the class given by ``pos_label`` (binary
        targets only); ``'micro'`` counts total true positives, false
        negatives and false positives globally; ``'macro'`` is the unweighted
        mean over labels; ``'weighted'`` weights per-label scores by support
        (can give an F-score outside the precision-recall range);
        ``'samples'`` averages per-instance scores (multilabel only).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    f1_score : float or array of float, shape = [n_unique_labels]
        F1 score of the positive class in binary classification or weighted
        average of the F1 scores of each class for the multiclass task.

    See also
    --------
    fbeta_score, precision_recall_fscore_support, jaccard_score,
    multilabel_confusion_matrix

    References
    ----------
    .. [1] `Wikipedia entry for the F1-score
           <https://en.wikipedia.org/wiki/F1_score>`_

    Examples
    --------
    >>> from sklearn.metrics import f1_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> f1_score(y_true, y_pred, average='macro')
    0.26...
    >>> f1_score(y_true, y_pred, average='micro')
    0.33...
    >>> f1_score(y_true, y_pred, average='weighted')
    0.26...
    >>> f1_score(y_true, y_pred, average=None)
    array([0.8, 0. , 0. ])

    Notes
    -----
    When ``true positive + false positive == 0`` or
    ``true positive + false negative == 0``, f-score returns 0 and raises
    ``UndefinedMetricWarning``.
    """
    # F1 is simply the F-beta score with beta fixed to 1 (precision and
    # recall weighted equally).
    return fbeta_score(y_true, y_pred, beta=1, labels=labels,
                       pos_label=pos_label, average=average,
                       sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
                average='binary', sample_weight=None):
    """Compute the F-beta score

    The F-beta score is the weighted harmonic mean of precision and recall,
    reaching its optimal value at 1 and its worst value at 0.

    The `beta` parameter determines the weight of recall in the combined
    score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
    favors recall (``beta -> 0`` considers only precision, ``beta -> +inf``
    only recall).

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    beta : float
        Weight of precision in harmonic mean.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average. For multilabel targets,
        labels are column indices. By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.

        .. versionchanged:: 0.17
           parameter *labels* improved for multiclass problem.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'`` and the data is binary.
        If the data are multiclass or multilabel, this will be ignored;
        setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
        scores for that label only.

    average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
                       'weighted']
        This parameter is required for multiclass/multilabel targets.
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:
        ``'binary'`` reports only the class given by ``pos_label`` (binary
        targets only); ``'micro'`` counts total true positives, false
        negatives and false positives globally; ``'macro'`` is the unweighted
        mean over labels; ``'weighted'`` weights per-label scores by support
        (can give an F-score outside the precision-recall range);
        ``'samples'`` averages per-instance scores (multilabel only).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fbeta_score : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        F-beta score of the positive class in binary classification or weighted
        average of the F-beta score of each class for the multiclass task.

    See also
    --------
    precision_recall_fscore_support, multilabel_confusion_matrix

    References
    ----------
    .. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
           Modern Information Retrieval. Addison Wesley, pp. 327-328.
    .. [2] `Wikipedia entry for the F1-score
           <https://en.wikipedia.org/wiki/F1_score>`_

    Examples
    --------
    >>> from sklearn.metrics import fbeta_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
    0.23...
    >>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
    0.33...
    >>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
    0.23...
    >>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
    array([0.71..., 0.        , 0.        ])

    Notes
    -----
    When ``true positive + false positive == 0`` or
    ``true positive + false negative == 0``, f-score returns 0 and raises
    ``UndefinedMetricWarning``.
    """
    # Delegate to the shared P/R/F implementation; only warn about the
    # f-score component and discard precision, recall and support.
    scores = precision_recall_fscore_support(y_true, y_pred,
                                             beta=beta,
                                             labels=labels,
                                             pos_label=pos_label,
                                             average=average,
                                             warn_for=('f-score',),
                                             sample_weight=sample_weight)
    return scores[2]  # (precision, recall, fbeta, support) -> fbeta
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
mask = denominator == 0.0
denominator = denominator.copy()
denominator[mask] = 1 # avoid infs/nans
result = numerator / denominator
if not np.any(mask):
return result
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
def _check_set_wise_labels(y_true, y_pred, average, labels, pos_label):
    """Validation associated with set-wise metrics

    Returns identified labels
    """
    average_options = (None, 'micro', 'macro', 'weighted', 'samples')
    if average not in average_options and average != 'binary':
        raise ValueError('average has to be one of ' +
                         str(average_options))

    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    present_labels = unique_labels(y_true, y_pred)

    if average != 'binary':
        # pos_label is meaningless for non-binary averaging; nudge the user
        # toward labels=[pos_label] if they set it to something non-default.
        if pos_label not in (None, 1):
            warnings.warn("Note that pos_label (set to %r) is ignored when "
                          "average != 'binary' (got %r). You may use "
                          "labels=[pos_label] to specify a single positive class."
                          % (pos_label, average), UserWarning)
        return labels

    if y_type != 'binary':
        options = list(average_options)
        if y_type == 'multiclass':
            # 'samples' only makes sense for multilabel targets.
            options.remove('samples')
        raise ValueError("Target is %s but average='binary'. Please "
                         "choose another average setting, one of %r."
                         % (y_type, options))

    if pos_label not in present_labels and len(present_labels) >= 2:
        raise ValueError("pos_label=%r is not a valid label: "
                         "%r" % (pos_label, present_labels))
    # Binary case: score only the positive class.
    return [pos_label]
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
                                    pos_label=1, average=None,
                                    warn_for=('precision', 'recall',
                                              'f-score'),
                                    sample_weight=None):
    """Compute precision, recall, F-measure and support for each class
    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
    true positives and ``fp`` the number of false positives. The precision is
    intuitively the ability of the classifier not to label as positive a sample
    that is negative.
    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
    true positives and ``fn`` the number of false negatives. The recall is
    intuitively the ability of the classifier to find all the positive samples.
    The F-beta score can be interpreted as a weighted harmonic mean of
    the precision and recall, where an F-beta score reaches its best
    value at 1 and worst score at 0.
    The F-beta score weights recall more than precision by a factor of
    ``beta``. ``beta == 1.0`` means recall and precision are equally important.
    The support is the number of occurrences of each class in ``y_true``.
    If ``pos_label is None`` and in binary classification, this function
    returns the average precision, recall and F-measure if ``average``
    is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.
    beta : float, 1.0 by default
        The strength of recall versus precision in the F-score.
    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average. For multilabel targets,
        labels are column indices. By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.
    pos_label : str or int, 1 by default
        The class to report if ``average='binary'`` and the data is binary.
        If the data are multiclass or multilabel, this will be ignored;
        setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
        scores for that label only.
    average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
                       'weighted']
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:
        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average weighted
            by support (the number of true instances for each label). This
            alters 'macro' to account for label imbalance; it can result in an
            F-score that is not between precision and recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average (only
            meaningful for multilabel classification where this differs from
            :func:`accuracy_score`).
    warn_for : tuple or set, for internal use
        This determines which warnings will be made in the case that this
        function is being used to return only one of its metrics.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    precision : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    recall : float (if average is not None) or array of float, , shape =\
        [n_unique_labels]
    fbeta_score : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    support : int (if average is not None) or array of int, shape =\
        [n_unique_labels]
        The number of occurrences of each label in ``y_true``.
    References
    ----------
    .. [1] `Wikipedia entry for the Precision and recall
           <https://en.wikipedia.org/wiki/Precision_and_recall>`_
    .. [2] `Wikipedia entry for the F1-score
           <https://en.wikipedia.org/wiki/F1_score>`_
    .. [3] `Discriminative Methods for Multi-labeled Classification Advances
           in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
           Godbole, Sunita Sarawagi
           <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import precision_recall_fscore_support
    >>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
    >>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
    >>> precision_recall_fscore_support(y_true, y_pred, average='macro')
    (0.22..., 0.33..., 0.26..., None)
    >>> precision_recall_fscore_support(y_true, y_pred, average='micro')
    (0.33..., 0.33..., 0.33..., None)
    >>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
    (0.22..., 0.33..., 0.26..., None)
    It is possible to compute per-label precisions, recalls, F1-scores and
    supports instead of averaging:
    >>> precision_recall_fscore_support(y_true, y_pred, average=None,
    ... labels=['pig', 'dog', 'cat'])
    (array([0.        , 0.        , 0.66...]),
     array([0., 0., 1.]), array([0. , 0. , 0.8]),
     array([2, 2, 2]))
    Notes
    -----
    When ``true positive + false positive == 0``, precision is undefined;
    When ``true positive + false negative == 0``, recall is undefined.
    In such cases, the metric will be set to 0, as will f-score, and
    ``UndefinedMetricWarning`` will be raised.
    """
    if beta < 0:
        raise ValueError("beta should be >=0 in the F-beta score")
    # Validates average/pos_label and resolves the label list to score
    # (for average='binary' this collapses to [pos_label]).
    labels = _check_set_wise_labels(y_true, y_pred, average, labels,
                                    pos_label)
    # Calculate tp_sum, pred_sum, true_sum ###
    samplewise = average == 'samples'
    # MCM has shape (n_outputs, 2, 2): one 2x2 confusion matrix per label
    # (or per sample when samplewise), laid out [[tn, fp], [fn, tp]].
    MCM = multilabel_confusion_matrix(y_true, y_pred,
                                      sample_weight=sample_weight,
                                      labels=labels, samplewise=samplewise)
    tp_sum = MCM[:, 1, 1]
    pred_sum = tp_sum + MCM[:, 0, 1]   # tp + fp: predicted positives
    true_sum = tp_sum + MCM[:, 1, 0]   # tp + fn: actual positives (support)
    if average == 'micro':
        # Pool all counts into a single global confusion before dividing.
        tp_sum = np.array([tp_sum.sum()])
        pred_sum = np.array([pred_sum.sum()])
        true_sum = np.array([true_sum.sum()])
    # Finally, we have all our sufficient statistics. Divide! #
    beta2 = beta ** 2
    # Divide, and on zero-division, set scores to 0 and warn:
    precision = _prf_divide(tp_sum, pred_sum,
                            'precision', 'predicted', average, warn_for)
    recall = _prf_divide(tp_sum, true_sum,
                         'recall', 'true', average, warn_for)
    if np.isposinf(beta):
        # F-beta converges to recall as beta -> +inf.
        f_score = recall
    else:
        # Don't need to warn for F: either P or R warned, or tp == 0 where pos
        # and true are nonzero, in which case, F is well-defined and zero
        denom = beta2 * precision + recall
        denom[denom == 0.] = 1  # avoid division by 0
        f_score = (1 + beta2) * precision * recall / denom
    # Average the results
    if average == 'weighted':
        # Weight each label's score by its support in y_true.
        weights = true_sum
        if weights.sum() == 0:
            return 0, 0, 0, None
    elif average == 'samples':
        weights = sample_weight
    else:
        weights = None
    if average is not None:
        assert average != 'binary' or len(precision) == 1
        precision = np.average(precision, weights=weights)
        recall = np.average(recall, weights=weights)
        f_score = np.average(f_score, weights=weights)
        true_sum = None  # return no support
    return precision, recall, f_score, true_sum
def precision_score(y_true, y_pred, labels=None, pos_label=1,
                    average='binary', sample_weight=None):
    """Compute the precision

    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number
    of true positives and ``fp`` the number of false positives: intuitively,
    the ability of the classifier not to label as positive a sample that is
    negative.  The best value is 1 and the worst value is 0.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.
    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and
        their order if ``average is None``.  For multilabel targets,
        labels are column indices.  By default, all labels in ``y_true``
        and ``y_pred`` are used in sorted order.
    pos_label : str or int, 1 by default
        The class to report if ``average='binary'`` and the data is
        binary; ignored for multiclass/multilabel data.
    average : string, [None, 'binary' (default), 'micro', 'macro', \
              'samples', 'weighted']
        Averaging strategy, required for multiclass/multilabel targets.
        ``None`` returns the per-class scores.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        Precision of the positive class in binary classification or
        weighted average of the precision of each class for the
        multiclass task.

    See also
    --------
    precision_recall_fscore_support, multilabel_confusion_matrix

    Notes
    -----
    When ``true positive + false positive == 0``, precision returns 0 and
    raises ``UndefinedMetricWarning``.
    """
    # Delegate to the shared implementation and keep only the first element
    # of the (precision, recall, fbeta, support) tuple.  Undefined-metric
    # warnings are restricted to the precision computation.
    precision, _, _, _ = precision_recall_fscore_support(
        y_true, y_pred, labels=labels, pos_label=pos_label,
        average=average, warn_for=('precision',),
        sample_weight=sample_weight)
    return precision
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
                 sample_weight=None):
    """Compute the recall

    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number
    of true positives and ``fn`` the number of false negatives: intuitively,
    the ability of the classifier to find all the positive samples.  The
    best value is 1 and the worst value is 0.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.
    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and
        their order if ``average is None``.  For multilabel targets,
        labels are column indices.  By default, all labels in ``y_true``
        and ``y_pred`` are used in sorted order.
    pos_label : str or int, 1 by default
        The class to report if ``average='binary'`` and the data is
        binary; ignored for multiclass/multilabel data.
    average : string, [None, 'binary' (default), 'micro', 'macro', \
              'samples', 'weighted']
        Averaging strategy, required for multiclass/multilabel targets.
        ``None`` returns the per-class scores.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    recall : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        Recall of the positive class in binary classification or weighted
        average of the recall of each class for the multiclass task.

    See also
    --------
    precision_recall_fscore_support, balanced_accuracy_score,
    multilabel_confusion_matrix

    Notes
    -----
    When ``true positive + false negative == 0``, recall returns 0 and
    raises ``UndefinedMetricWarning``.
    """
    # Delegate to the shared implementation and keep only the second element
    # of the (precision, recall, fbeta, support) tuple.  Undefined-metric
    # warnings are restricted to the recall computation.
    recall = precision_recall_fscore_support(
        y_true, y_pred, labels=labels, pos_label=pos_label,
        average=average, warn_for=('recall',),
        sample_weight=sample_weight)[1]
    return recall
def balanced_accuracy_score(y_true, y_pred, sample_weight=None,
                            adjusted=False):
    """Compute the balanced accuracy

    The balanced accuracy, used in binary and multiclass classification
    problems to deal with imbalanced datasets, is defined as the average
    of the recall obtained on each class.  The best value is 1 and the
    worst value is 0 when ``adjusted=False``.

    Read more in the :ref:`User Guide <balanced_accuracy_score>`.

    Parameters
    ----------
    y_true : 1d array-like
        Ground truth (correct) target values.
    y_pred : 1d array-like
        Estimated targets as returned by a classifier.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    adjusted : bool, default=False
        When true, the result is adjusted for chance, so that random
        performance would score 0, and perfect performance scores 1.

    Returns
    -------
    balanced_accuracy : float

    See also
    --------
    recall_score, roc_auc_score

    Notes
    -----
    This definition is equivalent to :func:`accuracy_score` with
    class-balanced sample weights.

    References
    ----------
    .. [1] Brodersen, K.H.; Ong, C.S.; Stephan, K.E.; Buhmann, J.M. (2010).
           The balanced accuracy and its posterior distribution.
           Proceedings of the 20th International Conference on Pattern
           Recognition, 3121-24.
    """
    cm = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
    # Per-class recall is the confusion-matrix diagonal over the row sums.
    # Classes absent from y_true have a zero row sum and yield NaN; the
    # division warnings are silenced here and the NaNs filtered out below.
    with np.errstate(divide='ignore', invalid='ignore'):
        per_class_recall = np.diag(cm) / cm.sum(axis=1)
    nan_rows = np.isnan(per_class_recall)
    if np.any(nan_rows):
        warnings.warn('y_pred contains classes not in y_true')
        per_class_recall = per_class_recall[~nan_rows]
    score = np.mean(per_class_recall)
    if adjusted:
        # Rescale so chance-level performance maps to 0 and perfect to 1.
        chance = 1 / len(per_class_recall)
        score = (score - chance) / (1 - chance)
    return score
def classification_report(y_true, y_pred, labels=None, target_names=None,
                          sample_weight=None, digits=2, output_dict=False):
    """Build a text report showing the main classification metrics
    Read more in the :ref:`User Guide <classification_report>`.
    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.
    labels : array, shape = [n_labels]
        Optional list of label indices to include in the report.
    target_names : list of strings
        Optional display names matching the labels (same order).
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    digits : int
        Number of digits for formatting output floating point values.
        When ``output_dict`` is ``True``, this will be ignored and the
        returned values will not be rounded.
    output_dict : bool (default = False)
        If True, return output as dict
    Returns
    -------
    report : string / dict
        Text summary of the precision, recall, F1 score for each class.
        Dictionary returned if output_dict is True. Dictionary has the
        following structure::
            {'label 1': {'precision':0.5,
                         'recall':1.0,
                         'f1-score':0.67,
                         'support':1},
             'label 2': { ... },
              ...
            }
        The reported averages include macro average (averaging the unweighted
        mean per label), weighted average (averaging the support-weighted mean
        per label), sample average (only for multilabel classification) and
        micro average (averaging the total true positives, false negatives and
        false positives) it is only shown for multi-label or multi-class
        with a subset of classes because it is accuracy otherwise.
        See also :func:`precision_recall_fscore_support` for more details
        on averages.
        Note that in binary classification, recall of the positive class
        is also known as "sensitivity"; recall of the negative class is
        "specificity".
    See also
    --------
    precision_recall_fscore_support, confusion_matrix,
    multilabel_confusion_matrix
    Examples
    --------
    >>> from sklearn.metrics import classification_report
    >>> y_true = [0, 1, 2, 2, 2]
    >>> y_pred = [0, 0, 2, 2, 1]
    >>> target_names = ['class 0', 'class 1', 'class 2']
    >>> print(classification_report(y_true, y_pred, target_names=target_names))
                  precision    recall  f1-score   support
    <BLANKLINE>
         class 0       0.50      1.00      0.67         1
         class 1       0.00      0.00      0.00         1
         class 2       1.00      0.67      0.80         3
    <BLANKLINE>
        accuracy                           0.60         5
       macro avg       0.50      0.56      0.49         5
    weighted avg       0.70      0.60      0.61         5
    <BLANKLINE>
    >>> y_pred = [1, 1, 0]
    >>> y_true = [1, 1, 1]
    >>> print(classification_report(y_true, y_pred, labels=[1, 2, 3]))
                  precision    recall  f1-score   support
    <BLANKLINE>
               1       1.00      0.67      0.80         3
               2       0.00      0.00      0.00         0
               3       0.00      0.00      0.00         0
    <BLANKLINE>
       micro avg       1.00      0.67      0.80         3
       macro avg       0.33      0.22      0.27         3
    weighted avg       1.00      0.67      0.80         3
    <BLANKLINE>
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    labels_given = True
    if labels is None:
        # No explicit label list: report on every label seen in either array.
        labels = unique_labels(y_true, y_pred)
        labels_given = False
    else:
        labels = np.asarray(labels)
    # labelled micro average
    # Micro-average coincides with accuracy only for single-label targets
    # whose reported labels cover every class present in the data; in that
    # case the row is titled 'accuracy' instead of 'micro avg'.
    micro_is_accuracy = ((y_type == 'multiclass' or y_type == 'binary') and
                         (not labels_given or
                          (set(labels) == set(unique_labels(y_true, y_pred)))))
    if target_names is not None and len(labels) != len(target_names):
        if labels_given:
            # The caller chose the labels explicitly, so a size mismatch is
            # tolerated with a warning.
            warnings.warn(
                "labels size, {0}, does not match size of target_names, {1}"
                .format(len(labels), len(target_names))
            )
        else:
            # Labels were inferred from the data, so a mismatch is an error.
            raise ValueError(
                "Number of classes, {0}, does not match size of "
                "target_names, {1}. Try specifying the labels "
                "parameter".format(len(labels), len(target_names))
            )
    if target_names is None:
        # Default display names: the labels themselves, stringified.
        target_names = ['%s' % l for l in labels]
    headers = ["precision", "recall", "f1-score", "support"]
    # compute per-class results without averaging
    p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
                                                  labels=labels,
                                                  average=None,
                                                  sample_weight=sample_weight)
    rows = zip(target_names, p, r, f1, s)
    # 'samples' averaging only makes sense for multilabel targets.
    if y_type.startswith('multilabel'):
        average_options = ('micro', 'macro', 'weighted', 'samples')
    else:
        average_options = ('micro', 'macro', 'weighted')
    if output_dict:
        # One dict entry per class: {class name: {header: value}}.
        report_dict = {label[0]: label[1:] for label in rows}
        for label, scores in report_dict.items():
            report_dict[label] = dict(zip(headers,
                                          [i.item() for i in scores]))
    else:
        # Column width must fit both the longest class name and the longest
        # last-line heading ('weighted avg').
        longest_last_line_heading = 'weighted avg'
        name_width = max(len(cn) for cn in target_names)
        width = max(name_width, len(longest_last_line_heading), digits)
        head_fmt = '{:>{width}s} ' + ' {:>9}' * len(headers)
        report = head_fmt.format('', *headers, width=width)
        report += '\n\n'
        row_fmt = '{:>{width}s} ' + ' {:>9.{digits}f}' * 3 + ' {:>9}\n'
        for row in rows:
            report += row_fmt.format(*row, width=width, digits=digits)
        report += '\n'
    # compute all applicable averages
    for average in average_options:
        if average.startswith('micro') and micro_is_accuracy:
            line_heading = 'accuracy'
        else:
            line_heading = average + ' avg'
        # compute averages with specified averaging method
        avg_p, avg_r, avg_f1, _ = precision_recall_fscore_support(
            y_true, y_pred, labels=labels,
            average=average, sample_weight=sample_weight)
        avg = [avg_p, avg_r, avg_f1, np.sum(s)]
        if output_dict:
            report_dict[line_heading] = dict(
                zip(headers, [i.item() for i in avg]))
        else:
            if line_heading == 'accuracy':
                # The accuracy row shows a single value in the f1 column;
                # the precision/recall columns are left blank.
                row_fmt_accuracy = '{:>{width}s} ' + \
                        ' {:>9.{digits}}' * 2 + ' {:>9.{digits}f}' + \
                        ' {:>9}\n'
                report += row_fmt_accuracy.format(line_heading, '', '',
                                                  *avg[2:], width=width,
                                                  digits=digits)
            else:
                report += row_fmt.format(line_heading, *avg,
                                         width=width, digits=digits)
    if output_dict:
        if 'accuracy' in report_dict.keys():
            # Collapse the accuracy entry to a single float (all three
            # metric slots hold the same accuracy value).
            report_dict['accuracy'] = report_dict['accuracy']['precision']
        return report_dict
    else:
        return report
def hamming_loss(y_true, y_pred, labels=None, sample_weight=None):
    """Compute the average Hamming loss.

    The Hamming loss is the fraction of labels that are incorrectly
    predicted.  It is always between 0 and 1, lower being better.

    Read more in the :ref:`User Guide <hamming_loss>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.
    labels : array, shape = [n_labels], optional (default='deprecated')
        Integer array of labels.  Unused.

        .. deprecated:: 0.21
           This parameter is deprecated in version 0.21 and will be
           removed in version 0.23.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float or int,
        Return the average Hamming loss between element of ``y_true``
        and ``y_pred``.

    See Also
    --------
    accuracy_score, jaccard_score, zero_one_loss

    Notes
    -----
    In multiclass classification, the Hamming loss corresponds to the
    Hamming distance between ``y_true`` and ``y_pred``.  In multilabel
    classification it penalizes individual label mistakes, unlike the
    subset zero-one loss which requires the whole label set to match.
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    check_consistent_length(y_true, y_pred, sample_weight)
    if labels is not None:
        warnings.warn("The labels parameter is unused. It was"
                      " deprecated in version 0.21 and"
                      " will be removed in version 0.23",
                      DeprecationWarning)
    # Normalizer for the weighted multilabel case: the mean sample weight
    # (1 when unweighted).
    weight_average = 1. if sample_weight is None else np.mean(sample_weight)
    if y_type.startswith('multilabel'):
        # Count (weighted) label-wise disagreements and divide by the total
        # weighted number of label slots.
        n_differences = count_nonzero(y_true - y_pred,
                                      sample_weight=sample_weight)
        return (n_differences /
                (y_true.shape[0] * y_true.shape[1] * weight_average))
    if y_type in ["binary", "multiclass"]:
        # Single-label case: simply the weighted fraction of mismatches.
        return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)
    raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None,
             labels=None):
    """Log loss, aka logistic loss or cross-entropy loss.
    This is the loss function used in (multinomial) logistic regression
    and extensions of it such as neural networks, defined as the negative
    log-likelihood of the true labels given a probabilistic classifier's
    predictions. The log loss is only defined for two or more labels.
    For a single sample with true label yt in {0,1} and
    estimated probability yp that yt = 1, the log loss is
    -log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
    Read more in the :ref:`User Guide <log_loss>`.
    Parameters
    ----------
    y_true : array-like or label indicator matrix
        Ground truth (correct) labels for n_samples samples.
    y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,)
        Predicted probabilities, as returned by a classifier's
        predict_proba method. If ``y_pred.shape = (n_samples,)``
        the probabilities provided are assumed to be that of the
        positive class. The labels in ``y_pred`` are assumed to be
        ordered alphabetically, as done by
        :class:`preprocessing.LabelBinarizer`.
    eps : float
        Log loss is undefined for p=0 or p=1, so probabilities are
        clipped to max(eps, min(1 - eps, p)).
    normalize : bool, optional (default=True)
        If true, return the mean loss per sample.
        Otherwise, return the sum of the per-sample losses.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    labels : array-like, optional (default=None)
        If not provided, labels will be inferred from y_true. If ``labels``
        is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
        assumed to be binary and are inferred from ``y_true``.
        .. versionadded:: 0.18
    Returns
    -------
    loss : float
    Examples
    --------
    >>> from sklearn.metrics import log_loss
    >>> log_loss(["spam", "ham", "ham", "spam"],
    ...          [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
    0.21616...
    References
    ----------
    C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
    p. 209.
    Notes
    -----
    The logarithm used is the natural logarithm (base-e).
    """
    y_pred = check_array(y_pred, ensure_2d=False)
    check_consistent_length(y_pred, y_true, sample_weight)
    # Binarize the true labels; an explicit `labels` argument takes
    # precedence over the labels observed in y_true.
    lb = LabelBinarizer()
    if labels is not None:
        lb.fit(labels)
    else:
        lb.fit(y_true)
    # Log loss needs at least two classes to be well-defined.
    if len(lb.classes_) == 1:
        if labels is None:
            raise ValueError('y_true contains only one label ({0}). Please '
                             'provide the true labels explicitly through the '
                             'labels argument.'.format(lb.classes_[0]))
        else:
            raise ValueError('The labels array needs to contain at least two '
                             'labels for log_loss, '
                             'got {0}.'.format(lb.classes_))
    transformed_labels = lb.transform(y_true)
    # Binary problems come back as a single indicator column; expand to two
    # columns (negative class, positive class) so the loss sum below works.
    if transformed_labels.shape[1] == 1:
        transformed_labels = np.append(1 - transformed_labels,
                                       transformed_labels, axis=1)
    # Clipping
    # Keep probabilities away from 0 and 1 so np.log below stays finite.
    y_pred = np.clip(y_pred, eps, 1 - eps)
    # If y_pred is of single dimension, assume y_true to be binary
    # and then check.
    if y_pred.ndim == 1:
        y_pred = y_pred[:, np.newaxis]
    if y_pred.shape[1] == 1:
        y_pred = np.append(1 - y_pred, y_pred, axis=1)
    # Check if dimensions are consistent.
    transformed_labels = check_array(transformed_labels)
    if len(lb.classes_) != y_pred.shape[1]:
        if labels is None:
            raise ValueError("y_true and y_pred contain different number of "
                             "classes {0}, {1}. Please provide the true "
                             "labels explicitly through the labels argument. "
                             "Classes found in "
                             "y_true: {2}".format(transformed_labels.shape[1],
                                                  y_pred.shape[1],
                                                  lb.classes_))
        else:
            raise ValueError('The number of classes in labels is different '
                             'from that in y_pred. Classes found in '
                             'labels: {0}'.format(lb.classes_))
    # Renormalize
    # Clipping may have broken the per-row sum-to-1 invariant; restore it.
    y_pred /= y_pred.sum(axis=1)[:, np.newaxis]
    loss = -(transformed_labels * np.log(y_pred)).sum(axis=1)
    return _weighted_sum(loss, sample_weight, normalize)
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
    """Average hinge loss (non-regularized)
    In binary class case, assuming labels in y_true are encoded with +1 and -1,
    when a prediction mistake is made, ``margin = y_true * pred_decision`` is
    always negative (since the signs disagree), implying ``1 - margin`` is
    always greater than 1. The cumulated hinge loss is therefore an upper
    bound of the number of mistakes made by the classifier.
    In multiclass case, the function expects that either all the labels are
    included in y_true or an optional labels argument is provided which
    contains all the labels. The multilabel margin is calculated according
    to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
    is an upper bound of the number of mistakes made by the classifier.
    Read more in the :ref:`User Guide <hinge_loss>`.
    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True target, consisting of integers of two values. The positive label
        must be greater than the negative label.
    pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
        Predicted decisions, as output by decision_function (floats).
    labels : array, optional, default None
        Contains all the labels for the problem. Used in multiclass hinge loss.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    loss : float
    References
    ----------
    .. [1] `Wikipedia entry on the Hinge loss
           <https://en.wikipedia.org/wiki/Hinge_loss>`_
    .. [2] Koby Crammer, Yoram Singer. On the Algorithmic
           Implementation of Multiclass Kernel-based Vector
           Machines. Journal of Machine Learning Research 2,
           (2001), 265-292
    .. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
           by Robert C. Moore, John DeNero.
           <http://www.ttic.edu/sigml/symposium2011/papers/
           Moore+DeNero_Regularization.pdf>`_
    Examples
    --------
    >>> from sklearn import svm
    >>> from sklearn.metrics import hinge_loss
    >>> X = [[0], [1]]
    >>> y = [-1, 1]
    >>> est = svm.LinearSVC(random_state=0)
    >>> est.fit(X, y)
    LinearSVC(random_state=0)
    >>> pred_decision = est.decision_function([[-2], [3], [0.5]])
    >>> pred_decision
    array([-2.18...,  2.36...,  0.09...])
    >>> hinge_loss([-1, 1, 1], pred_decision)
    0.30...
    In the multiclass case:
    >>> import numpy as np
    >>> X = np.array([[0], [1], [2], [3]])
    >>> Y = np.array([0, 1, 2, 3])
    >>> labels = np.array([0, 1, 2, 3])
    >>> est = svm.LinearSVC()
    >>> est.fit(X, Y)
    LinearSVC()
    >>> pred_decision = est.decision_function([[-1], [2], [3]])
    >>> y_true = [0, 2, 3]
    >>> hinge_loss(y_true, pred_decision, labels)
    0.56...
    """
    check_consistent_length(y_true, pred_decision, sample_weight)
    pred_decision = check_array(pred_decision, ensure_2d=False)
    y_true = column_or_1d(y_true)
    y_true_unique = np.unique(y_true)
    if y_true_unique.size > 2:
        # Multiclass (Crammer-Singer) case: need one decision column per
        # label, so the full label set must be known.
        if (labels is None and pred_decision.ndim > 1 and
                (np.size(y_true_unique) != pred_decision.shape[1])):
            raise ValueError("Please include all labels in y_true "
                             "or pass labels as third argument")
        if labels is None:
            labels = y_true_unique
        # Encode labels as column indices into pred_decision.
        le = LabelEncoder()
        le.fit(labels)
        y_true = le.transform(y_true)
        # `mask` is True everywhere except at each sample's true-class
        # column, so ~mask selects the decision value of the true class.
        mask = np.ones_like(pred_decision, dtype=bool)
        mask[np.arange(y_true.shape[0]), y_true] = False
        margin = pred_decision[~mask]
        # Crammer-Singer margin: true-class score minus the best score
        # among all other classes.
        margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
                         axis=1)
    else:
        # Handles binary class case
        # this code assumes that positive and negative labels
        # are encoded as +1 and -1 respectively
        pred_decision = column_or_1d(pred_decision)
        pred_decision = np.ravel(pred_decision)
        lbin = LabelBinarizer(neg_label=-1)
        y_true = lbin.fit_transform(y_true)[:, 0]
        try:
            margin = y_true * pred_decision
        except TypeError:
            raise TypeError("pred_decision should be an array of floats.")
    losses = 1 - margin
    # The hinge_loss doesn't penalize good enough predictions.
    np.clip(losses, 0, None, out=losses)
    return np.average(losses, weights=sample_weight)
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
    """Compute the Brier score.

    The Brier score measures the mean squared difference between the
    predicted probability of the positive class and the actual binary
    outcome.  It always takes a value between 0 and 1, and the smaller
    it is the better calibrated the predictions are (hence the naming
    with "loss").  It is only appropriate for binary and categorical
    outcomes that can be structured as true or false; which label is
    considered positive is controlled via ``pos_label``.

    Read more in the :ref:`User Guide <calibration>`.

    Parameters
    ----------
    y_true : array, shape (n_samples,)
        True targets.
    y_prob : array, shape (n_samples,)
        Probabilities of the positive class.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    pos_label : int or str, default=None
        Label of the positive class.
        Defaults to the greater label unless y_true is all 0 or all -1
        in which case pos_label defaults to 1.

    Returns
    -------
    score : float
        Brier score

    References
    ----------
    .. [1] `Wikipedia entry for the Brier score.
            <https://en.wikipedia.org/wiki/Brier_score>`_
    """
    y_true = column_or_1d(y_true)
    y_prob = column_or_1d(y_prob)
    assert_all_finite(y_true)
    assert_all_finite(y_prob)
    check_consistent_length(y_true, y_prob, sample_weight)
    labels = np.unique(y_true)
    if len(labels) > 2:
        raise ValueError("Only binary classification is supported. "
                         "Labels in y_true: %s." % labels)
    if y_prob.max() > 1:
        raise ValueError("y_prob contains values greater than 1.")
    if y_prob.min() < 0:
        raise ValueError("y_prob contains values less than 0.")
    if pos_label is None:
        # When y_true is all 0 or all -1, pos_label is set to 1, consistent
        # with precision_recall_curve/roc_curve; otherwise it is the greater
        # label (kept different for backward compatibility).
        all_negative = (np.array_equal(labels, [0]) or
                        np.array_equal(labels, [-1]))
        pos_label = 1 if all_negative else y_true.max()
    outcomes = (y_true == pos_label).astype(int)
    return np.average((outcomes - y_prob) ** 2, weights=sample_weight)
| chrsrds/scikit-learn | sklearn/metrics/classification.py | Python | bsd-3-clause | 91,448 | [
"Brian"
] | eb3603623d1c7dcea03e94998c0671364a1d4f60c5979635ec287b8fafda2394 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
class Openmm(CMakePackage, CudaPackage):
    """A high performance toolkit for molecular simulation. Use it as
    a library, or as an application. We include extensive language
    bindings for Python, C, C++, and even Fortran. The code is open
    source and actively maintained on Github, licensed under MIT and
    LGPL. Part of the Omnia suite of tools for predictive biomolecular
    simulation. """
    homepage = "https://openmm.org/"
    url = "https://github.com/openmm/openmm/archive/7.4.1.tar.gz"
    # Known releases, newest first, pinned by sha256 of the source tarball.
    version('7.5.0', sha256='516748b4f1ae936c4d70cc6401174fc9384244c65cd3aef27bc2c53eac6d6de5')
    version('7.4.1', sha256='e8102b68133e6dcf7fcf29bc76a11ea54f30af71d8a7705aec0aee957ebe3a6d')
    # Besides the default 'install' target, also run the upstream
    # 'PythonInstall' target so the Python bindings are installed.
    install_targets = ['install', 'PythonInstall']
    depends_on('python@2.7:', type=('build', 'run'))
    depends_on('cmake@3.1:', type='build')
    depends_on('doxygen', type='build')
    depends_on('swig', type='build')
    depends_on('fftw')
    depends_on('py-cython', type='build')
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('cuda', when='+cuda', type=('build', 'link', 'run'))
    extends('python')
    def patch(self):
        # Redirect the Python wrapper's `setup.py install` to this package's
        # own installation prefix instead of the Python interpreter's prefix.
        install_string = "set(PYTHON_SETUP_COMMAND \"install " \
                         "--prefix={0}\")".format(self.prefix)
        filter_file(r'set\(PYTHON_SETUP_COMMAND \"install.*',
                    install_string,
                    'wrappers/python/CMakeLists.txt')
    def setup_run_environment(self, env):
        # For CUDA builds, tell OpenMM which nvcc to use at run time and
        # make sure the matching host compiler is on PATH.
        spec = self.spec
        if '+cuda' in spec:
            env.set('OPENMM_CUDA_COMPILER',
                    self.spec['cuda'].prefix.bin.nvcc)
            env.prepend_path('PATH',
                             os.path.dirname(self.compiler.cc))
| LLNL/spack | var/spack/repos/builtin/packages/openmm/package.py | Python | lgpl-2.1 | 1,948 | [
"OpenMM"
] | d4eaa5511659dfed1ed71a800ebf0a61c30dbd00098112e3fa8fc3baeb6fa253 |
# -*- coding: utf-8 -*-
"""Release data for the IPython project."""
#-----------------------------------------------------------------------------
# Copyright (c) 2008, IPython Development Team.
# Copyright (c) 2001, Fernando Perez <fernando.perez@colorado.edu>
# Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
# Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# Name of the package for release purposes.  This is the name which labels
# the tarballs and RPMs made by distutils, so it's best to lowercase it.
name = 'ipython'

# IPython version components.  An empty ``_version_extra`` corresponds to a
# full release; 'dev' as ``_version_extra`` marks a development version.
_version_major = 5
_version_minor = 3
_version_patch = 0
_version_extra = '.dev'
# _version_extra = 'rc1'
_version_extra = ''  # Uncomment this for full releases

# release.codename is deprecated in 2.0, will be removed in 3.0
codename = ''

# Build the canonical version string (e.g. '5.3.0' or '5.3.0.dev') from the
# numeric components plus the extra suffix.
_ver = [_version_major, _version_minor, _version_patch]
__version__ = '%d.%d.%d' % tuple(_ver) + _version_extra

version = __version__  # backwards compatibility name
version_info = (_version_major, _version_minor, _version_patch, _version_extra)

# Change this when incrementing the kernel protocol version
kernel_protocol_version_info = (5, 0)
kernel_protocol_version = '{0}.{1}'.format(*kernel_protocol_version_info)

description = "IPython: Productive Interactive Computing"

long_description = """
IPython provides a rich toolkit to help you make the most out of using Python
interactively.  Its main components are:
* A powerful interactive Python shell
* A `Jupyter <http://jupyter.org/>`_ kernel to work with Python code in Jupyter
notebooks and other interactive frontends.
The enhanced interactive Python shells have the following main features:
* Comprehensive object introspection.
* Input history, persistent across sessions.
* Caching of output results during a session with automatically generated
references.
* Extensible tab completion, with support by default for completion of python
variables and keywords, filenames and function keywords.
* Extensible system of 'magic' commands for controlling the environment and
performing many tasks related either to IPython or the operating system.
* A rich configuration system with easy switching between different setups
(simpler than changing $PYTHONSTARTUP environment variables every time).
* Session logging and reloading.
* Extensible syntax processing for special purpose situations.
* Access to the system shell with user-extensible alias system.
* Easily embeddable in other Python programs and GUIs.
* Integrated access to the pdb debugger and the Python profiler.
The latest development version is always available from IPython's `GitHub
site <http://github.com/ipython>`_.
"""

license = 'BSD'

# Mapping of short handle -> (full name, email) for the project authors.
authors = {
    'Fernando': ('Fernando Perez', 'fperez.net@gmail.com'),
    'Janko': ('Janko Hauser', 'jhauser@zscout.de'),
    'Nathan': ('Nathaniel Gray', 'n8gray@caltech.edu'),
    'Ville': ('Ville Vainio', 'vivainio@gmail.com'),
    'Brian': ('Brian E Granger', 'ellisonbg@gmail.com'),
    'Min': ('Min Ragan-Kelley', 'benjaminrk@gmail.com'),
    'Thomas': ('Thomas A. Kluyver', 'takowl@gmail.com'),
    'Jorgen': ('Jorgen Stenarson', 'jorgen.stenarson@bostream.nu'),
    'Matthias': ('Matthias Bussonnier', 'bussonniermatthias@gmail.com'),
}

author = 'The IPython Development Team'

author_email = 'ipython-dev@scipy.org'

url = 'http://ipython.org'

platforms = [
    'Linux',
    'Mac OSX',
    'Windows',
]

keywords = [
    'Interactive',
    'Interpreter',
    'Shell',
    'Embedding',
]

classifiers = [
    'Framework :: IPython',
    'Intended Audience :: Developers',
    'Intended Audience :: Science/Research',
    'License :: OSI Approved :: BSD License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Topic :: System :: Shells',
]
"Brian"
] | d48b436a07528dc501534381d1717809699d18bd7b5275a3316c164762a645e5 |
import json
from simstream import PikaAsyncConsumer
# Broker connection settings (url, exchange, exchange_type, ...) loaded
# from the shared example config file one directory up.
settings = {}
with open("../settings.json", 'r') as f:
    settings = json.load(f)
# Override the routing key so this consumer only receives OpenMM log
# messages.
settings["routing_key"] = "openmm.log"
def print_log_line(body):
    """Decode a JSON-encoded batch of log lines and print each one.

    Parameters
    ----------
    body : bytes
        Message payload: a JSON list of log lines (or JSON null).

    Prints an error message instead of raising when the payload cannot be
    decoded from bytes or parsed as JSON.
    """
    try:
        text = body.decode()
    except UnicodeError as err:
        print("[Error]: Could not decode from bytes to string: %s" % (err.reason))
        return
    try:
        entries = json.loads(text)
    except json.decoder.JSONDecodeError:
        print("[Error]: Could not decode %s" % (body))
        return
    if entries is None:
        return
    for entry in entries:
        print(entry)
# Consume from the hard-coded "openmm.log" queue and hand every delivered
# message body to print_log_line.
# NOTE(review): the queue name bypasses settings["queue"] (see the
# commented-out argument) — confirm this is intentional.
consumer = PikaAsyncConsumer(settings["url"],
                             settings["exchange"],
                             "openmm.log",  # settings["queue"],
                             message_handler=print_log_line,
                             routing_key=settings["routing_key"],
                             exchange_type=settings["exchange_type"])

if __name__ == "__main__":
    # Run until interrupted; Ctrl-C shuts the consumer down cleanly.
    try:
        consumer.start()
    except KeyboardInterrupt:
        consumer.stop()
| machristie/airavata | sandbox/simstream/example/openmm_example/openmm_log_consumer.py | Python | apache-2.0 | 1,030 | [
"OpenMM"
] | 9f3d40f3d784ac5977472f8e386d2b69e14dce7a5eb26f5b0826b20a0b3637a4 |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 07 14:13:18 2013
@author: Shreejoy
"""
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 20 09:54:11 2012
@author: Shreejoy
"""
import os
import os.path
import re
import struct
import gc
from matplotlib.pylab import *
from xml.etree.ElementTree import XML
from urllib import quote_plus, quote
from urllib2 import Request, urlopen, URLError, HTTPError
from xml.etree.ElementTree import XML
import json
from pprint import pprint
from bs4 import BeautifulSoup
import time
from HTMLParser import HTMLParseError
def get_full_text_links():
    """Page through a Wiley Online Library search and collect article links.

    Walks the canned 'neuron membrane potential' search NUMHITS results at
    a time and returns a list of (full-text href, pmid) tuples for every
    hit whose DOI resolves to exactly one PubMed ID.  Python 2 only.
    """
    NUMHITS = 200    # results requested per search page
    firstInd = 1     # 1-based index of the first result on the next page
    maxURLTries = 5  # NOTE(review): unused here; module-level MAXURLTRIES governs retries
    waitTime = 10    # seconds to sleep between search pages (rate limiting)
    #totalArticles = NUMHITS + firstInd + 1 # just set this later when it gets searched
    # Hard-coded total hit count for the canned query below.
    totalArticles = 56712
    #totalArticles = 3694
    # Search URL template; %s placeholders are (start index, page size).
    searchLinkBase = 'http://onlinelibrary.wiley.com/advanced/search/results/reentry?scope=allContent&dateRange=between&inTheLastList=6&startYear=1996&endYear=2013&queryStringEntered=false&searchRowCriteria[0].queryString=neuron+membrane+potential&searchRowCriteria[0].fieldName=all-fields&searchRowCriteria[0].booleanConnector=and&searchRowCriteria[1].fieldName=all-fields&searchRowCriteria[1].booleanConnector=and&searchRowCriteria[2].fieldName=all-fields&searchRowCriteria[2].booleanConnector=and&start=%s&resultsPerPage=%s&ordering=relevancy&publicationFacet=journal'
    #searchLinkBase = 'http://onlinelibrary.wiley.com/advanced/search/results/reentry?scope=allContent&dateRange=between&inTheLastList=6&startYear=1996&endYear=2013&queryStringEntered=false&searchRowCriteria[0].queryString=neuron+membrane+potential&searchRowCriteria[0].fieldName=all-fields&searchRowCriteria[0].booleanConnector=and&searchRowCriteria[1].queryString=European+Journal+of+Neuroscience&searchRowCriteria[1].fieldName=publication-title&searchRowCriteria[1].booleanConnector=or&searchRowCriteria[2].fieldName=all-fields&searchRowCriteria[2].booleanConnector=and&start=%s&resultsPerPage=%s&ordering=relevancy'
    fullTextLinks = []
    pdfLinks = []  # NOTE(review): never populated; kept from an earlier version?
    while firstInd + NUMHITS <= totalArticles:
        print 'searching %d of %d articles' % (firstInd, totalArticles)
        try:
            # Fill in the pagination parameters and fetch one results page.
            searchLinkFull = searchLinkBase % (firstInd, NUMHITS)
            handle = urlopen(searchLinkFull) # open the url
            data = handle.read() # read the data
            soup = BeautifulSoup(data)
        except Exception, e:
            # Best-effort: skip a page that fails to download or parse.
            print 'skipping'
            print e
            continue
        for link in soup.find_all('a'):
            # Only follow anchors that point at the HTML full text.
            if link.string == 'Full Article (HTML)':
                currLink = link.get('href')
                # Keep the hit only when its DOI maps to a single PMID.
                pmid = get_pmid_from_doi(currLink)
                if len(pmid) == 1:
                    fullTextLinks.append((currLink, pmid[0]))
        firstInd += NUMHITS
        print 'now waiting %d secs before next search' % waitTime
        time.sleep(waitTime)
    return fullTextLinks
# Maximum number of download attempts before giving up on a URL.
MAXURLTRIES = 2

def get_full_text_from_link(fullTextLink, pmid):
    """Download one Wiley full-text page and save it as a local HTML file.

    The file name is derived from the article title and *pmid* via
    make_html_filename(); nothing is returned.  Python 2 only.
    """
    # NOTE(review): hard-coded Windows output directory.
    os.chdir('C:\Users\Shreejoy\Desktop\wiley_html')
    # actually try to get full text
    success = False
    numTries = 0
    waitTimeLong = .5   # base back-off (seconds), multiplied by numTries
    waitTimeShort = 2   # polite delay after a successful download
    link = 'http://onlinelibrary.wiley.com' + fullTextLink
    request = Request(link)
    while numTries < MAXURLTRIES and success == False:
        try:
            fullText = urlopen(request).read()
            #print 'file opened successfully'
            # fullText get succeeded!
            soup = BeautifulSoup(fullText)
            fullTextTag = soup.find(id = "fulltext")
            accessDeniedTag = soup.find(id = "accessDenied")
            if accessDeniedTag is None:
                titleTag = soup.find(id="articleTitle")
                articleTitle = titleTag.h1.text
                titleEncoded = articleTitle.encode("iso-8859-15", "replace")
                # save full text to a file
                fileName = make_html_filename(titleEncoded, pmid)
                if os.path.isfile(fileName):
                    print 'found identical file'
                    pass
                else:
                    # file doesn't exist
                    f = open(fileName, 'wb')
                    f.write(str(fullTextTag))
                    f.close()
                    print 'found unique file'
                success = True
                time.sleep(waitTimeShort)
            else:
                print 'access denied to full text'
                print link
                # full text not available for some reason
                break
        except Exception, e:
            print e
            # Earlier special-casing of HTTP 403 kept for reference:
            # if e.code == 403:
            #     fullText = False
            #     pmid = False
            #     break
            print link + ' failed %s times' % numTries
            numTries += 1
            print 'now waiting %d secs before trying search again' % (waitTimeLong*numTries)
            time.sleep(waitTimeLong*numTries)
    if numTries == MAXURLTRIES:
        # NOTE(review): these locals are dead — the function returns None
        # implicitly either way; callers never see fullText/pmid.
        fullText = False
        pmid = False
def get_full_text_from_link_all(fullTextLinkListTuple):
    """Download every (link, pmid) pair in *fullTextLinkListTuple*.

    Thin progress-printing loop over get_full_text_from_link().
    """
    cnt = 0
    for fullTextLinkList in fullTextLinkListTuple:
        print '%d of %d articles' % (cnt, len(fullTextLinkListTuple))
        link = fullTextLinkList[0]
        pmid = fullTextLinkList[1]
        get_full_text_from_link(link, pmid)
        cnt += 1
def get_full_text_from_link_all_dict(fullTextLinkDict):
    """Download every entry of a {pmid: link} mapping.

    Dict-based counterpart of get_full_text_from_link_all().
    """
    cnt = 0
    num_articles = len(fullTextLinkDict)
    for pmid in fullTextLinkDict.keys():
        print '%d of %d articles' % (cnt, num_articles)
        link = fullTextLinkDict[pmid]
        get_full_text_from_link(link, pmid)
        cnt += 1
# Maximum download attempts per URL (re-declared; shadows the earlier value).
MAXURLTRIES = 2
# Maximum length of the file-name stem produced by make_html_filename().
MAXTITLELEN = 100
def make_html_filename(title, pmid):
    """Build a filesystem-safe '.html' file name of the form <pmid>_<title>.

    Whitespace is turned into underscores, every other non-alphanumeric
    character is dropped, and the stem is truncated to MAXTITLELEN
    characters before the extension is appended.
    """
    stem = '{0}_{1}'.format(pmid, title)
    stem = re.sub(r'\s', '_', stem)
    # Keep only [a-zA-Z0-9_]; everything else is discarded.
    stem = ''.join(re.findall('[a-zA-Z0-9_]', stem))
    return stem[:MAXTITLELEN] + '.html'
def get_pmid_from_doi(doiStr):
    """Resolve a Wiley '/doi/.../full' href to a PubMed ID via NCBI eutils.

    Returns a single-element list [pmid] on an unambiguous match, or an
    empty list when the lookup fails or is ambiguous.  Python 2 only.
    """
    # Strip the Wiley URL decoration so only the bare DOI remains.
    doiSearchStr = re.sub('/doi/', '', doiStr)
    doiSearchStr = re.sub('/full', '', doiSearchStr)
    searchLink = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term=%s[aid]' % (doiSearchStr)
    try:
        handle = urlopen(searchLink)
        data = handle.read()
        xml = XML(data) # convert to an xml object so we can apply x-path search fxns to it
        pmidList = [x.text for x in xml.findall('.//Id')] # find xml "Id" elements
        # Ambiguous DOIs (more than one PMID) are treated as no match.
        if len(pmidList) > 1:
            pmidList = []
    except Exception, e:
        # Best-effort: any network/parse failure yields an empty result.
        pmidList = []
    return pmidList
def get_pubmed_id_from_doi(doi):
    """Query NCBI eutils for the PubMed ID(s) registered for *doi*.

    Returns a (possibly empty) list of PMID strings; unlike
    get_pmid_from_doi(), errors are not caught and multiple matches are
    returned as-is.
    """
    query = ('http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'
             '?db=pubmed&term=%s[aid]' % (doi))
    response = urlopen(query)
    tree = XML(response.read())  # parse so x-path style searches work
    return [elem.text for elem in tree.findall('.//Id')]
# Maximum download attempts per URL (re-declared once more).
MAXURLTRIES = 2

def soupify_plus(link):
    """Fetch *link* and return (BeautifulSoup, final URL after redirects).

    Retries up to MAXURLTRIES times with a growing back-off; on total
    failure returns (False, '').  Python 2 only.
    """
    success = False
    numTries = 0
    waitTimeLong = 180  # base back-off (seconds), multiplied by numTries
    waitTimeShort = 2   # polite delay after a successful fetch
    while numTries < MAXURLTRIES and success == False:
        try:
            handle = urlopen(link) # open the url
            url = handle.geturl()
            data = handle.read() # read the data
            soup = BeautifulSoup(data)
            success = True
            time.sleep(waitTimeShort)
        except (URLError, HTTPError, HTMLParseError):
            print link + ' failed %s times' % numTries
            numTries += 1
            print 'now waiting %d secs before trying search again' % (waitTimeLong*numTries)
            time.sleep(waitTimeLong*numTries)
            url = ''
    if numTries == MAXURLTRIES:
        soup = False
    return (soup, url)
#MAXURLTRIES = 2
def soupify(link):
    """Fetch *link* and return its BeautifulSoup parse, or False on failure.

    Same retry/back-off behavior as soupify_plus() but without returning
    the resolved URL.  Python 2 only.
    """
    success = False
    numTries = 0
    waitTimeLong = 180  # base back-off (seconds), multiplied by numTries
    waitTimeShort = 2   # polite delay after a successful fetch
    while numTries < MAXURLTRIES and success == False:
        try:
            handle = urlopen(link) # open the url
            data = handle.read() # read the data
            soup = BeautifulSoup(data)
            success = True
            time.sleep(waitTimeShort)
        except (URLError, HTTPError, HTMLParseError):
            print link + ' failed %s times' % numTries
            numTries += 1
            print 'now waiting %d secs before trying search again' % (waitTimeLong*numTries)
            time.sleep(waitTimeLong*numTries)
    if numTries == MAXURLTRIES:
        soup = False
    return soup
def checkPdf(link):
    """Strip a trailing '+html' from *link* and report whether the result
    looks like a PDF URL.

    Parameters
    ----------
    link : str
        Article URL, typically ending in '.pdf+html' for Wiley PDFs.

    Returns
    -------
    (isPdf, newLink) : tuple of (bool, str)
        isPdf is True when the cleaned link matches the '.pdf' pattern.
    """
    # Fix: '+' must be escaped — the original pattern r'+html' is an
    # invalid regex ("nothing to repeat") and raised on every call.
    newLink = re.sub(r'\+html', '', link)
    # Fix: always bind isPdf — the original left it unbound (NameError)
    # whenever the link did not match.
    isPdf = re.search(r'.pdf', newLink) is not None
    return (isPdf, newLink)
"NEURON"
] | 9b7eb5c68dc71cf1b13b0d5df27158b3cc2a0f2c9b1e05f5769b5d01f3593843 |
"""Traits-based GUI for head-MRI coregistration"""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import os
from ..externals.six.moves import queue
import re
from threading import Thread
import warnings
import numpy as np
from scipy.spatial.distance import cdist
# allow import without traits
try:
from mayavi.core.ui.mayavi_scene import MayaviScene
from mayavi.tools.mlab_scene_model import MlabSceneModel
from pyface.api import (error, confirm, warning, OK, YES, information,
FileDialog, GUI)
from traits.api import (Bool, Button, cached_property, DelegatesTo,
Directory, Enum, Float, HasTraits,
HasPrivateTraits, Instance, Int, on_trait_change,
Property, Str)
from traitsui.api import (View, Item, Group, HGroup, VGroup, VGrid,
EnumEditor, Handler, Label, TextEditor)
from traitsui.menu import Action, UndoButton, CancelButton, NoButtons
from tvtk.pyface.scene_editor import SceneEditor
except:
from ..utils import trait_wraith
HasTraits = HasPrivateTraits = Handler = object
cached_property = on_trait_change = MayaviScene = MlabSceneModel =\
Bool = Button = DelegatesTo = Directory = Enum = Float = Instance =\
Int = Property = Str = View = Item = Group = HGroup = VGroup = VGrid =\
EnumEditor = Label = TextEditor = Action = UndoButton = CancelButton =\
NoButtons = SceneEditor = trait_wraith
from ..coreg import bem_fname, trans_fname
from ..forward import prepare_bem_model
from ..transforms import (write_trans, read_trans, apply_trans, rotation,
translation, scaling, rotation_angles, Transform)
from ..coreg import (fit_matched_points, fit_point_cloud, scale_mri,
_point_cloud_error)
from ..utils import get_subjects_dir, logger
from ._fiducials_gui import MRIHeadWithFiducialsModel, FiducialsPanel
from ._file_traits import (set_mne_root, trans_wildcard, InstSource,
SubjectSelectorPanel)
from ._viewer import (defaults, HeadViewController, PointObject, SurfaceObject,
_testing_mode)
laggy_float_editor = TextEditor(auto_set=False, enter_set=True, evaluate=float)
class CoregModel(HasPrivateTraits):
    """Traits object for estimating the head mri transform.

    Notes
    -----
    Transform from head to mri space is modelled with the following steps:

    * move the head shape to its nasion position
    * rotate the head shape with user defined rotation around its nasion
    * move the head shape by user defined translation
    * move the head shape origin to the mri nasion

    If MRI scaling is enabled,

    * the MRI is scaled relative to its origin center (prior to any
      transformation of the digitizer head)

    Don't sync transforms to anything to prevent them from being recomputed
    upon every parameter change.
    """
    # data sources
    mri = Instance(MRIHeadWithFiducialsModel, ())
    hsp = Instance(InstSource, ())

    # parameters
    grow_hair = Float(label="Grow Hair [mm]", desc="Move the back of the MRI "
                      "head outwards to compensate for hair on the digitizer "
                      "head shape")
    n_scale_params = Enum(0, 1, 3, desc="Scale the MRI to better fit the "
                          "subject's head shape (a new MRI subject will be "
                          "created with a name specified upon saving)")
    scale_x = Float(1, label="Right (X)")
    scale_y = Float(1, label="Anterior (Y)")
    scale_z = Float(1, label="Superior (Z)")
    rot_x = Float(0, label="Right (X)")
    rot_y = Float(0, label="Anterior (Y)")
    rot_z = Float(0, label="Superior (Z)")
    trans_x = Float(0, label="Right (X)")
    trans_y = Float(0, label="Anterior (Y)")
    trans_z = Float(0, label="Superior (Z)")
    prepare_bem_model = Bool(True, desc="whether to run mne_prepare_bem_model "
                             "after scaling the MRI")

    # secondary to parameters
    scale = Property(depends_on=['n_scale_params', 'scale_x', 'scale_y',
                                 'scale_z'])
    has_fid_data = Property(Bool, depends_on=['mri_origin', 'hsp.nasion'],
                            desc="Required fiducials data is present.")
    has_pts_data = Property(Bool, depends_on=['mri.points', 'hsp.points'])

    # MRI dependent
    mri_origin = Property(depends_on=['mri.nasion', 'scale'],
                          desc="Coordinates of the scaled MRI's nasion.")

    # target transforms
    mri_scale_trans = Property(depends_on=['scale'])
    head_mri_trans = Property(depends_on=['hsp.nasion', 'rot_x', 'rot_y',
                                          'rot_z', 'trans_x', 'trans_y',
                                          'trans_z', 'mri_origin'],
                              desc="Transformaiton of the head shape to "
                              "match the scaled MRI.")

    # info
    subject_has_bem = DelegatesTo('mri')
    lock_fiducials = DelegatesTo('mri')
    can_prepare_bem_model = Property(Bool, depends_on=['n_scale_params',
                                                       'subject_has_bem'])
    can_save = Property(Bool, depends_on=['head_mri_trans'])
    raw_subject = Property(depends_on='hsp.inst_fname', desc="Subject guess "
                           "based on the raw file name.")

    # transformed geometry
    processed_mri_points = Property(depends_on=['mri.points', 'grow_hair'])
    transformed_mri_points = Property(depends_on=['processed_mri_points',
                                                  'mri_scale_trans'])
    transformed_hsp_points = Property(depends_on=['hsp.points',
                                                  'head_mri_trans'])
    transformed_mri_lpa = Property(depends_on=['mri.lpa', 'mri_scale_trans'])
    transformed_hsp_lpa = Property(depends_on=['hsp.lpa', 'head_mri_trans'])
    transformed_mri_nasion = Property(depends_on=['mri.nasion',
                                                  'mri_scale_trans'])
    transformed_hsp_nasion = Property(depends_on=['hsp.nasion',
                                                  'head_mri_trans'])
    transformed_mri_rpa = Property(depends_on=['mri.rpa', 'mri_scale_trans'])
    transformed_hsp_rpa = Property(depends_on=['hsp.rpa', 'head_mri_trans'])

    # fit properties
    lpa_distance = Property(depends_on=['transformed_mri_lpa',
                                        'transformed_hsp_lpa'])
    nasion_distance = Property(depends_on=['transformed_mri_nasion',
                                           'transformed_hsp_nasion'])
    rpa_distance = Property(depends_on=['transformed_mri_rpa',
                                        'transformed_hsp_rpa'])
    point_distance = Property(depends_on=['transformed_mri_points',
                                          'transformed_hsp_points'])

    # fit property info strings
    fid_eval_str = Property(depends_on=['lpa_distance', 'nasion_distance',
                                        'rpa_distance'])
    points_eval_str = Property(depends_on='point_distance')

    @cached_property
    def _get_can_prepare_bem_model(self):
        # BEM preparation only makes sense when the MRI is being scaled.
        return self.subject_has_bem and self.n_scale_params > 0

    @cached_property
    def _get_can_save(self):
        # Anything other than the identity transform is worth saving.
        return np.any(self.head_mri_trans != np.eye(4))

    @cached_property
    def _get_has_pts_data(self):
        has = (np.any(self.mri.points) and np.any(self.hsp.points))
        return has

    @cached_property
    def _get_has_fid_data(self):
        has = (np.any(self.mri_origin) and np.any(self.hsp.nasion))
        return has

    @cached_property
    def _get_scale(self):
        # 0 params -> no scaling, 1 -> uniform scale, 3 -> per-axis scale.
        if self.n_scale_params == 0:
            return np.array(1)
        elif self.n_scale_params == 1:
            return np.array(self.scale_x)
        else:
            return np.array([self.scale_x, self.scale_y, self.scale_z])

    @cached_property
    def _get_mri_scale_trans(self):
        # Build a 4x4 scaling matrix; identity when no scaling is applied.
        if np.isscalar(self.scale) or self.scale.ndim == 0:
            if self.scale == 1:
                return np.eye(4)
            else:
                s = self.scale
                return scaling(s, s, s)
        else:
            return scaling(*self.scale)

    @cached_property
    def _get_mri_origin(self):
        # Nasion of the (possibly scaled) MRI head.
        if np.isscalar(self.scale) and self.scale == 1:
            return self.mri.nasion
        else:
            return self.mri.nasion * self.scale

    @cached_property
    def _get_head_mri_trans(self):
        """Compose the head -> MRI transform from the user parameters."""
        if not self.has_fid_data:
            return np.eye(4)

        # move hsp so that its nasion becomes the origin
        x, y, z = -self.hsp.nasion[0]
        trans = translation(x, y, z)

        # rotate hsp by rotation parameters
        rot = rotation(self.rot_x, self.rot_y, self.rot_z)
        trans = np.dot(rot, trans)

        # move hsp by translation parameters
        transl = translation(self.trans_x, self.trans_y, self.trans_z)
        trans = np.dot(transl, trans)

        # move the hsp origin(/nasion) to the MRI's nasion
        x, y, z = self.mri_origin[0]
        tgt_mri_trans = translation(x, y, z)
        trans = np.dot(tgt_mri_trans, trans)

        return trans

    @cached_property
    def _get_processed_mri_points(self):
        # Optionally push the back of the MRI head outwards along the
        # surface normals to compensate for hair on the digitized head.
        if self.grow_hair:
            if len(self.mri.norms):
                if self.n_scale_params == 0:
                    scaled_hair_dist = self.grow_hair / 1000
                else:
                    scaled_hair_dist = self.grow_hair / self.scale / 1000

                points = self.mri.points.copy()
                hair = points[:, 2] > points[:, 1]
                points[hair] += self.mri.norms[hair] * scaled_hair_dist
                return points
            else:
                error(None, "Norms missing form bem, can't grow hair")
                self.grow_hair = 0
        return self.mri.points

    @cached_property
    def _get_transformed_mri_points(self):
        points = apply_trans(self.mri_scale_trans, self.processed_mri_points)
        return points

    @cached_property
    def _get_transformed_mri_lpa(self):
        return apply_trans(self.mri_scale_trans, self.mri.lpa)

    @cached_property
    def _get_transformed_mri_nasion(self):
        return apply_trans(self.mri_scale_trans, self.mri.nasion)

    @cached_property
    def _get_transformed_mri_rpa(self):
        return apply_trans(self.mri_scale_trans, self.mri.rpa)

    @cached_property
    def _get_transformed_hsp_points(self):
        return apply_trans(self.head_mri_trans, self.hsp.points)

    @cached_property
    def _get_transformed_hsp_lpa(self):
        return apply_trans(self.head_mri_trans, self.hsp.lpa)

    @cached_property
    def _get_transformed_hsp_nasion(self):
        return apply_trans(self.head_mri_trans, self.hsp.nasion)

    @cached_property
    def _get_transformed_hsp_rpa(self):
        return apply_trans(self.head_mri_trans, self.hsp.rpa)

    @cached_property
    def _get_lpa_distance(self):
        # Euclidean distance between the transformed MRI and digitizer LPA.
        d = np.ravel(self.transformed_mri_lpa - self.transformed_hsp_lpa)
        return np.sqrt(np.dot(d, d))

    @cached_property
    def _get_nasion_distance(self):
        d = np.ravel(self.transformed_mri_nasion - self.transformed_hsp_nasion)
        return np.sqrt(np.dot(d, d))

    @cached_property
    def _get_rpa_distance(self):
        d = np.ravel(self.transformed_mri_rpa - self.transformed_hsp_rpa)
        return np.sqrt(np.dot(d, d))

    @cached_property
    def _get_point_distance(self):
        # Per-digitizer-point distance to the closest MRI head point, or
        # None when either point set is empty.
        if (len(self.transformed_hsp_points) == 0 or
                len(self.transformed_mri_points) == 0):
            return
        dists = cdist(self.transformed_hsp_points, self.transformed_mri_points,
                      'euclidean')
        dists = np.min(dists, 1)
        return dists

    @cached_property
    def _get_fid_eval_str(self):
        # Fiducial errors for display, converted from meters to mm.
        d = (self.lpa_distance * 1000, self.nasion_distance * 1000,
             self.rpa_distance * 1000)
        txt = ("Fiducials Error: LPA %.1f mm, NAS %.1f mm, RPA %.1f mm" % d)
        return txt

    @cached_property
    def _get_points_eval_str(self):
        if self.point_distance is None:
            return ""
        av_dist = np.mean(self.point_distance)
        return "Average Points Error: %.1f mm" % (av_dist * 1000)

    def _get_raw_subject(self):
        # subject name guessed based on the inst file name
        if '_' in self.hsp.inst_fname:
            subject, _ = self.hsp.inst_fname.split('_', 1)
            if not subject:
                subject = None
        else:
            subject = None
        return subject

    @on_trait_change('raw_subject')
    def _on_raw_subject_change(self, subject):
        # Auto-select the guessed MRI subject, falling back to fsaverage.
        if subject in self.mri.subject_source.subjects:
            self.mri.subject = subject
        elif 'fsaverage' in self.mri.subject_source.subjects:
            self.mri.subject = 'fsaverage'

    def omit_hsp_points(self, distance=0, reset=False):
        """Exclude head shape points that are far away from the MRI head

        Parameters
        ----------
        distance : float
            Exclude all points that are further away from the MRI head than
            this distance. Previously excluded points are still excluded unless
            reset=True is specified. A value of distance <= 0 excludes nothing.
        reset : bool
            Reset the filter before calculating new omission (default is
            False).
        """
        distance = float(distance)
        if reset:
            logger.info("Coregistration: Reset excluded head shape points")
            with warnings.catch_warnings(record=True):  # Traits None comp
                self.hsp.points_filter = None

        if distance <= 0:
            return

        # find the new filter
        hsp_pts = self.transformed_hsp_points
        mri_pts = self.transformed_mri_points
        point_distance = _point_cloud_error(hsp_pts, mri_pts)
        new_sub_filter = point_distance <= distance
        n_excluded = np.sum(new_sub_filter == False)  # noqa
        logger.info("Coregistration: Excluding %i head shape points with "
                    "distance >= %.3f m.", n_excluded, distance)

        # combine the new filter with the previous filter
        old_filter = self.hsp.points_filter
        if old_filter is None:
            new_filter = new_sub_filter
        else:
            new_filter = np.ones(len(self.hsp.raw_points), np.bool8)
            new_filter[old_filter] = new_sub_filter

        # set the filter
        with warnings.catch_warnings(record=True):  # comp to None in Traits
            self.hsp.points_filter = new_filter

    def fit_auricular_points(self):
        "Find rotation to fit LPA and RPA"
        # Work in a nasion-centered frame; the MRI targets are scaled and
        # shifted by the current translation so only rotation is fitted.
        src_fid = np.vstack((self.hsp.lpa, self.hsp.rpa))
        src_fid -= self.hsp.nasion

        tgt_fid = np.vstack((self.mri.lpa, self.mri.rpa))
        tgt_fid -= self.mri.nasion
        tgt_fid *= self.scale
        tgt_fid -= [self.trans_x, self.trans_y, self.trans_z]

        x0 = (self.rot_x, self.rot_y, self.rot_z)
        rot = fit_matched_points(src_fid, tgt_fid, rotate=True,
                                 translate=False, x0=x0, out='params')
        self.rot_x, self.rot_y, self.rot_z = rot

    def fit_fiducials(self):
        "Find rotation and translation to fit all 3 fiducials"
        src_fid = np.vstack((self.hsp.lpa, self.hsp.nasion, self.hsp.rpa))
        src_fid -= self.hsp.nasion

        tgt_fid = np.vstack((self.mri.lpa, self.mri.nasion, self.mri.rpa))
        tgt_fid -= self.mri.nasion
        tgt_fid *= self.scale

        x0 = (self.rot_x, self.rot_y, self.rot_z, self.trans_x, self.trans_y,
              self.trans_z)
        est = fit_matched_points(src_fid, tgt_fid, x0=x0, out='params')

        self.rot_x, self.rot_y, self.rot_z = est[:3]
        self.trans_x, self.trans_y, self.trans_z = est[3:]

    def fit_hsp_points(self):
        "Find rotation to fit head shapes"
        src_pts = self.hsp.points - self.hsp.nasion

        tgt_pts = self.processed_mri_points - self.mri.nasion
        tgt_pts *= self.scale
        tgt_pts -= [self.trans_x, self.trans_y, self.trans_z]

        x0 = (self.rot_x, self.rot_y, self.rot_z)
        rot = fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=False,
                              x0=x0)

        self.rot_x, self.rot_y, self.rot_z = rot

    def fit_scale_auricular_points(self):
        "Find rotation and MRI scaling based on LPA and RPA"
        src_fid = np.vstack((self.hsp.lpa, self.hsp.rpa))
        src_fid -= self.hsp.nasion

        tgt_fid = np.vstack((self.mri.lpa, self.mri.rpa))
        tgt_fid -= self.mri.nasion
        tgt_fid -= [self.trans_x, self.trans_y, self.trans_z]

        # The optimizer estimates the MRI->head scale, hence the 1/scale
        # conversions on the way in and out.
        x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x)
        x = fit_matched_points(src_fid, tgt_fid, rotate=True, translate=False,
                               scale=1, x0=x0, out='params')

        self.scale_x = 1. / x[3]
        self.rot_x, self.rot_y, self.rot_z = x[:3]

    def fit_scale_fiducials(self):
        "Find translation, rotation and scaling based on the three fiducials"
        src_fid = np.vstack((self.hsp.lpa, self.hsp.nasion, self.hsp.rpa))
        src_fid -= self.hsp.nasion

        tgt_fid = np.vstack((self.mri.lpa, self.mri.nasion, self.mri.rpa))
        tgt_fid -= self.mri.nasion

        x0 = (self.rot_x, self.rot_y, self.rot_z, self.trans_x, self.trans_y,
              self.trans_z, 1. / self.scale_x,)
        est = fit_matched_points(src_fid, tgt_fid, rotate=True, translate=True,
                                 scale=1, x0=x0, out='params')

        self.scale_x = 1. / est[6]
        self.rot_x, self.rot_y, self.rot_z = est[:3]
        self.trans_x, self.trans_y, self.trans_z = est[3:6]

    def fit_scale_hsp_points(self):
        "Find MRI scaling and rotation to match head shape points"
        src_pts = self.hsp.points - self.hsp.nasion

        tgt_pts = self.processed_mri_points - self.mri.nasion

        if self.n_scale_params == 1:
            x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x)
            est = fit_point_cloud(src_pts, tgt_pts, rotate=True,
                                  translate=False, scale=1, x0=x0)

            self.scale_x = 1. / est[3]
        else:
            x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x,
                  1. / self.scale_y, 1. / self.scale_z)
            est = fit_point_cloud(src_pts, tgt_pts, rotate=True,
                                  translate=False, scale=3, x0=x0)
            self.scale_x, self.scale_y, self.scale_z = 1. / est[3:]

        self.rot_x, self.rot_y, self.rot_z = est[:3]

    def get_scaling_job(self, subject_to):
        """Return a (description, callable, args, kwargs) job tuple that
        creates the scaled MRI subject *subject_to*."""
        desc = 'Scaling %s' % subject_to
        func = scale_mri
        args = (self.mri.subject, subject_to, self.scale)
        kwargs = dict(overwrite=True, subjects_dir=self.mri.subjects_dir)
        return (desc, func, args, kwargs)

    def get_prepare_bem_model_job(self, subject_to):
        """Return a job tuple that runs mne_prepare_bem_model for
        *subject_to*'s BEM file."""
        subjects_dir = self.mri.subjects_dir
        subject_from = self.mri.subject

        bem_name = 'inner_skull-bem'
        bem_file = bem_fname.format(subjects_dir=subjects_dir,
                                    subject=subject_from, name=bem_name)
        if not os.path.exists(bem_file):
            # Fall back to any '*-bem' file in the target subject's dir.
            pattern = bem_fname.format(subjects_dir=subjects_dir,
                                       subject=subject_to, name='(.+-bem)')
            bem_dir, bem_file = os.path.split(pattern)
            m = None
            bem_file_pattern = re.compile(bem_file)
            for name in os.listdir(bem_dir):
                m = bem_file_pattern.match(name)
                if m is not None:
                    break

            if m is None:
                pattern = bem_fname.format(subjects_dir=subjects_dir,
                                           subject=subject_to, name='*-bem')
                err = ("No bem file found; looking for files matching "
                       "%s" % pattern)
                error(None, err)
                # NOTE(review): execution falls through with m is None, so
                # m.group(1) below will raise — confirm intended handling.

            bem_name = m.group(1)
            bem_file = bem_fname.format(subjects_dir=subjects_dir,
                                        subject=subject_to, name=bem_name)

        # job
        desc = 'mne_prepare_bem_model for %s' % subject_to
        func = prepare_bem_model
        args = (bem_file,)
        kwargs = {}
        return (desc, func, args, kwargs)

    def load_trans(self, fname):
        """Load the head-mri transform from a fif file

        Parameters
        ----------
        fname : str
            File path.
        """
        info = read_trans(fname)
        head_mri_trans = info['trans']
        self.set_trans(head_mri_trans)

    def reset(self):
        """Reset all the parameters affecting the coregistration"""
        # Fix: the trait is named 'n_scale_params'; the previous
        # 'n_scaling_params' did not match any trait, so the scaling mode
        # was never reset.
        self.reset_traits(('grow_hair', 'n_scale_params', 'scale_x',
                           'scale_y', 'scale_z', 'rot_x', 'rot_y', 'rot_z',
                           'trans_x', 'trans_y', 'trans_z'))

    def set_trans(self, head_mri_trans):
        """Set rotation and translation parameters from a transformation matrix

        Parameters
        ----------
        head_mri_trans : array, shape (4, 4)
            Transformation matrix from head to MRI space.
        """
        # Decompose the transform into the nasion-centered rotation and
        # translation parameters used by this model.
        x, y, z = -self.mri_origin[0]
        mri_tgt_trans = translation(x, y, z)
        head_tgt_trans = np.dot(mri_tgt_trans, head_mri_trans)

        x, y, z = self.hsp.nasion[0]
        src_hsp_trans = translation(x, y, z)
        src_tgt_trans = np.dot(head_tgt_trans, src_hsp_trans)

        rot_x, rot_y, rot_z = rotation_angles(src_tgt_trans[:3, :3])
        x, y, z = src_tgt_trans[:3, 3]

        self.rot_x = rot_x
        self.rot_y = rot_y
        self.rot_z = rot_z
        self.trans_x = x
        self.trans_y = y
        self.trans_z = z

    def save_trans(self, fname):
        """Save the head-mri transform as a fif file

        Parameters
        ----------
        fname : str
            Target file path.
        """
        if not self.can_save:
            raise RuntimeError("Not enough information for saving transform")
        write_trans(fname, Transform('head', 'mri', self.head_mri_trans))
class CoregFrameHandler(Handler):
    """Window handler that refuses to close while MRI saving is running."""

    def close(self, info, is_ok):
        # Allow closing only when the background save queue is drained.
        if not info.object.queue.unfinished_tasks:
            return True
        information(None, "Can not close the window while saving is still "
                    "in progress. Please wait until all MRIs are "
                    "processed.", "Saving Still in Progress")
        return False
class CoregPanel(HasPrivateTraits):
    """Coregistration control panel.

    Exposes the scaling, rotation and translation parameters of the
    associated CoregModel, buttons that run the automatic fitting
    routines, and a background worker queue so that scaled MRIs can be
    saved without blocking the GUI.
    """
    model = Instance(CoregModel)

    # parameters
    reset_params = Button(label='Reset')
    grow_hair = DelegatesTo('model')
    n_scale_params = DelegatesTo('model')
    scale_step = Float(1.01)
    scale_x = DelegatesTo('model')
    scale_x_dec = Button('-')
    scale_x_inc = Button('+')
    scale_y = DelegatesTo('model')
    scale_y_dec = Button('-')
    scale_y_inc = Button('+')
    scale_z = DelegatesTo('model')
    scale_z_dec = Button('-')
    scale_z_inc = Button('+')
    rot_step = Float(0.01)
    rot_x = DelegatesTo('model')
    rot_x_dec = Button('-')
    rot_x_inc = Button('+')
    rot_y = DelegatesTo('model')
    rot_y_dec = Button('-')
    rot_y_inc = Button('+')
    rot_z = DelegatesTo('model')
    rot_z_dec = Button('-')
    rot_z_inc = Button('+')
    trans_step = Float(0.001)
    trans_x = DelegatesTo('model')
    trans_x_dec = Button('-')
    trans_x_inc = Button('+')
    trans_y = DelegatesTo('model')
    trans_y_dec = Button('-')
    trans_y_inc = Button('+')
    trans_z = DelegatesTo('model')
    trans_z_dec = Button('-')
    trans_z_inc = Button('+')

    # fitting
    has_fid_data = DelegatesTo('model')
    has_pts_data = DelegatesTo('model')
    # fitting with scaling
    fits_hsp_points = Button(label='Fit Head Shape')
    fits_fid = Button(label='Fit Fiducials')
    fits_ap = Button(label='Fit LPA/RPA')
    # fitting without scaling
    fit_hsp_points = Button(label='Fit Head Shape')
    fit_fid = Button(label='Fit Fiducials')
    fit_ap = Button(label='Fit LPA/RPA')
    # fit info
    fid_eval_str = DelegatesTo('model')
    points_eval_str = DelegatesTo('model')

    # saving
    can_prepare_bem_model = DelegatesTo('model')
    can_save = DelegatesTo('model')
    prepare_bem_model = DelegatesTo('model')
    save = Button(label="Save As...")
    load_trans = Button
    # Background job queue; items are (description, callable, args, kwargs).
    queue = Instance(queue.Queue, ())
    queue_feedback = Str('')
    queue_current = Str('')
    queue_len = Int(0)
    queue_len_str = Property(Str, depends_on=['queue_len'])
    error = Str('')

    view = View(VGroup(Item('grow_hair', show_label=True),
                       Item('n_scale_params', label='MRI Scaling',
                            style='custom', show_label=True,
                            editor=EnumEditor(values={0: '1:No Scaling',
                                                      1: '2:1 Param',
                                                      3: '3:3 Params'},
                                              cols=3)),
                       VGrid(Item('scale_x', editor=laggy_float_editor,
                                  show_label=True, tooltip="Scale along "
                                  "right-left axis",
                                  enabled_when='n_scale_params > 0'),
                             Item('scale_x_dec',
                                  enabled_when='n_scale_params > 0'),
                             Item('scale_x_inc',
                                  enabled_when='n_scale_params > 0'),
                             Item('scale_step', tooltip="Scaling step",
                                  enabled_when='n_scale_params > 0'),
                             Item('scale_y', editor=laggy_float_editor,
                                  show_label=True,
                                  enabled_when='n_scale_params > 1',
                                  tooltip="Scale along anterior-posterior "
                                  "axis"),
                             Item('scale_y_dec',
                                  enabled_when='n_scale_params > 1'),
                             Item('scale_y_inc',
                                  enabled_when='n_scale_params > 1'),
                             Label('(Step)'),
                             Item('scale_z', editor=laggy_float_editor,
                                  show_label=True,
                                  enabled_when='n_scale_params > 1',
                                  tooltip="Scale along anterior-posterior "
                                  "axis"),
                             Item('scale_z_dec',
                                  enabled_when='n_scale_params > 1'),
                             Item('scale_z_inc',
                                  enabled_when='n_scale_params > 1'),
                             show_labels=False, columns=4),
                       HGroup(Item('fits_hsp_points',
                                   enabled_when='n_scale_params',
                                   tooltip="Rotate the digitizer head shape "
                                   "and scale the MRI so as to minimize the "
                                   "distance from each digitizer point to the "
                                   "closest MRI point"),
                              Item('fits_ap',
                                   enabled_when='n_scale_params == 1',
                                   tooltip="While leaving the nasion in "
                                   "place, rotate the digitizer head shape "
                                   "and scale the MRI so as to minimize the "
                                   "distance of the two auricular points"),
                              Item('fits_fid',
                                   enabled_when='n_scale_params == 1',
                                   tooltip="Move and rotate the digitizer "
                                   "head shape, and scale the MRI so as to "
                                   "minimize the distance of the three "
                                   "fiducials."),
                              show_labels=False),
                       '_',
                       Label("Translation:"),
                       VGrid(Item('trans_x', editor=laggy_float_editor,
                                  show_label=True, tooltip="Move along "
                                  "right-left axis"),
                             'trans_x_dec', 'trans_x_inc',
                             Item('trans_step', tooltip="Movement step"),
                             Item('trans_y', editor=laggy_float_editor,
                                  show_label=True, tooltip="Move along "
                                  "anterior-posterior axis"),
                             'trans_y_dec', 'trans_y_inc',
                             Label('(Step)'),
                             Item('trans_z', editor=laggy_float_editor,
                                  show_label=True, tooltip="Move along "
                                  "anterior-posterior axis"),
                             'trans_z_dec', 'trans_z_inc',
                             show_labels=False, columns=4),
                       Label("Rotation:"),
                       VGrid(Item('rot_x', editor=laggy_float_editor,
                                  show_label=True, tooltip="Rotate along "
                                  "right-left axis"),
                             'rot_x_dec', 'rot_x_inc',
                             Item('rot_step', tooltip="Rotation step"),
                             Item('rot_y', editor=laggy_float_editor,
                                  show_label=True, tooltip="Rotate along "
                                  "anterior-posterior axis"),
                             'rot_y_dec', 'rot_y_inc',
                             Label('(Step)'),
                             Item('rot_z', editor=laggy_float_editor,
                                  show_label=True, tooltip="Rotate along "
                                  "anterior-posterior axis"),
                             'rot_z_dec', 'rot_z_inc',
                             show_labels=False, columns=4),
                       # buttons
                       HGroup(Item('fit_hsp_points',
                                   enabled_when='has_pts_data',
                                   tooltip="Rotate the head shape (around the "
                                   "nasion) so as to minimize the distance "
                                   "from each head shape point to its closest "
                                   "MRI point"),
                              Item('fit_ap', enabled_when='has_fid_data',
                                   tooltip="Try to match the LPA and the RPA, "
                                   "leaving the Nasion in place"),
                              Item('fit_fid', enabled_when='has_fid_data',
                                   tooltip="Move and rotate the head shape so "
                                   "as to minimize the distance between the "
                                   "MRI and head shape fiducials"),
                              Item('load_trans', enabled_when='has_fid_data'),
                              show_labels=False),
                       '_',
                       Item('fid_eval_str', style='readonly'),
                       Item('points_eval_str', style='readonly'),
                       '_',
                       HGroup(Item('prepare_bem_model'),
                              Label("Run mne_prepare_bem_model"),
                              show_labels=False,
                              enabled_when='can_prepare_bem_model'),
                       HGroup(Item('save', enabled_when='can_save',
                                   tooltip="Save the trans file and (if "
                                   "scaling is enabled) the scaled MRI"),
                              Item('reset_params', tooltip="Reset all "
                                   "coregistration parameters"),
                              show_labels=False),
                       Item('queue_feedback', style='readonly'),
                       Item('queue_current', style='readonly'),
                       Item('queue_len_str', style='readonly'),
                       show_labels=False),
                kind='panel', buttons=[UndoButton])

    def __init__(self, *args, **kwargs):
        super(CoregPanel, self).__init__(*args, **kwargs)

        # setup save worker: a daemon thread that processes queued
        # (description, callable, args, kwargs) jobs one at a time and
        # reports progress through the queue_* traits.
        def worker():
            while True:
                desc, cmd, args, kwargs = self.queue.get()
                self.queue_len -= 1
                self.queue_current = 'Processing: %s' % desc
                # task
                try:
                    cmd(*args, **kwargs)
                except Exception as err:
                    self.error = str(err)
                    res = "Error in %s"
                else:
                    res = "Done: %s"
                # finalize
                self.queue_current = ''
                self.queue_feedback = res % desc
                self.queue.task_done()

        t = Thread(target=worker)
        t.daemon = True
        t.start()

    @cached_property
    def _get_queue_len_str(self):
        # Human-readable queue length for the status line ('' when idle).
        if self.queue_len:
            return "Queue length: %i" % self.queue_len
        else:
            return ''

    # NOTE(review): the following _get_* helpers reference traits
    # (hsp_pts, hsp_fid, mri_pts, mri_fid, scale, tgt_origin) that are not
    # declared on this panel, and no matching Property traits exist to
    # trigger them -- they appear to be dead code; verify before removing.
    @cached_property
    def _get_rotation(self):
        rot = np.array([self.rot_x, self.rot_y, self.rot_z])
        return rot

    @cached_property
    def _get_src_pts(self):
        return self.hsp_pts - self.hsp_fid[0]

    @cached_property
    def _get_src_fid(self):
        return self.hsp_fid - self.hsp_fid[0]

    @cached_property
    def _get_tgt_origin(self):
        return self.mri_fid[0] * self.scale

    @cached_property
    def _get_tgt_pts(self):
        pts = self.mri_pts * self.scale
        pts -= self.tgt_origin
        return pts

    @cached_property
    def _get_tgt_fid(self):
        fid = self.mri_fid * self.scale
        fid -= self.tgt_origin
        return fid

    @cached_property
    def _get_translation(self):
        trans = np.array([self.trans_x, self.trans_y, self.trans_z])
        return trans

    # Fit buttons: each handler blocks the GUI with a busy cursor while the
    # model runs the corresponding fitting routine.
    def _fit_ap_fired(self):
        GUI.set_busy()
        self.model.fit_auricular_points()
        GUI.set_busy(False)

    def _fit_fid_fired(self):
        GUI.set_busy()
        self.model.fit_fiducials()
        GUI.set_busy(False)

    def _fit_hsp_points_fired(self):
        GUI.set_busy()
        self.model.fit_hsp_points()
        GUI.set_busy(False)

    def _fits_ap_fired(self):
        GUI.set_busy()
        self.model.fit_scale_auricular_points()
        GUI.set_busy(False)

    def _fits_fid_fired(self):
        GUI.set_busy()
        self.model.fit_scale_fiducials()
        GUI.set_busy(False)

    def _fits_hsp_points_fired(self):
        GUI.set_busy()
        self.model.fit_scale_hsp_points()
        GUI.set_busy(False)

    def _n_scale_params_changed(self, new):
        if not new:
            return

        # Make sure that MNE_ROOT environment variable is set
        if not set_mne_root(True):
            err = ("MNE_ROOT environment variable could not be set. "
                   "You will be able to scale MRIs, but the "
                   "mne_prepare_bem_model tool will fail. Please install "
                   "MNE.")
            warning(None, err, "MNE_ROOT Not Set")

    def _reset_params_fired(self):
        self.model.reset()

    # Step buttons: nudge the corresponding parameter by its step size.
    def _rot_x_dec_fired(self):
        self.rot_x -= self.rot_step

    def _rot_x_inc_fired(self):
        self.rot_x += self.rot_step

    def _rot_y_dec_fired(self):
        self.rot_y -= self.rot_step

    def _rot_y_inc_fired(self):
        self.rot_y += self.rot_step

    def _rot_z_dec_fired(self):
        self.rot_z -= self.rot_step

    def _rot_z_inc_fired(self):
        self.rot_z += self.rot_step

    def _load_trans_fired(self):
        """Let the user pick an existing trans file and load it."""
        # find trans file destination
        raw_dir = os.path.dirname(self.model.hsp.file)
        subject = self.model.mri.subject
        trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject)
        dlg = FileDialog(action="open", wildcard=trans_wildcard,
                         default_path=trans_file)
        dlg.open()
        if dlg.return_code != OK:
            return
        trans_file = dlg.path
        self.model.load_trans(trans_file)

    def _save_fired(self):
        """Save the trans file and, when scaling is on, queue the MRI save."""
        if self.n_scale_params:
            subjects_dir = self.model.mri.subjects_dir
            subject_from = self.model.mri.subject
            subject_to = self.model.raw_subject or self.model.mri.subject
        else:
            subject_to = self.model.mri.subject

        # ask for target subject
        if self.n_scale_params:
            mridlg = NewMriDialog(subjects_dir=subjects_dir,
                                  subject_from=subject_from,
                                  subject_to=subject_to)
            ui = mridlg.edit_traits(kind='modal')
            if ui.result != True:  # noqa
                return
            subject_to = mridlg.subject_to

        # find bem file to run mne_prepare_bem_model
        if self.can_prepare_bem_model and self.prepare_bem_model:
            bem_job = self.model.get_prepare_bem_model_job(subject_to)
        else:
            bem_job = None

        # find trans file destination
        raw_dir = os.path.dirname(self.model.hsp.file)
        trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject_to)
        dlg = FileDialog(action="save as", wildcard=trans_wildcard,
                         default_path=trans_file)
        dlg.open()
        if dlg.return_code != OK:
            return
        trans_file = dlg.path
        if not trans_file.endswith('.fif'):
            trans_file = trans_file + '.fif'
        if os.path.exists(trans_file):
            # BUG FIX: the file name was never interpolated into the
            # message, so the user saw a literal "%r".
            answer = confirm(None, "The file %r already exists. Should it "
                             "be replaced?" % trans_file, "Overwrite File?")
            if answer != YES:
                return

        # save the trans file
        try:
            self.model.save_trans(trans_file)
        except Exception as e:
            error(None, str(e), "Error Saving Trans File")
            return

        # save the scaled MRI
        if self.n_scale_params:
            job = self.model.get_scaling_job(subject_to)
            self.queue.put(job)
            self.queue_len += 1
            if bem_job is not None:
                self.queue.put(bem_job)
                self.queue_len += 1

    def _scale_x_dec_fired(self):
        step = 1. / self.scale_step
        self.scale_x *= step

    def _scale_x_inc_fired(self):
        self.scale_x *= self.scale_step

    def _scale_x_changed(self, old, new):
        # With a single scaling parameter, x drives y and z.
        if self.n_scale_params == 1:
            self.scale_y = new
            self.scale_z = new

    def _scale_y_dec_fired(self):
        step = 1. / self.scale_step
        self.scale_y *= step

    def _scale_y_inc_fired(self):
        self.scale_y *= self.scale_step

    def _scale_z_dec_fired(self):
        step = 1. / self.scale_step
        self.scale_z *= step

    def _scale_z_inc_fired(self):
        self.scale_z *= self.scale_step

    def _trans_x_dec_fired(self):
        self.trans_x -= self.trans_step

    def _trans_x_inc_fired(self):
        self.trans_x += self.trans_step

    def _trans_y_dec_fired(self):
        self.trans_y -= self.trans_step

    def _trans_y_inc_fired(self):
        self.trans_y += self.trans_step

    def _trans_z_dec_fired(self):
        self.trans_z -= self.trans_step

    def _trans_z_inc_fired(self):
        self.trans_z += self.trans_step
class NewMriDialog(HasPrivateTraits):
    """Dialog to determine the target subject name for a scaled MRI."""
    subjects_dir = Directory
    subject_to = Str
    subject_from = Str
    subject_to_dir = Property(depends_on=['subjects_dir', 'subject_to'])
    subject_to_exists = Property(Bool, depends_on='subject_to_dir')
    feedback = Str(' ' * 100)
    can_overwrite = Bool
    overwrite = Bool
    can_save = Bool

    view = View(Item('subject_to', label='New MRI Subject Name', tooltip="A "
                     "new folder with this name will be created in the "
                     "current subjects_dir for the scaled MRI files"),
                Item('feedback', show_label=False, style='readonly'),
                Item('overwrite', enabled_when='can_overwrite', tooltip="If a "
                     "subject with the chosen name exists, delete the old "
                     "subject"),
                width=500,
                buttons=[CancelButton,
                         Action(name='OK', enabled_when='can_save')])

    def _can_overwrite_changed(self, new):
        # Losing permission to overwrite also clears the user's choice.
        if not new:
            self.overwrite = False

    @cached_property
    def _get_subject_to_dir(self):
        return os.path.join(self.subjects_dir, self.subject_to)

    @cached_property
    def _get_subject_to_exists(self):
        # True only when a name is set and its target directory exists.
        return bool(self.subject_to) and os.path.exists(self.subject_to_dir)

    @on_trait_change('subject_to_dir,overwrite')
    def update_dialog(self):
        """Refresh the feedback message and the save/overwrite permissions."""
        if not self.subject_to:
            feedback = "No subject specified..."
            can_save, can_overwrite = False, False
        elif self.subject_to == self.subject_from:
            feedback = "Must be different from MRI source subject..."
            can_save, can_overwrite = False, False
        elif self.subject_to_exists:
            if self.overwrite:
                feedback = "%s will be overwritten." % self.subject_to
                can_save, can_overwrite = True, True
            else:
                feedback = "Subject already exists..."
                can_save, can_overwrite = False, True
        else:
            feedback = "Name ok."
            can_save, can_overwrite = True, False
        self.feedback = feedback
        self.can_save = can_save
        self.can_overwrite = can_overwrite
def _make_view(tabbed=False, split=False, scene_width=-1):
    """Create a view for the CoregFrame
    Parameters
    ----------
    tabbed : bool
        Combine the data source panel and the coregistration panel into a
        single panel with tabs.
    split : bool
        Split the main panels with a movable splitter (good for QT4 but
        unnecessary for wx backend).
    scene_width : int
        Specify a minimum width for the 3d scene (in pixels).
    Returns
    -------
    view : traits View
        View object for the CoregFrame.
    """
    # NOTE(review): scene_width is never used in this body -- the scene
    # width is hard-coded to 500 below; verify whether it should be wired in.
    # Camera / view-direction controls shown beneath the 3d scene.
    view_options = VGroup(Item('headview', style='custom'), 'view_options',
                          show_border=True, show_labels=False, label='View')
    # The Mayavi 3d scene plus the view controls.
    scene = VGroup(Item('scene', show_label=False,
                        editor=SceneEditor(scene_class=MayaviScene),
                        dock='vertical', width=500),
                   view_options)
    # Left-hand panel: MRI subject selection, fiducials and head-shape source.
    data_panel = VGroup(VGroup(Item('subject_panel', style='custom'),
                               label="MRI Subject", show_border=True,
                               show_labels=False),
                        VGroup(Item('lock_fiducials', style='custom',
                                    editor=EnumEditor(cols=2,
                                                      values={False: '2:Edit',
                                                              True: '1:Lock'}),
                                    enabled_when='fid_ok'),
                               HGroup('hsp_always_visible',
                                      Label("Always Show Head Shape Points"),
                                      show_labels=False),
                               Item('fid_panel', style='custom'),
                               label="MRI Fiducials", show_border=True,
                               show_labels=False),
                        VGroup(Item('raw_src', style="custom"),
                               HGroup(Item('distance', show_label=True),
                                      'omit_points', 'reset_omit_points',
                                      show_labels=False),
                               Item('omitted_info', style='readonly',
                                    show_label=False),
                               label='Head Shape Source (Raw/Epochs/Evoked)',
                               show_border=True, show_labels=False),
                        show_labels=False, label="Data Source")
    # Right-hand panel: the coregistration parameters and fit buttons.
    coreg_panel = VGroup(Item('coreg_panel', style='custom'),
                         label="Coregistration", show_border=True,
                         show_labels=False,
                         enabled_when="fid_panel.locked")
    if split:
        main_layout = 'split'
    else:
        main_layout = 'normal'
    # Either tab the two side panels together next to the scene, or lay all
    # three panels out side by side.
    if tabbed:
        main = HGroup(scene,
                      Group(data_panel, coreg_panel, show_labels=False,
                            layout='tabbed'),
                      layout=main_layout)
    else:
        main = HGroup(data_panel, scene, coreg_panel, show_labels=False,
                      layout=main_layout)
    view = View(main, resizable=True, handler=CoregFrameHandler(),
                buttons=NoButtons)
    return view
class ViewOptionsPanel(HasTraits):
    """Dialog exposing display options for the scene objects."""
    # Visualization object for the MRI head surface.
    mri_obj = Instance(SurfaceObject)
    # Visualization object for the digitizer head-shape points.
    hsp_obj = Instance(PointObject)
    view = View(VGroup(Item('mri_obj', style='custom',  # show_border=True,
                            label="MRI Head Surface"),
                       Item('hsp_obj', style='custom',  # show_border=True,
                            label="Head Shape Points")),
                title="View Options")
class CoregFrame(HasTraits):
    """GUI for head-MRI coregistration

    Hosts the Mayavi scene, the data-source and coregistration panels, and
    the visualization objects for the MRI surface, the digitizer head shape
    and both sets of fiducials.
    """
    model = Instance(CoregModel, ())
    scene = Instance(MlabSceneModel, ())
    headview = Instance(HeadViewController)
    # Sub-panels; defaults are created by the _*_default methods below.
    subject_panel = Instance(SubjectSelectorPanel)
    fid_panel = Instance(FiducialsPanel)
    coreg_panel = Instance(CoregPanel)
    raw_src = DelegatesTo('model', 'hsp')
    # Omit Points
    distance = Float(5., label="Distance [mm]", desc="Maximal distance for "
                     "head shape points from MRI in mm")
    omit_points = Button(label='Omit Points', desc="Omit head shape points "
                         "for the purpose of the automatic coregistration "
                         "procedure.")
    reset_omit_points = Button(label='Reset Omission', desc="Reset the "
                               "omission of head shape points to include all.")
    omitted_info = Property(Str, depends_on=['model.hsp.n_omitted'])
    fid_ok = DelegatesTo('model', 'mri.fid_ok')
    lock_fiducials = DelegatesTo('model')
    hsp_always_visible = Bool(False, label="Always Show Head Shape")
    # visualization
    hsp_obj = Instance(PointObject)
    mri_obj = Instance(SurfaceObject)
    lpa_obj = Instance(PointObject)
    nasion_obj = Instance(PointObject)
    rpa_obj = Instance(PointObject)
    hsp_lpa_obj = Instance(PointObject)
    hsp_nasion_obj = Instance(PointObject)
    hsp_rpa_obj = Instance(PointObject)
    hsp_visible = Property(depends_on=['hsp_always_visible', 'lock_fiducials'])
    view_options = Button(label="View Options")
    picker = Instance(object)
    view_options_panel = Instance(ViewOptionsPanel)
    # Processing
    queue = DelegatesTo('coreg_panel')
    view = _make_view()
    def _subject_panel_default(self):
        # Share the model's subject source with the selector panel.
        return SubjectSelectorPanel(model=self.model.mri.subject_source)
    def _fid_panel_default(self):
        panel = FiducialsPanel(model=self.model.mri, headview=self.headview)
        return panel
    def _coreg_panel_default(self):
        panel = CoregPanel(model=self.model)
        return panel
    def _headview_default(self):
        return HeadViewController(scene=self.scene, system='RAS')
    def __init__(self, raw=None, subject=None, subjects_dir=None):
        """Initialize the frame.
        Parameters
        ----------
        raw : None | str
            Path assigned to ``model.hsp.file`` (head shape source) if given.
        subject : None | str
            Name of the MRI subject to pre-select.
        subjects_dir : None | str
            Subjects directory; applied only if the directory exists.
        """
        super(CoregFrame, self).__init__()
        subjects_dir = get_subjects_dir(subjects_dir)
        if (subjects_dir is not None) and os.path.isdir(subjects_dir):
            self.model.mri.subjects_dir = subjects_dir
        if subject is not None:
            self.model.mri.subject = subject
        if raw is not None:
            self.model.hsp.file = raw
    @on_trait_change('scene.activated')
    def _init_plot(self):
        """Create all scene objects once the Mayavi scene is activated."""
        # Suspend rendering while many objects are added.
        self.scene.disable_render = True
        lpa_color = defaults['lpa_color']
        nasion_color = defaults['nasion_color']
        rpa_color = defaults['rpa_color']
        # MRI scalp
        color = defaults['mri_color']
        self.mri_obj = SurfaceObject(points=self.model.transformed_mri_points,
                                     color=color, tri=self.model.mri.tris,
                                     scene=self.scene)
        # on_trait_change was unreliable, so link it another way:
        self.model.mri.on_trait_change(self._on_mri_src_change, 'tris')
        self.model.sync_trait('transformed_mri_points', self.mri_obj, 'points',
                              mutual=False)
        self.fid_panel.hsp_obj = self.mri_obj
        # MRI Fiducials
        point_scale = defaults['mri_fid_scale']
        self.lpa_obj = PointObject(scene=self.scene, color=lpa_color,
                                   point_scale=point_scale)
        self.model.mri.sync_trait('lpa', self.lpa_obj, 'points', mutual=False)
        self.model.sync_trait('scale', self.lpa_obj, 'trans', mutual=False)
        self.nasion_obj = PointObject(scene=self.scene, color=nasion_color,
                                      point_scale=point_scale)
        self.model.mri.sync_trait('nasion', self.nasion_obj, 'points',
                                  mutual=False)
        self.model.sync_trait('scale', self.nasion_obj, 'trans', mutual=False)
        self.rpa_obj = PointObject(scene=self.scene, color=rpa_color,
                                   point_scale=point_scale)
        self.model.mri.sync_trait('rpa', self.rpa_obj, 'points', mutual=False)
        self.model.sync_trait('scale', self.rpa_obj, 'trans', mutual=False)
        # Digitizer Head Shape
        color = defaults['hsp_point_color']
        point_scale = defaults['hsp_points_scale']
        p = PointObject(view='cloud', scene=self.scene, color=color,
                        point_scale=point_scale, resolution=5)
        self.hsp_obj = p
        self.model.hsp.sync_trait('points', p, mutual=False)
        self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
        self.sync_trait('hsp_visible', p, 'visible', mutual=False)
        # Digitizer Fiducials
        point_scale = defaults['hsp_fid_scale']
        opacity = defaults['hsp_fid_opacity']
        p = PointObject(scene=self.scene, color=lpa_color, opacity=opacity,
                        point_scale=point_scale)
        self.hsp_lpa_obj = p
        self.model.hsp.sync_trait('lpa', p, 'points', mutual=False)
        self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
        self.sync_trait('hsp_visible', p, 'visible', mutual=False)
        p = PointObject(scene=self.scene, color=nasion_color, opacity=opacity,
                        point_scale=point_scale)
        self.hsp_nasion_obj = p
        self.model.hsp.sync_trait('nasion', p, 'points', mutual=False)
        self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
        self.sync_trait('hsp_visible', p, 'visible', mutual=False)
        p = PointObject(scene=self.scene, color=rpa_color, opacity=opacity,
                        point_scale=point_scale)
        self.hsp_rpa_obj = p
        self.model.hsp.sync_trait('rpa', p, 'points', mutual=False)
        self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
        self.sync_trait('hsp_visible', p, 'visible', mutual=False)
        # Mouse picking is used for editing fiducials; skipped in test mode.
        on_pick = self.scene.mayavi_scene.on_mouse_pick
        if not _testing_mode():
            self.picker = on_pick(self.fid_panel._on_pick, type='cell')
        self.headview.left = True
        self.scene.disable_render = False
        self.view_options_panel = ViewOptionsPanel(mri_obj=self.mri_obj,
                                                   hsp_obj=self.hsp_obj)
    @cached_property
    def _get_hsp_visible(self):
        # Head shape is shown while fiducials are locked, or always if forced.
        return self.hsp_always_visible or self.lock_fiducials
    @cached_property
    def _get_omitted_info(self):
        # Status string for the number of omitted head shape points.
        if self.model.hsp.n_omitted == 0:
            return "No points omitted"
        elif self.model.hsp.n_omitted == 1:
            return "1 point omitted"
        else:
            return "%i points omitted" % self.model.hsp.n_omitted
    def _omit_points_fired(self):
        # GUI distance is in mm; the model expects meters.
        distance = self.distance / 1000.
        self.model.omit_hsp_points(distance)
    def _reset_omit_points_fired(self):
        self.model.omit_hsp_points(0, True)
    @on_trait_change('model.mri.tris')
    def _on_mri_src_change(self):
        """Refresh (or clear) the MRI surface when the source mesh changes."""
        if self.mri_obj is None:
            return
        if not (np.any(self.model.mri.points) and np.any(self.model.mri.tris)):
            self.mri_obj.clear()
            return
        self.mri_obj.points = self.model.mri.points
        self.mri_obj.tri = self.model.mri.tris
        self.mri_obj.plot()
    # automatically lock fiducials if a good fiducials file is loaded
    @on_trait_change('model.mri.fid_file')
    def _on_fid_file_loaded(self):
        if self.model.mri.fid_file:
            self.fid_panel.locked = True
        else:
            self.fid_panel.locked = False
    def _view_options_fired(self):
        self.view_options_panel.edit_traits()
| rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/mne/gui/_coreg_gui.py | Python | bsd-3-clause | 53,959 | [
"Mayavi"
] | 4da167cbcee14bde26ff6cc75ebd8fe86fee9d089a96677a8c3e56757377548c |
import operator, os, socket
from datetime import *
from time import mktime, strftime, localtime
import calendar
from galaxy.webapps.reports.base.controller import *
import galaxy.model
import pkg_resources
pkg_resources.require( "SQLAlchemy >= 0.4" )
import sqlalchemy as sa
import logging
log = logging.getLogger( __name__ )
class Jobs( BaseController ):
@web.expose
def today_all( self, trans, **kwd ):
params = util.Params( kwd )
msg = ''
monitor_email = params.get( 'monitor_email', 'monitor@bx.psu.edu' )
year, month, day = map( int, datetime.utcnow().strftime( "%Y-%m-%d" ).split( "-" ) )
start_date = date( year, month, day )
end_date = start_date + timedelta( days=1 )
day_label = start_date.strftime( "%A" )
month_label = start_date.strftime( "%B" )
year_label = start_date.strftime( "%Y" )
day_of_month = start_date.strftime( "%d" )
q = sa.select( ( sa.func.date( galaxy.model.Job.table.c.create_time ).label( 'date' ),
sa.func.sum( sa.case( [( galaxy.model.User.table.c.email == monitor_email, 1 )], else_=0 ) ).label( 'monitor_jobs' ),
sa.func.count( galaxy.model.Job.table.c.id ).label( 'total_jobs' ) ),
whereclause = sa.and_( galaxy.model.Job.table.c.create_time >= start_date,
galaxy.model.Job.table.c.create_time < end_date ),
from_obj = [ sa.outerjoin( galaxy.model.Job.table,
galaxy.model.History.table ).outerjoin( galaxy.model.User.table ) ],
group_by = [ 'date' ] )
jobs = []
for row in q.execute():
jobs.append( ( row.date.strftime( "%A" ),
row.date,
row.total_jobs - row.monitor_jobs,
row.monitor_jobs,
row.total_jobs,
row.date.strftime( "%d" ) ) )
return trans.fill_template( 'jobs_today_all.mako',
day_label=day_label,
month_label=month_label,
year_label=year_label,
day_of_month=day_of_month,
month=month,
jobs=jobs,
msg=msg )
@web.expose
def specified_month_all( self, trans, **kwd ):
params = util.Params( kwd )
msg = ''
monitor_email = params.get( 'monitor_email', 'monitor@bx.psu.edu' )
year, month = map( int, params.get( 'month', datetime.utcnow().strftime( "%Y-%m" ) ).split( "-" ) )
start_date = date( year, month, 1 )
end_date = start_date + timedelta( days=calendar.monthrange( year, month )[1] )
month_label = start_date.strftime( "%B" )
year_label = start_date.strftime( "%Y" )
q = sa.select( ( sa.func.date( galaxy.model.Job.table.c.create_time ).label( 'date' ),
sa.func.sum( sa.case( [( galaxy.model.User.table.c.email == monitor_email, 1 )], else_=0 ) ).label( 'monitor_jobs' ),
sa.func.count( galaxy.model.Job.table.c.id ).label( 'total_jobs' ) ),
whereclause = sa.and_( galaxy.model.Job.table.c.create_time >= start_date,
galaxy.model.Job.table.c.create_time < end_date ),
from_obj = [ sa.outerjoin( galaxy.model.Job.table,
galaxy.model.History.table ).outerjoin( galaxy.model.User.table ) ],
group_by = [ 'date' ],
order_by = [ sa.desc( 'date' ) ] )
jobs = []
for row in q.execute():
jobs.append( ( row.date.strftime( "%A" ),
row.date,
row.total_jobs - row.monitor_jobs,
row.monitor_jobs,
row.total_jobs,
row.date.strftime( "%d" ) ) )
return trans.fill_template( 'jobs_specified_month_all.mako',
month_label=month_label,
year_label=year_label,
month=month,
jobs=jobs,
msg=msg )
@web.expose
def specified_month_in_error( self, trans, **kwd ):
params = util.Params( kwd )
msg = ''
year, month = map( int, params.get( 'month', datetime.utcnow().strftime( "%Y-%m" ) ).split( "-" ) )
start_date = date( year, month, 1 )
end_date = start_date + timedelta( days=calendar.monthrange( year, month )[1] )
month_label = start_date.strftime( "%B" )
year_label = start_date.strftime( "%Y" )
q = sa.select( ( sa.func.date( galaxy.model.Job.table.c.create_time ).label( 'date' ),
sa.func.count( galaxy.model.Job.table.c.id ).label( 'total_jobs' ) ),
whereclause = sa.and_( galaxy.model.Job.table.c.state == 'error',
galaxy.model.Job.table.c.create_time >= start_date,
galaxy.model.Job.table.c.create_time < end_date ),
from_obj = [ sa.outerjoin( galaxy.model.Job.table,
galaxy.model.History.table ).outerjoin( galaxy.model.User.table ) ],
group_by = [ 'date' ],
order_by = [ sa.desc( 'date' ) ] )
jobs = []
for row in q.execute():
jobs.append( ( row.date.strftime( "%A" ),
row.date,
row.total_jobs,
row.date.strftime( "%d" ) ) )
return trans.fill_template( 'jobs_specified_month_in_error.mako',
month_label=month_label,
year_label=year_label,
month=month,
jobs=jobs,
msg=msg )
@web.expose
def specified_date_in_error( self, trans, **kwd ):
params = util.Params( kwd )
msg = ''
year, month, day = map( int, params.get( 'specified_date', datetime.utcnow().strftime( "%Y-%m-%d" ) ).split( "-" ) )
start_date = date( year, month, day )
end_date = start_date + timedelta( days=1 )
day_label = start_date.strftime( "%A" )
month_label = start_date.strftime( "%B" )
year_label = start_date.strftime( "%Y" )
day_of_month = start_date.strftime( "%d" )
q = sa.select( ( galaxy.model.Job.table.c.id,
galaxy.model.Job.table.c.state,
galaxy.model.Job.table.c.create_time,
galaxy.model.Job.table.c.update_time,
galaxy.model.Job.table.c.tool_id,
galaxy.model.Job.table.c.command_line,
galaxy.model.Job.table.c.stderr,
galaxy.model.Job.table.c.session_id,
( galaxy.model.Job.table.c.traceback ).label( 'stack_trace' ),
galaxy.model.Job.table.c.info,
( galaxy.model.User.table.c.email ).label( 'user_email' ),
galaxy.model.GalaxySession.table.c.remote_addr ),
whereclause = sa.and_( galaxy.model.Job.table.c.state == 'error',
galaxy.model.Job.table.c.create_time >= start_date,
galaxy.model.Job.table.c.create_time < end_date ),
from_obj = [ sa.outerjoin( galaxy.model.Job.table,
galaxy.model.History.table ) \
.outerjoin( galaxy.model.User.table ) \
.outerjoin( galaxy.model.GalaxySession.table,
galaxy.model.Job.table.c.session_id == galaxy.model.GalaxySession.table.c.id ) ],
order_by = [ sa.desc( galaxy.model.Job.table.c.id ) ] )
jobs = []
for row in q.execute():
remote_host = row.remote_addr
if row.remote_addr:
try:
remote_host = socket.gethostbyaddr( row.remote_addr )[0]
except:
pass
jobs.append( ( row.state,
row.id,
row.create_time,
row.update_time,
row.session_id,
row.tool_id,
row.user_email,
remote_host,
row.command_line,
row.stderr,
row.stack_trace,
row.info ) )
return trans.fill_template( 'jobs_specified_date_in_error.mako',
specified_date=start_date,
day_label=day_label,
month_label=month_label,
year_label=year_label,
day_of_month=day_of_month,
jobs=jobs,
msg=msg )
@web.expose
def specified_date_all( self, trans, **kwd ):
params = util.Params( kwd )
msg = ''
monitor_email = params.get( 'monitor_email', 'monitor@bx.psu.edu' )
year, month, day = map( int, params.get( 'specified_date', datetime.utcnow().strftime( "%Y-%m-%d" ) ).split( "-" ) )
start_date = date( year, month, day )
end_date = start_date + timedelta( days=1 )
day_label = start_date.strftime( "%A" )
month_label = start_date.strftime( "%B" )
year_label = start_date.strftime( "%Y" )
day_of_month = start_date.strftime( "%d" )
q = sa.select( ( galaxy.model.Job.table.c.id,
galaxy.model.Job.table.c.state,
galaxy.model.Job.table.c.create_time,
galaxy.model.Job.table.c.update_time,
galaxy.model.Job.table.c.tool_id,
galaxy.model.Job.table.c.command_line,
galaxy.model.Job.table.c.stderr,
galaxy.model.Job.table.c.session_id,
( galaxy.model.Job.table.c.traceback ).label( 'stack_trace' ),
galaxy.model.Job.table.c.info,
( galaxy.model.User.table.c.email ).label( 'user_email' ),
galaxy.model.GalaxySession.table.c.remote_addr ),
whereclause = sa.and_( galaxy.model.Job.table.c.create_time >= start_date,
galaxy.model.Job.table.c.create_time < end_date ),
from_obj = [ sa.outerjoin( galaxy.model.Job.table,
galaxy.model.History.table ) \
.outerjoin( galaxy.model.User.table ) \
.outerjoin( galaxy.model.GalaxySession.table,
galaxy.model.Job.table.c.session_id == galaxy.model.GalaxySession.table.c.id ) ],
order_by = [ sa.desc( galaxy.model.Job.table.c.id ) ] )
jobs = []
for row in q.execute():
remote_host = row.remote_addr
if row.remote_addr:
try:
remote_host = socket.gethostbyaddr( row.remote_addr )[0]
except:
pass
jobs.append( ( row.state,
row.id,
row.create_time,
row.update_time,
row.session_id,
row.tool_id,
row.user_email,
remote_host,
row.command_line,
row.stderr,
row.stack_trace,
row.info ) )
return trans.fill_template( 'jobs_specified_date_all.mako',
specified_date=start_date,
day_label=day_label,
month_label=month_label,
year_label=year_label,
day_of_month=day_of_month,
jobs=jobs,
msg=msg )
@web.expose
def all_unfinished( self, trans, **kwd ):
params = util.Params( kwd )
msg = ''
jobs = []
q = sa.select( ( galaxy.model.Job.table.c.id,
galaxy.model.Job.table.c.state,
galaxy.model.Job.table.c.create_time,
galaxy.model.Job.table.c.update_time,
galaxy.model.Job.table.c.tool_id,
galaxy.model.Job.table.c.command_line,
galaxy.model.Job.table.c.stderr,
galaxy.model.Job.table.c.session_id,
( galaxy.model.Job.table.c.traceback ).label( 'stack_trace' ),
galaxy.model.Job.table.c.info,
( galaxy.model.User.table.c.email ).label( 'user_email' ),
galaxy.model.GalaxySession.table.c.remote_addr ),
whereclause = sa.not_( sa.or_( galaxy.model.Job.table.c.state == 'ok',
galaxy.model.Job.table.c.state == 'error',
galaxy.model.Job.table.c.state == 'deleted' ) ),
from_obj = [ sa.outerjoin( galaxy.model.Job.table,
galaxy.model.History.table ) \
.outerjoin( galaxy.model.User.table ) \
.outerjoin( galaxy.model.GalaxySession.table,
galaxy.model.Job.table.c.session_id == galaxy.model.GalaxySession.table.c.id ) ],
order_by = [ sa.desc( galaxy.model.Job.table.c.id ) ] )
for row in q.execute():
remote_host = row.remote_addr
if row.remote_addr:
try:
remote_host = socket.gethostbyaddr( row.remote_addr )[0]
except:
pass
jobs.append( ( row.state,
row.id,
row.create_time,
row.update_time,
row.session_id,
row.tool_id,
row.user_email,
remote_host,
row.command_line,
row.stderr,
row.stack_trace,
row.info ) )
return trans.fill_template( 'jobs_all_unfinished.mako', jobs=jobs, msg=msg )
@web.expose
def per_month_all( self, trans, **kwd ):
params = util.Params( kwd )
msg = ''
monitor_email = params.get( 'monitor_email', 'monitor@bx.psu.edu' )
q = sa.select( ( sa.func.date_trunc( 'month', sa.func.date( galaxy.model.Job.table.c.create_time ) ).label( 'date' ),
sa.func.sum( sa.case( [( galaxy.model.User.table.c.email == monitor_email, 1 )], else_=0 ) ).label( 'monitor_jobs' ),
sa.func.count( galaxy.model.Job.table.c.id ).label( 'total_jobs' ) ),
from_obj = [ sa.outerjoin( galaxy.model.Job.table,
galaxy.model.History.table ).outerjoin( galaxy.model.User.table ) ],
group_by = [ sa.func.date_trunc( 'month', sa.func.date( galaxy.model.Job.table.c.create_time ) ) ],
order_by = [ sa.desc( 'date' ) ] )
jobs = []
for row in q.execute():
jobs.append( ( row.date.strftime( "%Y-%m" ),
row.total_jobs - row.monitor_jobs,
row.monitor_jobs,
row.total_jobs,
row.date.strftime( "%B" ),
row.date.strftime( "%Y" ) ) )
return trans.fill_template( 'jobs_per_month_all.mako', jobs=jobs, msg=msg )
@web.expose
def per_month_in_error( self, trans, **kwd ):
params = util.Params( kwd )
msg = ''
q = sa.select( ( sa.func.date_trunc( 'month', sa.func.date( galaxy.model.Job.table.c.create_time ) ).label( 'date' ),
sa.func.count( galaxy.model.Job.table.c.id ).label( 'total_jobs' ) ),
whereclause = galaxy.model.Job.table.c.state == 'error',
from_obj = [ galaxy.model.Job.table ],
group_by = [ sa.func.date_trunc( 'month', sa.func.date( galaxy.model.Job.table.c.create_time ) ) ],
order_by = [ sa.desc( 'date' ) ] )
jobs = []
for row in q.execute():
jobs.append( ( row.date.strftime( "%Y-%m" ),
row.total_jobs,
row.date.strftime( "%B" ),
row.date.strftime( "%Y" ) ) )
return trans.fill_template( 'jobs_per_month_in_error.mako', jobs=jobs, msg=msg )
@web.expose
def per_user( self, trans, **kwd ):
params = util.Params( kwd )
msg = ''
jobs = []
q = sa.select( ( galaxy.model.User.table.c.email.label( 'user_email' ),
sa.func.count( galaxy.model.Job.table.c.id ).label( 'total_jobs' ) ),
from_obj = [ sa.outerjoin( galaxy.model.Job.table,
galaxy.model.GalaxySession.table ).outerjoin( galaxy.model.User.table ) ],
group_by = [ 'user_email' ],
order_by = [ sa.desc( 'total_jobs' ), 'user_email' ] )
for row in q.execute():
jobs.append( ( row.user_email,
row.total_jobs ) )
return trans.fill_template( 'jobs_per_user.mako', jobs=jobs, msg=msg )
@web.expose
def user_per_month( self, trans, **kwd ):
params = util.Params( kwd )
msg = ''
email = params.get( 'email', None )
if email is not None:
# The @ char has been converted to an 'X'
email = email.replace( 'X', '@' )
q = sa.select( ( sa.func.date_trunc( 'month', sa.func.date( galaxy.model.Job.table.c.create_time ) ).label( 'date' ),
sa.func.count( galaxy.model.Job.table.c.id ).label( 'total_jobs' ) ),
whereclause = galaxy.model.User.table.c.email == email,
from_obj = [ sa.outerjoin( galaxy.model.Job.table,
galaxy.model.GalaxySession.table ).outerjoin( galaxy.model.User.table ) ],
group_by = [ sa.func.date_trunc( 'month', sa.func.date( galaxy.model.Job.table.c.create_time ) ) ],
order_by = [ sa.desc( 'date' ) ] )
jobs = []
for row in q.execute():
jobs.append( ( row.date.strftime( "%Y-%m" ),
row.total_jobs,
row.date.strftime( "%B" ),
row.date.strftime( "%Y" ) ) )
return trans.fill_template( 'jobs_user_per_month.mako', email=email, jobs=jobs, msg=msg )
@web.expose
def user_for_month( self, trans, **kwd ):
params = util.Params( kwd )
msg = ''
email = params.get( 'email', None )
if email is not None:
# The @ char has been converted to an 'X'
email = email.replace( 'X', '@' )
year, month = map( int, params.get( 'month', datetime.utcnow().strftime( "%Y-%m" ) ).split( "-" ) )
start_date = date( year, month, 1 )
end_date = start_date + timedelta( days=calendar.monthrange( year, month )[1] )
month_label = start_date.strftime( "%B" )
year_label = start_date.strftime( "%Y" )
jobs = []
q = sa.select( ( galaxy.model.Job.table.c.id,
galaxy.model.Job.table.c.state,
galaxy.model.Job.table.c.create_time,
galaxy.model.Job.table.c.update_time,
galaxy.model.Job.table.c.tool_id,
galaxy.model.Job.table.c.command_line,
galaxy.model.Job.table.c.stderr,
galaxy.model.Job.table.c.session_id,
( galaxy.model.Job.table.c.traceback ).label( 'stack_trace' ),
galaxy.model.Job.table.c.info,
( galaxy.model.User.table.c.email ).label( 'user_email' ),
galaxy.model.GalaxySession.table.c.remote_addr ),
whereclause = sa.and_( galaxy.model.User.table.c.email == email,
galaxy.model.Job.table.c.create_time >= start_date,
galaxy.model.Job.table.c.create_time < end_date ),
from_obj = [ sa.outerjoin( galaxy.model.Job.table,
galaxy.model.History.table ) \
.outerjoin( galaxy.model.User.table ) \
.outerjoin( galaxy.model.GalaxySession.table,
galaxy.model.Job.table.c.session_id == galaxy.model.GalaxySession.table.c.id ) ],
order_by = [ sa.desc( galaxy.model.Job.table.c.id ) ] )
for row in q.execute():
remote_host = row.remote_addr
if row.remote_addr:
try:
remote_host = socket.gethostbyaddr( row.remote_addr )[0]
except:
pass
jobs.append( ( row.state,
row.id,
row.create_time,
row.update_time,
row.session_id,
row.tool_id,
row.user_email,
remote_host,
row.command_line,
row.stderr,
row.stack_trace,
row.info ) )
return trans.fill_template( 'jobs_user_for_month.mako',
email=email,
month=month,
month_label=month_label,
year_label=year_label,
jobs=jobs,
msg=msg )
@web.expose
def per_tool( self, trans, **kwd ):
params = util.Params( kwd )
msg = ''
jobs = []
q = sa.select( ( galaxy.model.Job.table.c.tool_id.label( 'tool_id' ),
sa.func.count( galaxy.model.Job.table.c.id ).label( 'total_jobs' ) ),
from_obj = [ galaxy.model.Job.table ],
group_by = [ 'tool_id' ],
order_by = [ 'tool_id' ] )
for row in q.execute():
jobs.append( ( row.tool_id,
row.total_jobs ) )
return trans.fill_template( 'jobs_per_tool.mako', jobs=jobs, msg=msg )
@web.expose
def tool_per_month( self, trans, **kwd ):
params = util.Params( kwd )
msg = ''
tool_id = params.get( 'tool_id', 'Add a column1' )
q = sa.select( ( sa.func.date_trunc( 'month', sa.func.date( galaxy.model.Job.table.c.create_time ) ).label( 'date' ),
sa.func.count( galaxy.model.Job.table.c.id ).label( 'total_jobs' ) ),
whereclause = galaxy.model.Job.table.c.tool_id == tool_id,
from_obj = [ galaxy.model.Job.table ],
group_by = [ sa.func.date_trunc( 'month', sa.func.date( galaxy.model.Job.table.c.create_time ) ) ],
order_by = [ sa.desc( 'date' ) ] )
jobs = []
for row in q.execute():
jobs.append( ( row.date.strftime( "%Y-%m" ),
row.total_jobs,
row.date.strftime( "%B" ),
row.date.strftime( "%Y" ) ) )
return trans.fill_template( 'jobs_tool_per_month.mako', tool_id=tool_id, jobs=jobs, msg=msg )
@web.expose
def tool_for_month( self, trans, **kwd ):
params = util.Params( kwd )
msg = ''
tool_id = params.get( 'tool_id', 'Add a column1' )
year, month = map( int, params.get( 'month', datetime.utcnow().strftime( "%Y-%m" ) ).split( "-" ) )
start_date = date( year, month, 1 )
end_date = start_date + timedelta( days=calendar.monthrange( year, month )[1] )
month_label = start_date.strftime( "%B" )
year_label = start_date.strftime( "%Y" )
jobs = []
q = sa.select( ( galaxy.model.Job.table.c.id,
galaxy.model.Job.table.c.state,
galaxy.model.Job.table.c.create_time,
galaxy.model.Job.table.c.update_time,
galaxy.model.Job.table.c.tool_id,
galaxy.model.Job.table.c.command_line,
galaxy.model.Job.table.c.stderr,
galaxy.model.Job.table.c.session_id,
( galaxy.model.Job.table.c.traceback ).label( 'stack_trace' ),
galaxy.model.Job.table.c.info,
( galaxy.model.User.table.c.email ).label( 'user_email' ),
galaxy.model.GalaxySession.table.c.remote_addr ),
whereclause = sa.and_( galaxy.model.Job.table.c.tool_id == tool_id,
galaxy.model.Job.table.c.create_time >= start_date,
galaxy.model.Job.table.c.create_time < end_date ),
from_obj = [ sa.outerjoin( galaxy.model.Job.table,
galaxy.model.History.table ) \
.outerjoin( galaxy.model.User.table ) \
.outerjoin( galaxy.model.GalaxySession.table,
galaxy.model.Job.table.c.session_id == galaxy.model.GalaxySession.table.c.id ) ],
order_by = [ sa.desc( galaxy.model.Job.table.c.id ) ] )
for row in q.execute():
remote_host = row.remote_addr
if row.remote_addr:
try:
remote_host = socket.gethostbyaddr( row.remote_addr )[0]
except:
pass
jobs.append( ( row.state,
row.id,
row.create_time,
row.update_time,
row.session_id,
row.tool_id,
row.user_email,
remote_host,
row.command_line,
row.stderr,
row.stack_trace,
row.info ) )
return trans.fill_template( 'jobs_tool_for_month.mako',
tool_id=tool_id,
month=month,
month_label=month_label,
year_label=year_label,
jobs=jobs,
msg=msg )
    @web.expose
    def per_domain( self, trans, **kwd ):
        """Render job totals grouped by a crude "domain" derived from each user's email.

        NOTE(review): the SQL takes the last 4 characters of the email and then
        a 4-character slice from the first '.', so only short TLD-style
        suffixes are grouped - confirm this is intentional.
        """
        # TODO: rewrite using alchemy
        params = util.Params( kwd )
        msg = ''
        engine = galaxy.model.mapping.metadata.engine
        jobs = []
        # Raw SQL against job / galaxy_session / galaxy_user, counting jobs
        # per derived email-domain suffix, busiest domains first.
        s = """
        SELECT
            substr(bar.first_pass_domain, bar.dot_position, 4) AS domain,
            count(job_id) AS total_jobs
        FROM
            (SELECT
                user_email AS user_email,
                first_pass_domain,
                position('.' in first_pass_domain) AS dot_position,
                job_id AS job_id
            FROM
                (SELECT
                    email AS user_email,
                    substr(email, char_length(email)-3, char_length(email)) AS first_pass_domain,
                    job.id AS job_id
                FROM
                    job
                    LEFT OUTER JOIN galaxy_session ON galaxy_session.id = job.session_id
                    LEFT OUTER JOIN galaxy_user ON galaxy_session.user_id = galaxy_user.id
                WHERE
                    job.session_id = galaxy_session.id
                    AND
                    galaxy_session.user_id = galaxy_user.id
                ) AS foo
            ) AS bar
        GROUP BY
            domain
        ORDER BY
            total_jobs DESC
        """
        job_rows = engine.text( s ).execute().fetchall()
        for job in job_rows:
            jobs.append( ( job.domain, job.total_jobs ) )
        return trans.fill_template( 'jobs_per_domain.mako', jobs=jobs, msg=msg )
| volpino/Yeps-EURAC | lib/galaxy/webapps/reports/controllers/jobs.py | Python | mit | 30,355 | [
"Galaxy"
] | fbbb523b869cdf78bd514c5ee0410ede6800abd9b333c5ee299a2ca7763840a8 |
import json
import logging
from datetime import datetime
import ddt
from django.core.urlresolvers import reverse
from django.http import Http404
from django.test.client import Client, RequestFactory
from django.test.utils import override_settings
from django.utils import translation
from mock import ANY, Mock, call, patch
from nose.tools import assert_true
from course_modes.models import CourseMode
from course_modes.tests.factories import CourseModeFactory
from django_comment_client.constants import TYPE_ENTRY, TYPE_SUBCATEGORY
from django_comment_client.permissions import get_team
from django_comment_client.tests.group_id import (
CohortedTopicGroupIdTestMixin,
GroupIdAssertionMixin,
NonCohortedTopicGroupIdTestMixin
)
from django_comment_client.tests.unicode import UnicodeTestMixin
from django_comment_client.tests.utils import (
CohortedTestCase,
ForumsEnableMixin,
config_course_discussions,
topic_name_to_id
)
from django_comment_client.utils import strip_none
from django_comment_common.models import CourseDiscussionSettings, ForumsConfig
from django_comment_common.utils import ThreadContext
from lms.djangoapps.courseware.exceptions import CourseAccessRedirect
from lms.djangoapps.discussion import views
from lms.djangoapps.discussion.views import course_discussions_settings_handler
from lms.djangoapps.teams.tests.factories import CourseTeamFactory
from lms.lib.comment_client.utils import CommentClientPaginatedResult
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from openedx.core.djangoapps.course_groups.tests.helpers import config_course_cohorts
from openedx.core.djangoapps.course_groups.tests.test_views import CohortViewsTestCase
from openedx.core.djangoapps.util.testing import ContentGroupTestCase
from openedx.features.enterprise_support.tests.mixins.enterprise import EnterpriseTestConsentRequired
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from util.testing import UrlResetMixin
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import (
TEST_DATA_MONGO_MODULESTORE,
ModuleStoreTestCase,
SharedModuleStoreTestCase
)
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls
log = logging.getLogger(__name__)  # module-level logger, per logging convention
# pylint: disable=missing-docstring
class ViewsExceptionTestCase(UrlResetMixin, ModuleStoreTestCase):
    """Discussion profile views should return 404 when the profiled user does not exist."""
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        """Create a course with one enrolled, logged-in student and enable forums."""
        # Patching the ENABLE_DISCUSSION_SERVICE value affects the contents of urls.py,
        # so we need to call super.setUp() which reloads urls.py (because
        # of the UrlResetMixin)
        super(ViewsExceptionTestCase, self).setUp()
        # create a course
        self.course = CourseFactory.create(org='MITx', course='999',
                                           display_name='Robot Super Course')
        # Patch the comment client user save method so it does not try
        # to create a new cc user when creating a django user
        with patch('student.models.cc.User.save'):
            uname = 'student'
            email = 'student@edx.org'
            password = 'test'
            # Create the student
            self.student = UserFactory(username=uname, password=password, email=email)
            # Enroll the student in the course
            CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
            # Log the student in
            self.client = Client()
            assert_true(self.client.login(username=uname, password=password))
        config = ForumsConfig.current()
        config.enabled = True
        config.save()
    @patch('student.models.cc.User.from_django_user')
    @patch('student.models.cc.User.active_threads')
    def test_user_profile_exception(self, mock_threads, mock_from_django_user):
        """user_profile should respond 404 for a user id that does not exist."""
        # Mock the code that makes the HTTP requests to the cs_comment_service app
        # for the profiled user's active threads
        mock_threads.return_value = [], 1, 1
        # Mock the code that makes the HTTP request to the cs_comment_service app
        # that gets the current user's info
        mock_from_django_user.return_value = Mock()
        url = reverse('discussion.views.user_profile',
                      kwargs={'course_id': self.course.id.to_deprecated_string(), 'user_id': '12345'}) # There is no user 12345
        self.response = self.client.get(url)
        self.assertEqual(self.response.status_code, 404)
    @patch('student.models.cc.User.from_django_user')
    @patch('student.models.cc.User.subscribed_threads')
    def test_user_followed_threads_exception(self, mock_threads, mock_from_django_user):
        """followed_threads should respond 404 for a user id that does not exist."""
        # Mock the code that makes the HTTP requests to the cs_comment_service app
        # for the profiled user's active threads
        mock_threads.return_value = CommentClientPaginatedResult(collection=[], page=1, num_pages=1)
        # Mock the code that makes the HTTP request to the cs_comment_service app
        # that gets the current user's info
        mock_from_django_user.return_value = Mock()
        url = reverse('discussion.views.followed_threads',
                      kwargs={'course_id': self.course.id.to_deprecated_string(), 'user_id': '12345'}) # There is no user 12345
        self.response = self.client.get(url)
        self.assertEqual(self.response.status_code, 404)
def make_mock_thread_data(
        course,
        text,
        thread_id,
        num_children,
        group_id=None,
        group_name=None,
        commentable_id=None,
        is_commentable_divided=None,
):
    """Build a dict that mimics a comments-service thread payload for tests.

    Falls back to the course's 'General' topic id (or a dummy id) when no
    commentable_id is given, and marks the thread standalone when the topic
    belongs to a team.
    """
    data_commentable_id = (
        commentable_id or course.discussion_topics.get('General', {}).get('id') or "dummy_commentable_id"
    )
    # Team-owned topics get the standalone context; everything else is course-wide.
    if get_team(data_commentable_id) is None:
        context = ThreadContext.COURSE
    else:
        context = ThreadContext.STANDALONE
    thread_data = {
        "id": thread_id,
        "type": "thread",
        "title": text,
        "body": text,
        "commentable_id": data_commentable_id,
        "resp_total": 42,
        "resp_skip": 25,
        "resp_limit": 5,
        "group_id": group_id,
        "context": context,
    }
    if group_id is not None:
        thread_data['group_name'] = group_name
    if is_commentable_divided is not None:
        thread_data['is_commentable_divided'] = is_commentable_divided
    if num_children is not None:
        thread_data["children"] = [
            {"id": "dummy_comment_id_{}".format(i), "type": "comment", "body": text}
            for i in range(num_children)
        ]
    return thread_data
def make_mock_request_impl(
        course,
        text,
        thread_id="dummy_thread_id",
        group_id=None,
        commentable_id=None,
        num_thread_responses=1,
):
    """Return a callable suitable as a requests.request side_effect.

    The callable fakes comments-service responses: thread listings, a single
    thread, and user info; any other URL yields a 404.
    """
    def mock_request_impl(*args, **kwargs):
        url = args[1]
        payload = None
        if url.endswith("threads") or url.endswith("user_profile"):
            # Thread listing / profile: a one-element collection.
            payload = {
                "collection": [
                    make_mock_thread_data(
                        course=course,
                        text=text,
                        thread_id=thread_id,
                        num_children=None,
                        group_id=group_id,
                        commentable_id=commentable_id,
                    )
                ]
            }
        elif thread_id and url.endswith(thread_id):
            # Single-thread fetch, including its responses.
            payload = make_mock_thread_data(
                course=course,
                text=text,
                thread_id=thread_id,
                num_children=num_thread_responses,
                group_id=group_id,
                commentable_id=commentable_id
            )
        elif "/users/" in url:
            payload = {
                "default_sort_key": "date",
                "upvoted_ids": [],
                "downvoted_ids": [],
                "subscribed_thread_ids": [],
            }
            # comments service adds these attributes when course_id param is present
            if kwargs.get('params', {}).get('course_id'):
                payload.update({
                    "threads_count": 1,
                    "comments_count": 2
                })
        if payload:
            return Mock(status_code=200, text=json.dumps(payload), json=Mock(return_value=payload))
        return Mock(status_code=404)
    return mock_request_impl
class StringEndsWithMatcher(object):
    """Equality matcher: compares equal to any string ending with the given suffix."""
    def __init__(self, suffix):
        self._suffix = suffix
    def __eq__(self, other):
        return other.endswith(self._suffix)
class PartialDictMatcher(object):
    """Equality matcher: compares equal to any dict containing all expected key/value pairs."""
    def __init__(self, expected_values):
        self.expected_values = expected_values
    def __eq__(self, other):
        # .items() works on both Python 2 and 3, unlike the Python-2-only
        # .iteritems() it replaces; the generator also lets all() short-circuit
        # instead of materializing a list first.
        return all(
            key in other and other[key] == value
            for key, value in self.expected_values.items()
        )
@patch('requests.request', autospec=True)
class SingleThreadTestCase(ForumsEnableMixin, ModuleStoreTestCase):
    """Tests for the single_thread view: ajax payloads, pagination params, and errors."""
    # NOTE(review): presumably tells ModuleStoreTestCase not to create its
    # default user, since setUp creates its own student - confirm.
    CREATE_USER = False
    def setUp(self):
        """Create a course with a dummy discussion topic and one enrolled student."""
        super(SingleThreadTestCase, self).setUp()
        self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})
        self.student = UserFactory.create()
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
    def test_ajax(self, mock_request):
        """An ajax GET returns the thread content as JSON and marks it read."""
        text = "dummy content"
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
        request = RequestFactory().get(
            "dummy_url",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = self.student
        response = views.single_thread(
            request,
            self.course.id.to_deprecated_string(),
            "dummy_discussion_id",
            "test_thread_id"
        )
        self.assertEquals(response.status_code, 200)
        response_data = json.loads(response.content)
        # strip_none is being used to perform the same transform that the
        # django view performs prior to writing thread data to the response
        self.assertEquals(
            response_data["content"],
            strip_none(make_mock_thread_data(course=self.course, text=text, thread_id=thread_id, num_children=1))
        )
        mock_request.assert_called_with(
            "get",
            StringEndsWithMatcher(thread_id),  # url
            data=None,
            params=PartialDictMatcher({"mark_as_read": True, "user_id": 1, "recursive": True}),
            headers=ANY,
            timeout=ANY
        )
    def test_skip_limit(self, mock_request):
        """resp_skip / resp_limit query params are forwarded to the comments service."""
        text = "dummy content"
        thread_id = "test_thread_id"
        response_skip = "45"
        response_limit = "15"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
        request = RequestFactory().get(
            "dummy_url",
            {"resp_skip": response_skip, "resp_limit": response_limit},
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = self.student
        response = views.single_thread(
            request,
            self.course.id.to_deprecated_string(),
            "dummy_discussion_id",
            "test_thread_id"
        )
        self.assertEquals(response.status_code, 200)
        response_data = json.loads(response.content)
        # strip_none is being used to perform the same transform that the
        # django view performs prior to writing thread data to the response
        self.assertEquals(
            response_data["content"],
            strip_none(make_mock_thread_data(course=self.course, text=text, thread_id=thread_id, num_children=1))
        )
        mock_request.assert_called_with(
            "get",
            StringEndsWithMatcher(thread_id),  # url
            data=None,
            params=PartialDictMatcher({
                "mark_as_read": True,
                "user_id": 1,
                "recursive": True,
                "resp_skip": response_skip,
                "resp_limit": response_limit,
            }),
            headers=ANY,
            timeout=ANY
        )
    def test_post(self, mock_request):
        """POST is not allowed on the single_thread view (405)."""
        request = RequestFactory().post("dummy_url")
        response = views.single_thread(
            request,
            self.course.id.to_deprecated_string(),
            "dummy_discussion_id",
            "dummy_thread_id"
        )
        self.assertEquals(response.status_code, 405)
    def test_not_found(self, mock_request):
        """A comments-service 404 for the thread surfaces as Http404."""
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        # Mock request to return 404 for thread request
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy", thread_id=None)
        self.assertRaises(
            Http404,
            views.single_thread,
            request,
            self.course.id.to_deprecated_string(),
            "test_discussion_id",
            "test_thread_id"
        )
@ddt.ddt
@patch('requests.request', autospec=True)
class SingleThreadQueryCountTestCase(ForumsEnableMixin, ModuleStoreTestCase):
    """
    Ensures the number of modulestore queries and number of sql queries are
    independent of the number of responses retrieved for a given discussion thread.
    """
    MODULESTORE = TEST_DATA_MONGO_MODULESTORE
    def setUp(self):
        """No extra fixtures; each test builds its own course in the chosen store."""
        super(SingleThreadQueryCountTestCase, self).setUp()
    @ddt.data(
        # Old mongo with cache. There is an additional SQL query for old mongo
        # because the first time that disabled_xblocks is queried is in call_single_thread,
        # vs. the creation of the course (CourseFactory.create). The creation of the
        # course is outside the context manager that is verifying the number of queries,
        # and with split mongo, that method ends up querying disabled_xblocks (which is then
        # cached and hence not queried as part of call_single_thread).
        (ModuleStoreEnum.Type.mongo, False, 1, 5, 3, 13, 1),
        (ModuleStoreEnum.Type.mongo, False, 50, 5, 3, 13, 1),
        # split mongo: 3 queries, regardless of thread response size.
        (ModuleStoreEnum.Type.split, False, 1, 3, 3, 12, 1),
        (ModuleStoreEnum.Type.split, False, 50, 3, 3, 12, 1),
        # Enabling Enterprise integration should have no effect on the number of mongo queries made.
        (ModuleStoreEnum.Type.mongo, True, 1, 5, 3, 13, 1),
        (ModuleStoreEnum.Type.mongo, True, 50, 5, 3, 13, 1),
        # split mongo: 3 queries, regardless of thread response size.
        (ModuleStoreEnum.Type.split, True, 1, 3, 3, 12, 1),
        (ModuleStoreEnum.Type.split, True, 50, 3, 3, 12, 1),
    )
    @ddt.unpack
    def test_number_of_mongo_queries(
            self,
            default_store,
            enterprise_enabled,
            num_thread_responses,
            num_uncached_mongo_calls,
            num_cached_mongo_calls,
            num_uncached_sql_queries,
            num_cached_sql_queries,
            mock_request
    ):
        """Assert exact mongo-call and SQL-query counts, cold cache then warm cache."""
        with modulestore().default_store(default_store):
            course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})
        student = UserFactory.create()
        CourseEnrollmentFactory.create(user=student, course_id=course.id)
        test_thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(
            course=course, text="dummy content", thread_id=test_thread_id, num_thread_responses=num_thread_responses
        )
        request = RequestFactory().get(
            "dummy_url",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = student
        def call_single_thread():
            """
            Call single_thread and assert that it returns what we expect.
            """
            with override_settings(ENABLE_ENTERPRISE_INTEGRATION=enterprise_enabled):
                response = views.single_thread(
                    request,
                    course.id.to_deprecated_string(),
                    "dummy_discussion_id",
                    test_thread_id
                )
            self.assertEquals(response.status_code, 200)
            self.assertEquals(len(json.loads(response.content)["content"]["children"]), num_thread_responses)
        # Test uncached first, then cached now that the cache is warm.
        cached_calls = [
            [num_uncached_mongo_calls, num_uncached_sql_queries],
            [num_cached_mongo_calls, num_cached_sql_queries],
        ]
        for expected_mongo_calls, expected_sql_queries in cached_calls:
            with self.assertNumQueries(expected_sql_queries):
                with check_mongo_calls(expected_mongo_calls):
                    call_single_thread()
@patch('requests.request', autospec=True)
class SingleCohortedThreadTestCase(CohortedTestCase):
    """single_thread should include cohort group info for divided topics."""
    def _create_mock_cohorted_thread(self, mock_request):
        """Point mock_request at a thread owned by the student's cohort."""
        self.mock_text = "dummy content"
        self.mock_thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text=self.mock_text,
            thread_id=self.mock_thread_id,
            group_id=self.student_cohort.id,
            commentable_id="cohorted_topic",
        )
    def test_ajax(self, mock_request):
        """The JSON payload carries group_id, group_name and the divided flag."""
        self._create_mock_cohorted_thread(mock_request)
        request = RequestFactory().get(
            "dummy_url",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = self.student
        response = views.single_thread(
            request,
            self.course.id.to_deprecated_string(),
            "cohorted_topic",
            self.mock_thread_id
        )
        self.assertEquals(response.status_code, 200)
        response_data = json.loads(response.content)
        self.assertEquals(
            response_data["content"],
            make_mock_thread_data(
                course=self.course,
                commentable_id="cohorted_topic",
                text=self.mock_text,
                thread_id=self.mock_thread_id,
                num_children=1,
                group_id=self.student_cohort.id,
                group_name=self.student_cohort.name,
                is_commentable_divided=True,
            )
        )
    def test_html(self, mock_request):
        """The HTML rendering embeds the cohort's group name."""
        self._create_mock_cohorted_thread(mock_request)
        self.client.login(username=self.student.username, password='test')
        response = self.client.get(
            reverse('single_thread', kwargs={
                'course_id': unicode(self.course.id),
                'discussion_id': "cohorted_topic",
                'thread_id': self.mock_thread_id,
            })
        )
        self.assertEquals(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
        html = response.content
        # Verify that the group name is correctly included in the HTML
        self.assertRegexpMatches(html, r'"group_name": "student_cohort"')
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class SingleThreadAccessTestCase(CohortedTestCase):
    """Access control for single_thread across cohorts and divided topics."""
    def call_view(self, mock_request, commentable_id, user, group_id, thread_group_id=None, pass_group_id=True):
        """GET single_thread as *user* for a thread owned by *thread_group_id*."""
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy context", thread_id=thread_id, group_id=thread_group_id
        )
        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        request = RequestFactory().get(
            "dummy_url",
            data=request_data,
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = user
        return views.single_thread(
            request,
            self.course.id.to_deprecated_string(),
            commentable_id,
            thread_id
        )
    def test_student_non_cohorted(self, mock_request):
        """Students can always read threads in non-divided topics."""
        resp = self.call_view(mock_request, "non_cohorted_topic", self.student, self.student_cohort.id)
        self.assertEqual(resp.status_code, 200)
    def test_student_same_cohort(self, mock_request):
        """Students can read their own cohort's threads in divided topics."""
        resp = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            thread_group_id=self.student_cohort.id
        )
        self.assertEqual(resp.status_code, 200)
    # this test ensures that a thread response from the cs with group_id: null
    # behaves the same as a thread response without a group_id (see: TNL-444)
    def test_student_global_thread_in_cohorted_topic(self, mock_request):
        """A group-less (global) thread in a divided topic is readable by students."""
        resp = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            thread_group_id=None
        )
        self.assertEqual(resp.status_code, 200)
    def test_student_different_cohort(self, mock_request):
        """Students cannot read another cohort's thread: Http404."""
        self.assertRaises(
            Http404,
            lambda: self.call_view(
                mock_request,
                "cohorted_topic",
                self.student,
                self.student_cohort.id,
                thread_group_id=self.moderator_cohort.id
            )
        )
    def test_moderator_non_cohorted(self, mock_request):
        """Moderators can read threads in non-divided topics."""
        resp = self.call_view(mock_request, "non_cohorted_topic", self.moderator, self.moderator_cohort.id)
        self.assertEqual(resp.status_code, 200)
    def test_moderator_same_cohort(self, mock_request):
        """Moderators can read their own cohort's threads."""
        resp = self.call_view(
            mock_request,
            "cohorted_topic",
            self.moderator,
            self.moderator_cohort.id,
            thread_group_id=self.moderator_cohort.id
        )
        self.assertEqual(resp.status_code, 200)
    def test_moderator_different_cohort(self, mock_request):
        """Moderators can read other cohorts' threads too."""
        resp = self.call_view(
            mock_request,
            "cohorted_topic",
            self.moderator,
            self.moderator_cohort.id,
            thread_group_id=self.student_cohort.id
        )
        self.assertEqual(resp.status_code, 200)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class SingleThreadGroupIdTestCase(CohortedTestCase, GroupIdAssertionMixin):
    """Group info should appear in both HTML and ajax single_thread responses."""
    # Comments-service endpoint the GroupIdAssertionMixin checks against.
    cs_endpoint = "/threads/dummy_thread_id"
    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True, is_ajax=False):
        """GET single_thread via the test client, optionally as an ajax request."""
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy context", group_id=self.student_cohort.id
        )
        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        headers = {}
        if is_ajax:
            headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"
        self.client.login(username=user.username, password='test')
        return self.client.get(
            reverse('single_thread', args=[unicode(self.course.id), commentable_id, "dummy_thread_id"]),
            data=request_data,
            **headers
        )
    def test_group_info_in_html_response(self, mock_request):
        """HTML responses embed the thread's group info."""
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=False
        )
        self._assert_html_response_contains_group_info(response)
    def test_group_info_in_ajax_response(self, mock_request):
        """Ajax responses carry group info in the JSON content."""
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=True
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['content']
        )
@patch('requests.request', autospec=True)
class SingleThreadContentGroupTestCase(ForumsEnableMixin, UrlResetMixin, ContentGroupTestCase):
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(SingleThreadContentGroupTestCase, self).setUp()
def assert_can_access(self, user, discussion_id, thread_id, should_have_access):
"""
Verify that a user has access to a thread within a given
discussion_id when should_have_access is True, otherwise
verify that the user does not have access to that thread.
"""
def call_single_thread():
self.client.login(username=user.username, password='test')
return self.client.get(
reverse('single_thread', args=[unicode(self.course.id), discussion_id, thread_id])
)
if should_have_access:
self.assertEqual(call_single_thread().status_code, 200)
else:
self.assertEqual(call_single_thread().status_code, 404)
def test_staff_user(self, mock_request):
"""
Verify that the staff user can access threads in the alpha,
beta, and global discussion modules.
"""
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)
for discussion_xblock in [self.alpha_module, self.beta_module, self.global_module]:
self.assert_can_access(self.staff_user, discussion_xblock.discussion_id, thread_id, True)
def test_alpha_user(self, mock_request):
"""
Verify that the alpha user can access threads in the alpha and
global discussion modules.
"""
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)
for discussion_xblock in [self.alpha_module, self.global_module]:
self.assert_can_access(self.alpha_user, discussion_xblock.discussion_id, thread_id, True)
self.assert_can_access(self.alpha_user, self.beta_module.discussion_id, thread_id, False)
def test_beta_user(self, mock_request):
"""
Verify that the beta user can access threads in the beta and
global discussion modules.
"""
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)
for discussion_xblock in [self.beta_module, self.global_module]:
self.assert_can_access(self.beta_user, discussion_xblock.discussion_id, thread_id, True)
self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, False)
def test_non_cohorted_user(self, mock_request):
"""
Verify that the non-cohorted user can access threads in just the
global discussion module.
"""
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)
self.assert_can_access(self.non_cohorted_user, self.global_module.discussion_id, thread_id, True)
self.assert_can_access(self.non_cohorted_user, self.alpha_module.discussion_id, thread_id, False)
self.assert_can_access(self.non_cohorted_user, self.beta_module.discussion_id, thread_id, False)
def test_course_context_respected(self, mock_request):
"""
Verify that course threads go through discussion_category_id_access method.
"""
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(
course=self.course, text="dummy content", thread_id=thread_id
)
# Beta user does not have access to alpha_module.
self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, False)
    def test_standalone_context_respected(self, mock_request):
        """
        Verify that standalone threads don't go through discussion_category_id_access method.
        """
        # For this rather pathological test, we are assigning the alpha module discussion_id (commentable_id)
        # to a team so that we can verify that standalone threads don't go through discussion_category_id_access.
        thread_id = "test_thread_id"
        # Creating the team bound to the alpha topic is what flips threads in
        # that topic to a non-"course" (standalone) context.
        CourseTeamFactory(
            name="A team",
            course_id=self.course.id,
            topic_id='topic_id',
            discussion_topic_id=self.alpha_module.discussion_id
        )
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy content", thread_id=thread_id,
            commentable_id=self.alpha_module.discussion_id
        )
        # If a thread returns context other than "course", the access check is not done, and the beta user
        # can see the alpha discussion module.
        self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, True)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class InlineDiscussionContextTestCase(ForumsEnableMixin, ModuleStoreTestCase):
    """
    Tests the `context` value reported for inline discussion threads when the
    discussion topic is bound to a course team.
    """
    def setUp(self):
        super(InlineDiscussionContextTestCase, self).setUp()
        self.course = CourseFactory.create()
        CourseEnrollmentFactory(user=self.user, course_id=self.course.id)
        self.discussion_topic_id = "dummy_topic"
        # Binding the topic to a team is what makes its threads "standalone".
        self.team = CourseTeamFactory(
            name="A team",
            course_id=self.course.id,
            topic_id='topic_id',
            discussion_topic_id=self.discussion_topic_id
        )
        self.team.add_user(self.user)  # pylint: disable=no-member
    def test_context_can_be_standalone(self, mock_request):
        """Threads in a team-linked topic should carry the STANDALONE context."""
        mock_request.side_effect = make_mock_request_impl(
            course=self.course,
            text="dummy text",
            commentable_id=self.discussion_topic_id
        )
        request = RequestFactory().get("dummy_url")
        request.user = self.user
        response = views.inline_discussion(
            request,
            unicode(self.course.id),
            self.discussion_topic_id,
        )
        json_response = json.loads(response.content)
        self.assertEqual(json_response['discussion_data'][0]['context'], ThreadContext.STANDALONE)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class InlineDiscussionGroupIdTestCase(
        CohortedTestCase,
        CohortedTopicGroupIdTestMixin,
        NonCohortedTopicGroupIdTestMixin
):
    """
    Verifies group_id handling for the inline discussion view, for both
    cohorted and non-cohorted topics (via the two mixins).
    """
    # Comments-service endpoint the mixins expect this view to hit.
    cs_endpoint = "/threads"
    def setUp(self):
        super(InlineDiscussionGroupIdTestCase, self).setUp()
        self.cohorted_commentable_id = 'cohorted_topic'
    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
        """Invoke views.inline_discussion as `user`, optionally passing group_id."""
        kwargs = {'commentable_id': self.cohorted_commentable_id}
        if group_id:
            # avoid causing a server error when the LMS chokes attempting
            # to find a group name for the group_id, when we're testing with
            # an invalid one.
            try:
                CourseUserGroup.objects.get(id=group_id)
                kwargs['group_id'] = group_id
            except CourseUserGroup.DoesNotExist:
                pass
        mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)
        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        request = RequestFactory().get(
            "dummy_url",
            data=request_data
        )
        request.user = user
        return views.inline_discussion(
            request,
            self.course.id.to_deprecated_string(),
            commentable_id
        )
    def test_group_info_in_ajax_response(self, mock_request):
        """The AJAX payload should include group info for each thread."""
        response = self.call_view(
            mock_request,
            self.cohorted_commentable_id,
            self.student,
            self.student_cohort.id
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['discussion_data'][0]
        )
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class ForumFormDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
    """
    Verifies group_id handling for the main forum (forum_form_discussion)
    view, for both HTML and AJAX responses.
    """
    # Comments-service endpoint the mixin expects this view to hit.
    cs_endpoint = "/threads"
    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True, is_ajax=False):
        """Hit forum_form_discussion as `user` via the test client."""
        kwargs = {}
        if group_id:
            kwargs['group_id'] = group_id
        mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)
        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        headers = {}
        if is_ajax:
            # Marks the request as AJAX so the view returns JSON instead of HTML.
            headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"
        self.client.login(username=user.username, password='test')
        return self.client.get(
            reverse("discussion.views.forum_form_discussion", args=[unicode(self.course.id)]),
            data=request_data,
            **headers
        )
    def test_group_info_in_html_response(self, mock_request):
        """The rendered HTML should include group info."""
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id
        )
        self._assert_html_response_contains_group_info(response)
    def test_group_info_in_ajax_response(self, mock_request):
        """The AJAX payload should include group info for each thread."""
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=True
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['discussion_data'][0]
        )
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class UserProfileDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
    """
    Verifies that group_id is handled correctly by the user_profile view,
    both in its HTML/AJAX responses and in the requests it forwards to the
    comments service.
    """
    # Comments-service endpoint the mixin expects this view to hit.
    cs_endpoint = "/active_threads"

    def call_view_for_profiled_user(
            self, mock_request, requesting_user, profiled_user, group_id, pass_group_id, is_ajax=False
    ):
        """
        Calls "user_profile" view method on behalf of "requesting_user" to get information about
        the user "profiled_user".
        """
        kwargs = {}
        if group_id:
            kwargs['group_id'] = group_id
        mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)
        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        headers = {}
        if is_ajax:
            # Marks the request as AJAX so the view returns JSON instead of HTML.
            headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"
        self.client.login(username=requesting_user.username, password='test')
        return self.client.get(
            reverse('user_profile', args=[unicode(self.course.id), profiled_user.id]),
            data=request_data,
            **headers
        )

    def call_view(self, mock_request, _commentable_id, user, group_id, pass_group_id=True, is_ajax=False):
        """Mixin hook: profile the requesting user themselves."""
        return self.call_view_for_profiled_user(
            mock_request, user, user, group_id, pass_group_id=pass_group_id, is_ajax=is_ajax
        )

    def test_group_info_in_html_response(self, mock_request):
        """The rendered HTML should include group info."""
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=False
        )
        self._assert_html_response_contains_group_info(response)

    def test_group_info_in_ajax_response(self, mock_request):
        """The AJAX payload should include group info for each thread."""
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=True
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['discussion_data'][0]
        )

    def _test_group_id_passed_to_user_profile(
            self, mock_request, expect_group_id_in_request, requesting_user, profiled_user, group_id, pass_group_id
    ):
        """
        Helper method for testing whether or not group_id was passed to the user_profile request.
        """
        def get_params_from_user_info_call(for_specific_course):
            """
            Returns the request parameters for the user info call with either course_id specified or not,
            depending on value of 'for_specific_course'.
            """
            # There will be 3 calls from user_profile. One has the cs_endpoint "active_threads", and it is already
            # tested. The other 2 calls are for user info; one of those calls is for general information about the user,
            # and it does not specify a course_id. The other call does specify a course_id, and if the caller did not
            # have discussion moderator privileges, it should also contain a group_id.
            for r_call in mock_request.call_args_list:
                if not r_call[0][1].endswith(self.cs_endpoint):
                    params = r_call[1]["params"]
                    has_course_id = "course_id" in params
                    if (for_specific_course and has_course_id) or (not for_specific_course and not has_course_id):
                        return params
            # BUG FIX: the original did `"..." + for_specific_course`, which
            # raised TypeError (str + bool) instead of producing the intended
            # assertion message. Use self.fail with str formatting instead.
            self.fail(
                "Did not find appropriate user_profile call for "
                "'for_specific_course'={}".format(for_specific_course)
            )
        mock_request.reset_mock()
        self.call_view_for_profiled_user(
            mock_request,
            requesting_user,
            profiled_user,
            group_id,
            pass_group_id=pass_group_id,
            is_ajax=False
        )
        # Should never have a group_id if course_id was not included in the request.
        params_without_course_id = get_params_from_user_info_call(False)
        self.assertNotIn("group_id", params_without_course_id)
        params_with_course_id = get_params_from_user_info_call(True)
        if expect_group_id_in_request:
            self.assertIn("group_id", params_with_course_id)
            self.assertEqual(group_id, params_with_course_id["group_id"])
        else:
            self.assertNotIn("group_id", params_with_course_id)

    def test_group_id_passed_to_user_profile_student(self, mock_request):
        """
        Test that the group id is always included when requesting user profile information for a particular
        course if the requester does not have discussion moderation privileges.
        """
        def verify_group_id_always_present(profiled_user, pass_group_id):
            """
            Helper method to verify that group_id is always present for student in course
            (non-privileged user).
            """
            self._test_group_id_passed_to_user_profile(
                mock_request, True, self.student, profiled_user, self.student_cohort.id, pass_group_id
            )
        # In all these test cases, the requesting_user is the student (non-privileged user).
        # The profile returned on behalf of the student is for the profiled_user.
        verify_group_id_always_present(profiled_user=self.student, pass_group_id=True)
        verify_group_id_always_present(profiled_user=self.student, pass_group_id=False)
        verify_group_id_always_present(profiled_user=self.moderator, pass_group_id=True)
        verify_group_id_always_present(profiled_user=self.moderator, pass_group_id=False)

    def test_group_id_user_profile_moderator(self, mock_request):
        """
        Test that the group id is only included when a privileged user requests user profile information for a
        particular course and user if the group_id is explicitly passed in.
        """
        def verify_group_id_present(profiled_user, pass_group_id, requested_cohort=self.moderator_cohort):
            """
            Helper method to verify that group_id is present.
            """
            self._test_group_id_passed_to_user_profile(
                mock_request, True, self.moderator, profiled_user, requested_cohort.id, pass_group_id
            )
        def verify_group_id_not_present(profiled_user, pass_group_id, requested_cohort=self.moderator_cohort):
            """
            Helper method to verify that group_id is not present.
            """
            self._test_group_id_passed_to_user_profile(
                mock_request, False, self.moderator, profiled_user, requested_cohort.id, pass_group_id
            )
        # In all these test cases, the requesting_user is the moderator (privileged user).
        # If the group_id is explicitly passed, it will be present in the request.
        verify_group_id_present(profiled_user=self.student, pass_group_id=True)
        verify_group_id_present(profiled_user=self.moderator, pass_group_id=True)
        verify_group_id_present(
            profiled_user=self.student, pass_group_id=True, requested_cohort=self.student_cohort
        )
        # If the group_id is not explicitly passed, it will not be present because the requesting_user
        # has discussion moderator privileges.
        verify_group_id_not_present(profiled_user=self.student, pass_group_id=False)
        verify_group_id_not_present(profiled_user=self.moderator, pass_group_id=False)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class FollowedThreadsDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
    """
    Verifies group_id handling for the "followed threads" endpoint.
    """
    # Comments-service endpoint the mixin expects this view to hit.
    cs_endpoint = "/subscribed_threads"

    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
        """Invoke views.followed_threads as `user`, optionally passing group_id."""
        mock_kwargs = {'group_id': group_id} if group_id else {}
        mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **mock_kwargs)
        query_params = {"group_id": group_id} if pass_group_id else {}
        request = RequestFactory().get(
            "dummy_url",
            data=query_params,
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = user
        course_key_string = self.course.id.to_deprecated_string()
        return views.followed_threads(request, course_key_string, user.id)

    def test_group_info_in_ajax_response(self, mock_request):
        """The AJAX payload should include group info for each thread."""
        response = self.call_view(
            mock_request, "cohorted_topic", self.student, self.student_cohort.id
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['discussion_data'][0]
        )
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class InlineDiscussionTestCase(ForumsEnableMixin, ModuleStoreTestCase):
    """
    Tests the inline discussion view's courseware metadata and thread context.
    """
    def setUp(self):
        super(InlineDiscussionTestCase, self).setUp()
        self.course = CourseFactory.create(org="TestX", number="101", display_name="Test Course")
        self.student = UserFactory.create()
        CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
        self.discussion1 = ItemFactory.create(
            parent_location=self.course.location,
            category="discussion",
            discussion_id="discussion1",
            display_name='Discussion1',
            discussion_category="Chapter",
            discussion_target="Discussion1"
        )
    def send_request(self, mock_request, params=None):
        """
        Creates and returns a request with params set, and configures
        mock_request to return appropriate values.
        """
        request = RequestFactory().get("dummy_url", params if params else {})
        request.user = self.student
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy content", commentable_id=self.discussion1.discussion_id
        )
        return views.inline_discussion(
            request, self.course.id.to_deprecated_string(), self.discussion1.discussion_id
        )
    def verify_response(self, response):
        """Verifies that the response contains the appropriate courseware_url and courseware_title"""
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        # Title is "<discussion_category> / <discussion_target>" from the fixture above.
        expected_courseware_title = 'Chapter / Discussion1'
        self.assertEqual(response_data["discussion_data"][0]["courseware_title"], expected_courseware_title)
    def test_courseware_data(self, mock_request):
        """Basic smoke test of the courseware metadata in the response."""
        self.verify_response(self.send_request(mock_request))
    def test_context(self, mock_request):
        """Binding the topic to a team should make its threads STANDALONE."""
        team = CourseTeamFactory(
            name='Team Name',
            topic_id='A topic',
            course_id=self.course.id,
            discussion_topic_id=self.discussion1.discussion_id
        )
        team.add_user(self.student)  # pylint: disable=no-member
        response = self.send_request(mock_request)
        # The context param forwarded to the comments service must be STANDALONE.
        self.assertEqual(mock_request.call_args[1]['params']['context'], ThreadContext.STANDALONE)
        self.verify_response(response)
@patch('requests.request', autospec=True)
class UserProfileTestCase(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
    """
    Tests the user_profile view: HTML and AJAX rendering, error cases, and
    the request it forwards to the comments service.
    """
    TEST_THREAD_TEXT = 'userprofile-test-text'
    TEST_THREAD_ID = 'userprofile-test-thread-id'

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        super(UserProfileTestCase, self).setUp()
        self.course = CourseFactory.create()
        self.student = UserFactory.create()
        self.profiled_user = UserFactory.create()
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
        CourseEnrollmentFactory.create(user=self.profiled_user, course_id=self.course.id)

    def get_response(self, mock_request, params, **headers):
        """
        GET the profiled user's profile page as self.student and verify the
        expected active_threads call was made to the comments service.
        """
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text=self.TEST_THREAD_TEXT, thread_id=self.TEST_THREAD_ID
        )
        self.client.login(username=self.student.username, password='test')
        response = self.client.get(
            reverse('user_profile', kwargs={
                'course_id': unicode(self.course.id),
                'user_id': self.profiled_user.id,
            }),
            data=params,
            **headers
        )
        mock_request.assert_any_call(
            "get",
            StringEndsWithMatcher('/users/{}/active_threads'.format(self.profiled_user.id)),
            data=None,
            params=PartialDictMatcher({
                "course_id": self.course.id.to_deprecated_string(),
                "page": params.get("page", 1),
                "per_page": views.THREADS_PER_PAGE
            }),
            headers=ANY,
            timeout=ANY
        )
        return response

    def check_html(self, mock_request, **params):
        """Verify the HTML rendering of the profile page."""
        response = self.get_response(mock_request, params)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
        html = response.content
        self.assertRegexpMatches(html, r'data-page="1"')
        self.assertRegexpMatches(html, r'data-num-pages="1"')
        self.assertRegexpMatches(html, r'<span class="discussion-count">1</span> discussion started')
        self.assertRegexpMatches(html, r'<span class="discussion-count">2</span> comments')
        # BUG FIX: these raw strings were garbled (r''id': '{}'' is a syntax
        # error); restored the intended single-quoted patterns inside
        # double-quoted raw strings.
        self.assertRegexpMatches(html, r"'id': '{}'".format(self.TEST_THREAD_ID))
        self.assertRegexpMatches(html, r"'title': '{}'".format(self.TEST_THREAD_TEXT))
        self.assertRegexpMatches(html, r"'body': '{}'".format(self.TEST_THREAD_TEXT))
        self.assertRegexpMatches(html, r"'username': u'{}'".format(self.student.username))

    def check_ajax(self, mock_request, **params):
        """Verify the JSON rendering of the profile page."""
        response = self.get_response(mock_request, params, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json; charset=utf-8')
        response_data = json.loads(response.content)
        self.assertEqual(
            sorted(response_data.keys()),
            ["annotated_content_info", "discussion_data", "num_pages", "page"]
        )
        self.assertEqual(len(response_data['discussion_data']), 1)
        self.assertEqual(response_data["page"], 1)
        self.assertEqual(response_data["num_pages"], 1)
        self.assertEqual(response_data['discussion_data'][0]['id'], self.TEST_THREAD_ID)
        self.assertEqual(response_data['discussion_data'][0]['title'], self.TEST_THREAD_TEXT)
        self.assertEqual(response_data['discussion_data'][0]['body'], self.TEST_THREAD_TEXT)

    def test_html(self, mock_request):
        self.check_html(mock_request)

    def test_ajax(self, mock_request):
        self.check_ajax(mock_request)

    def test_404_non_enrolled_user(self, __):
        """
        Test that when student try to visit un-enrolled students' discussion profile,
        the system raises Http404.
        """
        unenrolled_user = UserFactory.create()
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        with self.assertRaises(Http404):
            views.user_profile(
                request,
                self.course.id.to_deprecated_string(),
                unenrolled_user.id
            )

    def test_404_profiled_user(self, mock_request):
        """A nonexistent profiled user id should raise Http404."""
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        with self.assertRaises(Http404):
            views.user_profile(
                request,
                self.course.id.to_deprecated_string(),
                -999
            )

    def test_404_course(self, mock_request):
        """A nonexistent course id should raise Http404."""
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        with self.assertRaises(Http404):
            views.user_profile(
                request,
                "non/existent/course",
                self.profiled_user.id
            )

    def test_post(self, mock_request):
        """POST is not allowed on the profile view (405)."""
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text=self.TEST_THREAD_TEXT, thread_id=self.TEST_THREAD_ID
        )
        request = RequestFactory().post("dummy_url")
        request.user = self.student
        response = views.user_profile(
            request,
            self.course.id.to_deprecated_string(),
            self.profiled_user.id
        )
        self.assertEqual(response.status_code, 405)
@patch('requests.request', autospec=True)
class CommentsServiceRequestHeadersTestCase(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
    """
    Verifies the HTTP headers (Accept-Language, API key) the LMS sends on
    every request to the comments service.
    """
    CREATE_USER = False

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        # Invoke UrlResetMixin setUp. BUG FIX: the original called
        # super().setUp() twice in a row, redundantly re-running the whole
        # setup chain (url reset, modulestore setup); one call suffices.
        super(CommentsServiceRequestHeadersTestCase, self).setUp()
        username = "foo"
        password = "bar"
        self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})
        self.student = UserFactory.create(username=username, password=password)
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
        self.assertTrue(
            self.client.login(username=username, password=password)
        )
        self.addCleanup(translation.deactivate)

    def assert_all_calls_have_header(self, mock_request, key, value):
        """Assert every comments-service request carried header key: value."""
        expected = call(
            ANY,  # method
            ANY,  # url
            data=ANY,
            params=ANY,
            headers=PartialDictMatcher({key: value}),
            timeout=ANY
        )
        for actual in mock_request.call_args_list:
            self.assertEqual(expected, actual)

    def test_accept_language(self, mock_request):
        """The client's Accept-Language header is forwarded to the service."""
        lang = "eo"
        text = "dummy content"
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
        self.client.get(
            reverse(
                "discussion.views.single_thread",
                kwargs={
                    "course_id": self.course.id.to_deprecated_string(),
                    "discussion_id": "dummy_discussion_id",
                    "thread_id": thread_id,
                }
            ),
            HTTP_ACCEPT_LANGUAGE=lang,
        )
        self.assert_all_calls_have_header(mock_request, "Accept-Language", lang)

    @override_settings(COMMENTS_SERVICE_KEY="test_api_key")
    def test_api_key(self, mock_request):
        """The configured API key is sent as X-Edx-Api-Key."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy", thread_id="dummy")
        self.client.get(
            reverse(
                "discussion.views.forum_form_discussion",
                kwargs={"course_id": self.course.id.to_deprecated_string()}
            ),
        )
        self.assert_all_calls_have_header(mock_request, "X-Edx-Api-Key", "test_api_key")
class InlineDiscussionUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
    """
    Unicode round-trip test for the inline discussion view (the mixin drives
    _test_unicode_data with a range of unicode strings).
    """
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(InlineDiscussionUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()
    @classmethod
    def setUpTestData(cls):
        super(InlineDiscussionUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
    def setUp(self):
        super(InlineDiscussionUnicodeTestCase, self).setUp()
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Verify unicode `text` survives into the thread title and body."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        response = views.inline_discussion(
            request, self.course.id.to_deprecated_string(), self.course.discussion_topics['General']['id']
        )
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        self.assertEqual(response_data["discussion_data"][0]["title"], text)
        self.assertEqual(response_data["discussion_data"][0]["body"], text)
class ForumFormDiscussionUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
    """
    Unicode round-trip test for the main forum (forum_form_discussion) view.
    """
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(ForumFormDiscussionUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()
    @classmethod
    def setUpTestData(cls):
        super(ForumFormDiscussionUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
    def setUp(self):
        super(ForumFormDiscussionUnicodeTestCase, self).setUp()
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Verify unicode `text` survives into the thread title and body."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"  # so request.is_ajax() == True
        response = views.forum_form_discussion(request, self.course.id.to_deprecated_string())
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        self.assertEqual(response_data["discussion_data"][0]["title"], text)
        self.assertEqual(response_data["discussion_data"][0]["body"], text)
@ddt.ddt
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class ForumDiscussionXSSTestCase(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
    """
    Verifies that script injected via query-string parameters is not echoed
    back into the rendered discussion pages.
    """
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        super(ForumDiscussionXSSTestCase, self).setUp()
        username = "foo"
        password = "bar"
        self.course = CourseFactory.create()
        self.student = UserFactory.create(username=username, password=password)
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
        self.assertTrue(self.client.login(username=username, password=password))
    @ddt.data('"><script>alert(1)</script>', '<script>alert(1)</script>', '</script><script>alert(1)</script>')
    @patch('student.models.cc.User.from_django_user')
    def test_forum_discussion_xss_prevent(self, malicious_code, mock_user, mock_req):  # pylint: disable=unused-argument
        """
        Test that XSS attack is prevented
        """
        mock_user.return_value.to_dict.return_value = {}
        reverse_url = "%s%s" % (reverse(
            "discussion.views.forum_form_discussion",
            kwargs={"course_id": unicode(self.course.id)}), '/forum_form_discussion')
        # Test that malicious code does not appear in html
        url = "%s?%s=%s" % (reverse_url, 'sort_key', malicious_code)
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertNotIn(malicious_code, resp.content)
    @ddt.data('"><script>alert(1)</script>', '<script>alert(1)</script>', '</script><script>alert(1)</script>')
    @patch('student.models.cc.User.from_django_user')
    @patch('student.models.cc.User.active_threads')
    def test_forum_user_profile_xss_prevent(self, malicious_code, mock_threads, mock_from_django_user, mock_request):
        """
        Test that XSS attack is prevented
        """
        mock_threads.return_value = [], 1, 1
        mock_from_django_user.return_value.to_dict.return_value = {}
        mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy')
        url = reverse('discussion.views.user_profile',
                      kwargs={'course_id': unicode(self.course.id), 'user_id': str(self.student.id)})
        # Test that malicious code does not appear in html
        url_string = "%s?%s=%s" % (url, 'page', malicious_code)
        resp = self.client.get(url_string)
        self.assertEqual(resp.status_code, 200)
        self.assertNotIn(malicious_code, resp.content)
class ForumDiscussionSearchUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
    """
    Unicode round-trip test for forum search (forum_form_discussion with a
    "text" query parameter).
    """
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(ForumDiscussionSearchUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()
    @classmethod
    def setUpTestData(cls):
        super(ForumDiscussionSearchUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
    def setUp(self):
        super(ForumDiscussionSearchUnicodeTestCase, self).setUp()
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Verify unicode search `text` survives into the thread title and body."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        data = {
            "ajax": 1,
            "text": text,
        }
        request = RequestFactory().get("dummy_url", data)
        request.user = self.student
        request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"  # so request.is_ajax() == True
        response = views.forum_form_discussion(request, self.course.id.to_deprecated_string())
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        self.assertEqual(response_data["discussion_data"][0]["title"], text)
        self.assertEqual(response_data["discussion_data"][0]["body"], text)
class SingleThreadUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
    """
    Unicode round-trip test for the single_thread view.
    """
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(SingleThreadUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create(discussion_topics={'dummy_discussion_id': {'id': 'dummy_discussion_id'}})
    @classmethod
    def setUpTestData(cls):
        super(SingleThreadUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
    def setUp(self):
        super(SingleThreadUnicodeTestCase, self).setUp()
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Verify unicode `text` survives into the thread title and body."""
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"  # so request.is_ajax() == True
        response = views.single_thread(request, self.course.id.to_deprecated_string(), "dummy_discussion_id", thread_id)
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        self.assertEqual(response_data["content"]["title"], text)
        self.assertEqual(response_data["content"]["body"], text)
class UserProfileUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
    """
    Unicode round-trip test for the user_profile view.
    """
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(UserProfileUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()
    @classmethod
    def setUpTestData(cls):
        super(UserProfileUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
    def setUp(self):
        super(UserProfileUnicodeTestCase, self).setUp()
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Verify unicode `text` survives into the thread title and body."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"  # so request.is_ajax() == True
        response = views.user_profile(request, self.course.id.to_deprecated_string(), str(self.student.id))
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        self.assertEqual(response_data["discussion_data"][0]["title"], text)
        self.assertEqual(response_data["discussion_data"][0]["body"], text)
class FollowedThreadsUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin):
    """
    Unicode round-trip test for the followed_threads view.
    """
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(FollowedThreadsUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()
    @classmethod
    def setUpTestData(cls):
        super(FollowedThreadsUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
    def setUp(self):
        super(FollowedThreadsUnicodeTestCase, self).setUp()
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Verify unicode `text` survives into the thread title and body."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"  # so request.is_ajax() == True
        response = views.followed_threads(request, self.course.id.to_deprecated_string(), str(self.student.id))
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        self.assertEqual(response_data["discussion_data"][0]["title"], text)
        self.assertEqual(response_data["discussion_data"][0]["body"], text)
class EnrollmentTestCase(ForumsEnableMixin, ModuleStoreTestCase):
    """
    Tests for the behavior of views depending on if the student is enrolled
    in the course
    """
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        super(EnrollmentTestCase, self).setUp()
        self.course = CourseFactory.create()
        self.student = UserFactory.create()

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def test_unenrolled(self, mock_request):
        # An unenrolled student must be redirected rather than shown the forum.
        mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy')
        unenrolled_request = RequestFactory().get('dummy_url')
        unenrolled_request.user = self.student
        course_key_string = self.course.id.to_deprecated_string()
        with self.assertRaises(CourseAccessRedirect):
            views.forum_form_discussion(unenrolled_request, course_id=course_key_string)
@patch('requests.request', autospec=True)
class EnterpriseConsentTestCase(EnterpriseTestConsentRequired, ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
    """
    Ensure that the Enterprise Data Consent redirects are in place only when consent is required.
    """
    CREATE_USER = False
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        # Invoke UrlResetMixin setUp
        super(EnterpriseConsentTestCase, self).setUp()
        username = "foo"
        password = "bar"
        self.discussion_id = 'dummy_discussion_id'
        self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': self.discussion_id}})
        self.student = UserFactory.create(username=username, password=password)
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
        self.assertTrue(
            self.client.login(username=username, password=password)
        )
        self.addCleanup(translation.deactivate)
    def test_consent_required(self, mock_request):
        """
        Test that enterprise data sharing consent is required when enabled for the various discussion views.
        """
        thread_id = 'dummy'
        course_id = unicode(self.course.id)
        mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy', thread_id=thread_id)
        # Both the forum landing page and a single-thread page must enforce consent.
        for url in (
                reverse('discussion.views.forum_form_discussion',
                        kwargs=dict(course_id=course_id)),
                reverse('discussion.views.single_thread',
                        kwargs=dict(course_id=course_id, discussion_id=self.discussion_id, thread_id=thread_id)),
        ):
            self.verify_consent_required(self.client, url)
class DividedDiscussionsTestCase(CohortViewsTestCase):
    """
    Shared fixture helpers for tests that exercise divided (cohorted)
    discussion topics.
    """

    def create_divided_discussions(self):
        """
        Set up a divided discussion in the system, complete with all the fixings
        """
        inline_topics = ['Topic A']
        course_wide_topics = ["Topic B"]

        # The inline discussion is an XBlock placed in the course tree.
        ItemFactory.create(
            parent_location=self.course.location,
            category="discussion",
            discussion_id=topic_name_to_id(self.course, "Topic A"),
            discussion_category="Chapter",
            discussion_target="Discussion",
            start=datetime.now()
        )

        config_course_cohorts(
            self.course,
            is_cohorted=True,
        )
        # The course-wide discussion is declared via the course's discussion
        # settings; every topic (inline and course-wide) is marked divided.
        config_course_discussions(
            self.course,
            discussion_topics={"Topic B": {"id": "Topic B"}},
            divided_discussions=inline_topics + course_wide_topics
        )

        return inline_topics, course_wide_topics
class CourseDiscussionTopicsTestCase(DividedDiscussionsTestCase):
    """
    Tests the `divide_discussion_topics` view.
    """

    def test_non_staff(self):
        """
        Verify that we cannot access divide_discussion_topics if we're a non-staff user.
        """
        self._verify_non_staff_cannot_access(views.discussion_topics, "GET", [unicode(self.course.id)])

    def test_get_discussion_topics(self):
        """
        Verify that discussion_topics is working for HTTP GET.
        """
        # create inline & course-wide discussion to verify the different map.
        self.create_divided_discussions()

        response = self.get_handler(self.course, handler=views.discussion_topics)
        # Start dates are dynamic (datetime.now() at fixture-creation time),
        # so pull them out of the actual response instead of hard-coding them.
        start_date = response['inline_discussions']['subcategories']['Chapter']['start_date']
        expected_response = {
            "course_wide_discussions": {
                'children': [['Topic B', TYPE_ENTRY]],
                'entries': {
                    'Topic B': {
                        'sort_key': 'A',
                        'is_divided': True,
                        'id': topic_name_to_id(self.course, "Topic B"),
                        'start_date': response['course_wide_discussions']['entries']['Topic B']['start_date']
                    }
                }
            },
            "inline_discussions": {
                'subcategories': {
                    'Chapter': {
                        'subcategories': {},
                        'children': [['Discussion', TYPE_ENTRY]],
                        'entries': {
                            'Discussion': {
                                'sort_key': None,
                                'is_divided': True,
                                'id': topic_name_to_id(self.course, "Topic A"),
                                'start_date': start_date
                            }
                        },
                        'sort_key': 'Chapter',
                        'start_date': start_date
                    }
                },
                'children': [['Chapter', TYPE_SUBCATEGORY]]
            }
        }

        self.assertEqual(response, expected_response)
class CourseDiscussionsHandlerTestCase(DividedDiscussionsTestCase):
    """
    Tests the course_discussion_settings_handler
    """

    def get_expected_response(self):
        """
        Returns the static response dict.
        """
        return {
            u'always_divide_inline_discussions': False,
            u'divided_inline_discussions': [],
            u'divided_course_wide_discussions': [],
            u'id': 1,
            u'division_scheme': u'cohort',
            u'available_division_schemes': [u'cohort']
        }

    def test_non_staff(self):
        """
        Verify that we cannot access course_discussions_settings_handler if we're a non-staff user.
        """
        # NOTE(review): standardized on the views.-qualified handler reference
        # used by the other tests in this class (previously a bare import was
        # used here, inconsistently with the rest of the class).
        self._verify_non_staff_cannot_access(
            views.course_discussions_settings_handler, "GET", [unicode(self.course.id)]
        )
        self._verify_non_staff_cannot_access(
            views.course_discussions_settings_handler, "PATCH", [unicode(self.course.id)]
        )

    def test_update_always_divide_inline_discussion_settings(self):
        """
        Verify that course_discussions_settings_handler is working for always_divide_inline_discussions via HTTP PATCH.
        """
        config_course_cohorts(self.course, is_cohorted=True)

        # Baseline GET must return the default settings.
        response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
        expected_response = self.get_expected_response()
        self.assertEqual(response, expected_response)

        # PATCH the flag and confirm the handler echoes the updated settings.
        expected_response['always_divide_inline_discussions'] = True
        response = self.patch_handler(
            self.course, data=expected_response, handler=views.course_discussions_settings_handler
        )
        self.assertEqual(response, expected_response)

    def test_update_course_wide_discussion_settings(self):
        """
        Verify that course_discussions_settings_handler is working for divided_course_wide_discussions via HTTP PATCH.
        """
        # course-wide discussion
        discussion_topics = {
            "Topic B": {"id": "Topic B"},
        }

        config_course_cohorts(self.course, is_cohorted=True)
        config_course_discussions(self.course, discussion_topics=discussion_topics)

        response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
        expected_response = self.get_expected_response()
        self.assertEqual(response, expected_response)

        # Divide the course-wide topic and confirm the round-trip.
        expected_response['divided_course_wide_discussions'] = [topic_name_to_id(self.course, "Topic B")]
        response = self.patch_handler(
            self.course, data=expected_response, handler=views.course_discussions_settings_handler
        )
        self.assertEqual(response, expected_response)

    def test_update_inline_discussion_settings(self):
        """
        Verify that course_discussions_settings_handler is working for divided_inline_discussions via HTTP PATCH.
        """
        config_course_cohorts(self.course, is_cohorted=True)

        response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
        expected_response = self.get_expected_response()
        self.assertEqual(response, expected_response)

        now = datetime.now()
        # inline discussion
        ItemFactory.create(
            parent_location=self.course.location,
            category="discussion",
            discussion_id="Topic_A",
            discussion_category="Chapter",
            discussion_target="Discussion",
            start=now
        )

        # Divide the inline topic and confirm the round-trip.
        expected_response['divided_inline_discussions'] = ["Topic_A"]
        response = self.patch_handler(
            self.course, data=expected_response, handler=views.course_discussions_settings_handler
        )
        self.assertEqual(response, expected_response)

    def test_get_settings(self):
        """
        Verify that course_discussions_settings_handler is working for HTTP GET.
        """
        divided_inline_discussions, divided_course_wide_discussions = self.create_divided_discussions()

        response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
        expected_response = self.get_expected_response()

        expected_response['divided_inline_discussions'] = [topic_name_to_id(self.course, name)
                                                           for name in divided_inline_discussions]
        expected_response['divided_course_wide_discussions'] = [topic_name_to_id(self.course, name)
                                                                for name in divided_course_wide_discussions]

        self.assertEqual(response, expected_response)

    def test_update_settings_with_invalid_field_data_type(self):
        """
        Verify that course_discussions_settings_handler return HTTP 400 if field data type is incorrect.
        """
        config_course_cohorts(self.course, is_cohorted=True)

        # A string where a bool is expected must be rejected with HTTP 400.
        response = self.patch_handler(
            self.course,
            data={'always_divide_inline_discussions': ''},
            expected_response_code=400,
            handler=views.course_discussions_settings_handler
        )
        self.assertEqual(
            "Incorrect field type for `{}`. Type must be `{}`".format('always_divide_inline_discussions', bool.__name__),
            response.get("error")
        )

    def test_available_schemes(self):
        """
        Verify that available_division_schemes reflects cohort configuration
        and the number of enrollment modes on the course.
        """
        # Cohorts disabled, single enrollment mode.
        config_course_cohorts(self.course, is_cohorted=False)
        response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
        expected_response = self.get_expected_response()
        expected_response['available_division_schemes'] = []
        self.assertEqual(response, expected_response)

        # Add 2 enrollment modes
        CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.AUDIT)
        CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.VERIFIED)
        response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
        expected_response['available_division_schemes'] = [CourseDiscussionSettings.ENROLLMENT_TRACK]
        self.assertEqual(response, expected_response)

        # Enable cohorts
        config_course_cohorts(self.course, is_cohorted=True)
        response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)
        expected_response['available_division_schemes'] = [
            CourseDiscussionSettings.COHORT, CourseDiscussionSettings.ENROLLMENT_TRACK
        ]
        self.assertEqual(response, expected_response)
| miptliot/edx-platform | lms/djangoapps/discussion/tests/test_views.py | Python | agpl-3.0 | 76,837 | [
"VisIt"
] | 5e96f28d0445ebb2c96118d048d4f77f53e40496dc3c180e559a8695836e028b |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.