| text (stringlengths 12-1.05M) | repo_name (stringlengths 5-86) | path (stringlengths 4-191) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 12-1.05M) | keyword (listlengths 1-23) | text_hash (stringlengths 64-64) |
|---|---|---|---|---|---|---|---|
#!/usr/bin/env python
#Directory Rescue Crawler, direscraw v1.43
#Copyright (c) 2014 by Brian Mikolajczyk, brianm12@gmail.com
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import fnmatch
import os.path
from pipes import quote
import subprocess
import time
def main(input_dir=None, output_dir=None, blacklist=None, nosum=False,
resume=False, debug=False):
if debug:
print("Debugging mode: drclog retention enabled." + '\n')
input_dir = os.path.abspath(input_dir)
output_dir = os.path.abspath(output_dir)
try:
os.mkdir(output_dir)
except OSError:
pass
#Handling of blacklist
oblist = set(['drclog', 'error_summary', 'full_error_summary'])
if blacklist is None:
blwild = False
blacklist = oblist
else:
blwild = True
blacklist = set(blacklist) | oblist
tfmt = '%Y-%m-%d %H:%M:%S'
top_input_dir = os.path.split(input_dir)[1]
fullskipset = set([])
if os.path.isfile(os.path.join(output_dir, 'full_error_summary')) and not\
resume:
os.remove(os.path.join(output_dir, 'full_error_summary'))
with open(os.path.join(output_dir, 'full_error_summary'),
'a+') as full_error_summary:
if not nosum and not resume:
full_error_summary.write('File Error% RunTime' + '\n')
#Starting os.walk for loop
for current_dir, dirnames, unfilenames in os.walk(input_dir):
#Check for wildcards in blacklist
if blwild:
infiles = set([f for f in os.listdir(
current_dir)]) - blacklist
wildlist = []
for el in blacklist:
wildlist = wildlist + fnmatch.filter(infiles, el)
blacklist = blacklist | set(wildlist)
dirnames[:] = set(dirnames) - blacklist
filenames = sorted(unfilenames)
relative_dir = os.path.relpath(current_dir, input_dir)
current_out_dir = os.path.join(output_dir,
top_input_dir, relative_dir)
if os.path.split(current_out_dir)[1] == '.':
current_out_dir = os.path.split(current_out_dir)[0]
try:
os.makedirs(current_out_dir)
except OSError:
pass
if resume:
outfiles = list(set([f for f in os.listdir(current_out_dir)
if os.path.isfile(os.path.join(
current_out_dir, f))]) - blacklist)
if len(filenames) == len(outfiles) and not\
os.path.isfile(os.path.join(current_out_dir, 'copylog')):
continue
try:
sindex = filenames.index(outfiles[-1])
except (IndexError, ValueError):
sindex = 0
else:
sindex = 0
skipset = set([])
with open(os.path.join(current_out_dir, 'drclog'),
'w+') as drclog, open(os.path.join(current_out_dir,
'copylog'), 'w'):
#Start directory loop
for filename in [f for f in filenames[sindex:]
if f not in blacklist]:
in_file_path = os.path.join(current_dir, filename)
out_file_path = os.path.join(current_out_dir, filename)
files = in_file_path, out_file_path
drclog.write('{}\n'.format(filename))
drclog.flush()
print('\n' + in_file_path)
try:
subprocess.call('ddrescue {} | tee -a {}'
.format(' '.join(map(quote, files)),
quote(os.path.join(current_out_dir, 'drclog'))),
shell=True)
except KeyboardInterrupt:
skipset.add(in_file_path)
fullskipset.add(in_file_path)
print('\n' + in_file_path + ' has been skipped')
drclog.seek(0)
#Creating error report
if not nosum:
with open(os.path.join(current_out_dir, 'error_summary'),
'w') as error_summary:
error_summary.write(time.strftime(tfmt) + '\n')
error_summary.write(current_out_dir + '\n')
if len(skipset) > 0:
error_summary.write('\n' + 'Files Skipped: '\
+ str(len(skipset)) + ';' + '\n')
for skipel in skipset:
error_summary.write(skipel + '\n')
error_summary.write('\n')
error_summary.write('File Error% RunTime' + '\n')
error_summary.flush()
full_error_summary.write(current_out_dir + '\n')
full_error_summary.flush()
subprocess.call(['errcalc',
os.path.join(current_out_dir,
'drclog')], stdout=error_summary)
subprocess.call(['errcalc',
os.path.join(current_out_dir,
'drclog')], stdout=full_error_summary)
if not debug:
os.remove(os.path.join(current_out_dir, 'drclog'))
os.remove(os.path.join(current_out_dir, 'copylog'))
#Creating full error report
if not nosum:
FES = open(os.path.join(output_dir, 'full_error_summary'))
FES_lines = FES.readlines()
FES.close()
with open(os.path.join(output_dir, 'full_error_summary'),
'w') as full_error_summary:
full_error_summary.write(time.strftime(tfmt) + '\n')
if len(fullskipset) > 0:
full_error_summary.write('\n' + 'Files Skipped: '\
+ str(len(fullskipset)) + ';' + '\n')
for skipel in fullskipset:
full_error_summary.write(skipel + '\n')
full_error_summary.write('\n')
full_error_summary.flush()
for lines in FES_lines:
full_error_summary.write(lines)
else:
os.remove(os.path.join(output_dir, 'full_error_summary'))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir')
parser.add_argument('output_dir')
parser.add_argument('--version', action='version',
version='direscraw v1.43; errcalc v2.1')
parser.add_argument('-d', '--debug', action='store_true',
help=argparse.SUPPRESS)
parser.add_argument('-b', '--blacklist', nargs='+',
help='Add arguments separated by spaces to omit\
filenames/directories')
parser.add_argument('-n', '--nosum', action='store_true',
help='No error percentage and runtime summary in subdirectories')
parser.add_argument('-r', '--resume', action='store_true',
help='Resumes a previously-interrupted direscraw session skipping\
already recovered directories')
main(**vars(parser.parse_args()))
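# Illustrative invocation (not part of the original script; the paths and
# blacklist patterns below are placeholders, and the companion errcalc tool
# is assumed to be on PATH for the summaries):
#
#     python direscraw.py /mnt/failing_disk /mnt/rescue -b '*.tmp' lost+found -r
#
# This walks the input tree, mirrors it under the output directory, runs
# ddrescue on every file, and, unless -n/--nosum is given, writes a
# per-directory error_summary plus a top-level full_error_summary.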
| bmikolaj/direscraw | direscraw.py | Python | gpl-3.0 | 8,305 | ["Brian"] | 44e6ae2c2feaa9b95eed9a47d389e72e6f6a9f55808e02fa8d8db3b12aac4e50 |
# Copyright (C) 2017
# Jakub Krajniak (jkrajniak at gmail.com)
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import filecmp
import espressopp
import unittest
expected_files = ['expected.xtc']
class TestDumpXTCAdress(unittest.TestCase):
def setUp(self):
system, integrator = espressopp.standard_system.LennardJones(0, (20,20,20))
self.system = system
self.integrator = integrator
self.ftpl = espressopp.FixedTupleListAdress(self.system.storage)
self.system.storage.setFixedTuplesAdress(self.ftpl)
def test_simple_xtc(self):
particle_list = [
(1, espressopp.Real3D(4.75575, 5.82131, 16.9163), 0),
(2, espressopp.Real3D(3.04417, 11.7107, 3.86951), 1),
(3, espressopp.Real3D(16.2125, 3.47061, 9.69966), 1),
(4, espressopp.Real3D(3.03725, 7.33914, 9.83473), 0),
(5, espressopp.Real3D(18.2019, 5.30514, 17.8638), 1),
(6, espressopp.Real3D(4.40702, 12.636, 11.4215), 1),
(7, espressopp.Real3D(6.64315, 2.0891, 10.0586), 0),
(8, espressopp.Real3D(11.3479, 17.0833, 0.802817), 1),
(9, espressopp.Real3D(2.16045, 12.7879, 0.26222), 1)]
self.system.storage.addParticles(particle_list, 'id', 'pos', 'adrat')
adress_tuple = [(1, 2, 3), (4, 5, 6), (7, 8, 9)]
self.ftpl.addTuples(adress_tuple)
file_xtc_9atoms = "test.xtc"
dump_xtc = espressopp.io.DumpXTCAdress(
self.system,
self.ftpl,
self.integrator,
filename=file_xtc_9atoms,
unfolded=False,
length_factor=1.0,
append=False)
dump_xtc.dump()
# self.assertTrue(
# filecmp.cmp(file_xtc_9atoms, expected_files[0], shallow = False),
# "!!! Error! Files are not equal!! They should be equal!")
def tearDown(self):
os.remove("test.xtc")
if __name__ == '__main__':
unittest.main()
| espressopp/espressopp | testsuite/FileIOTests/dump_xtc_adress/test_dump_xtc_adress.py | Python | gpl-3.0 | 2,615 | ["ESPResSo"] | e8e0e33d0e35f22b76126f36aee3aa1674cefc39df71ede6f79c49b745446ea4 |
# Copyright (C) 2019-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import functools
import logging
from unittest.mock import patch
import pytest
from swh.journal.client import JournalClient
from swh.model.tests.swh_model_data import TEST_OBJECTS
from swh.storage import get_storage
from swh.storage.backfill import (
PARTITION_KEY,
JournalBackfiller,
byte_ranges,
compute_query,
raw_extrinsic_metadata_target_ranges,
)
from swh.storage.in_memory import InMemoryStorage
from swh.storage.replay import ModelObjectDeserializer, process_replay_objects
from swh.storage.tests.test_replay import check_replayed
TEST_CONFIG = {
"journal_writer": {
"brokers": ["localhost"],
"prefix": "swh.tmp_journal.new",
"client_id": "swh.journal.client.test",
},
"storage": {"cls": "postgresql", "db": "service=swh-dev"},
}
def test_config_ko_missing_mandatory_key():
"""Missing configuration key will make the initialization fail
"""
for key in TEST_CONFIG.keys():
config = TEST_CONFIG.copy()
config.pop(key)
with pytest.raises(ValueError) as e:
JournalBackfiller(config)
error = "Configuration error: The following keys must be provided: %s" % (
",".join([key]),
)
assert e.value.args[0] == error
def test_config_ko_unknown_object_type():
"""Parse arguments will fail if the object type is unknown
"""
backfiller = JournalBackfiller(TEST_CONFIG)
with pytest.raises(ValueError) as e:
backfiller.parse_arguments("unknown-object-type", 1, 2)
error = (
"Object type unknown-object-type is not supported. "
"The only possible values are %s" % (", ".join(sorted(PARTITION_KEY)))
)
assert e.value.args[0] == error
def test_compute_query_content():
query, where_args, column_aliases = compute_query("content", "\x000000", "\x000001")
assert where_args == ["\x000000", "\x000001"]
assert column_aliases == [
"sha1",
"sha1_git",
"sha256",
"blake2s256",
"length",
"status",
"ctime",
]
assert (
query
== """
select sha1,sha1_git,sha256,blake2s256,length,status,ctime
from content
where (sha1) >= %s and (sha1) < %s
"""
)
def test_compute_query_skipped_content():
query, where_args, column_aliases = compute_query("skipped_content", None, None)
assert where_args == []
assert column_aliases == [
"sha1",
"sha1_git",
"sha256",
"blake2s256",
"length",
"ctime",
"status",
"reason",
]
assert (
query
== """
select sha1,sha1_git,sha256,blake2s256,length,ctime,status,reason
from skipped_content
"""
)
def test_compute_query_origin_visit():
query, where_args, column_aliases = compute_query("origin_visit", 1, 10)
assert where_args == [1, 10]
assert column_aliases == [
"visit",
"type",
"origin",
"date",
]
assert (
query
== """
select visit,type,origin.url as origin,date
from origin_visit
left join origin on origin_visit.origin=origin.id
where (origin_visit.origin) >= %s and (origin_visit.origin) < %s
"""
)
def test_compute_query_release():
query, where_args, column_aliases = compute_query("release", "\x000002", "\x000003")
assert where_args == ["\x000002", "\x000003"]
assert column_aliases == [
"id",
"date",
"date_offset_bytes",
"comment",
"name",
"synthetic",
"target",
"target_type",
"author_id",
"author_name",
"author_email",
"author_fullname",
"raw_manifest",
]
assert (
query
== """
select release.id as id,date,date_offset_bytes,comment,release.name as name,synthetic,target,target_type,a.id as author_id,a.name as author_name,a.email as author_email,a.fullname as author_fullname,raw_manifest
from release
left join person a on release.author=a.id
where (release.id) >= %s and (release.id) < %s
""" # noqa
)
@pytest.mark.parametrize("numbits", [2, 3, 8, 16])
def test_byte_ranges(numbits):
ranges = list(byte_ranges(numbits))
assert len(ranges) == 2 ** numbits
assert ranges[0][0] is None
assert ranges[-1][1] is None
bounds = []
for i, (left, right) in enumerate(zip(ranges[:-1], ranges[1:])):
assert left[1] == right[0], f"Mismatched bounds in {i}th range"
bounds.append(left[1])
assert bounds == sorted(bounds)
def test_raw_extrinsic_metadata_target_ranges():
ranges = list(raw_extrinsic_metadata_target_ranges())
assert ranges[0][0] == ""
assert ranges[-1][1] is None
bounds = []
for i, (left, right) in enumerate(zip(ranges[:-1], ranges[1:])):
assert left[1] == right[0], f"Mismatched bounds in {i}th range"
bounds.append(left[1])
assert bounds == sorted(bounds)
RANGE_GENERATORS = {
"content": lambda start, end: [(None, None)],
"skipped_content": lambda start, end: [(None, None)],
"directory": lambda start, end: [(None, None)],
"extid": lambda start, end: [(None, None)],
"metadata_authority": lambda start, end: [(None, None)],
"metadata_fetcher": lambda start, end: [(None, None)],
"revision": lambda start, end: [(None, None)],
"release": lambda start, end: [(None, None)],
"snapshot": lambda start, end: [(None, None)],
"origin": lambda start, end: [(None, 10000)],
"origin_visit": lambda start, end: [(None, 10000)],
"origin_visit_status": lambda start, end: [(None, 10000)],
"raw_extrinsic_metadata": lambda start, end: [(None, None)],
}
@patch("swh.storage.backfill.RANGE_GENERATORS", RANGE_GENERATORS)
def test_backfiller(
swh_storage_backend_config,
kafka_prefix: str,
kafka_consumer_group: str,
kafka_server: str,
caplog,
):
prefix1 = f"{kafka_prefix}-1"
prefix2 = f"{kafka_prefix}-2"
journal1 = {
"cls": "kafka",
"brokers": [kafka_server],
"client_id": "kafka_writer-1",
"prefix": prefix1,
}
swh_storage_backend_config["journal_writer"] = journal1
storage = get_storage(**swh_storage_backend_config)
# fill the storage and the journal (under prefix1)
for object_type, objects in TEST_OBJECTS.items():
method = getattr(storage, object_type + "_add")
method(objects)
# now apply the backfiller on the storage to fill the journal under prefix2
backfiller_config = {
"journal_writer": {
"brokers": [kafka_server],
"client_id": "kafka_writer-2",
"prefix": prefix2,
},
"storage": swh_storage_backend_config,
}
# Backfilling
backfiller = JournalBackfiller(backfiller_config)
for object_type in TEST_OBJECTS:
backfiller.run(object_type, None, None)
# Trace log messages for unhandled object types in the replayer
caplog.set_level(logging.DEBUG, "swh.storage.replay")
# now check journal contents are the same under both topics
# use the replayer scaffolding to fill storages to make it a bit easier
# Replaying #1
deserializer = ModelObjectDeserializer()
sto1 = get_storage(cls="memory")
replayer1 = JournalClient(
brokers=kafka_server,
group_id=f"{kafka_consumer_group}-1",
prefix=prefix1,
stop_on_eof=True,
value_deserializer=deserializer.convert,
)
worker_fn1 = functools.partial(process_replay_objects, storage=sto1)
replayer1.process(worker_fn1)
# Replaying #2
sto2 = get_storage(cls="memory")
replayer2 = JournalClient(
brokers=kafka_server,
group_id=f"{kafka_consumer_group}-2",
prefix=prefix2,
stop_on_eof=True,
value_deserializer=deserializer.convert,
)
worker_fn2 = functools.partial(process_replay_objects, storage=sto2)
replayer2.process(worker_fn2)
# Compare storages
assert isinstance(sto1, InMemoryStorage) # needed to help mypy
assert isinstance(sto2, InMemoryStorage)
check_replayed(sto1, sto2)
for record in caplog.records:
assert (
"this should not happen" not in record.message
), "Replayer ignored some message types, see captured logging"
| SoftwareHeritage/swh-storage | swh/storage/tests/test_backfill.py | Python | gpl-3.0 | 8,545 | ["VisIt"] | d8155cf31f8ba6a9035c8384c9968b13a48fc5b0ee17f45aa52fe47b1bd14f3f |
# Orca
#
# Copyright 2008-2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Utilities for obtaining tutorial utterances for objects. In general,
there probably should be a singleton instance of the TutorialGenerator
class. For those wishing to override the generators, however,
one can create a new instance and replace/extend the tutorial generators
as they see fit."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2008-2009 Sun Microsystems Inc."
__license__ = "LGPL"
import pyatspi
from . import debug
from . import orca_state
from . import settings
from .orca_i18n import _ # for gettext support
class TutorialGenerator:
"""Takes accessible objects and produces a tutorial string to speak
for those objects. See the getTutorial method, which is the
primary entry point. Subclasses can feel free to override/extend
the tutorialGenerators instance field as they see fit."""
def __init__(self, script):
# The script that created us. This allows us to ask the
# script for information if we need it.
#
self._script = script
# storing the last spoken message.
self.lastTutorial = ""
self.lastRole = None
# Set up a dictionary that maps role names to functions
# that generate tutorial strings for objects that implement that role.
#
self.tutorialGenerators = {}
self.tutorialGenerators[pyatspi.ROLE_CHECK_BOX] = \
self._getTutorialForCheckBox
self.tutorialGenerators[pyatspi.ROLE_COMBO_BOX] = \
self._getTutorialForComboBox
self.tutorialGenerators[pyatspi.ROLE_FRAME] = \
self._getTutorialForFrame
self.tutorialGenerators[pyatspi.ROLE_ICON] = \
self._getTutorialForIcon
self.tutorialGenerators[pyatspi.ROLE_LAYERED_PANE] = \
self._getTutorialForLayeredPane
self.tutorialGenerators[pyatspi.ROLE_LIST] = \
self._getTutorialForList
self.tutorialGenerators[pyatspi.ROLE_LIST_ITEM] = \
self._getTutorialForListItem
self.tutorialGenerators[pyatspi.ROLE_PAGE_TAB] = \
self._getTutorialForPageTab
self.tutorialGenerators[pyatspi.ROLE_PARAGRAPH] = \
self._getTutorialForText
self.tutorialGenerators[pyatspi.ROLE_PASSWORD_TEXT] = \
self._getTutorialForText
self.tutorialGenerators[pyatspi.ROLE_ENTRY] = \
self._getTutorialForText
self.tutorialGenerators[pyatspi.ROLE_PUSH_BUTTON] = \
self._getTutorialForPushButton
self.tutorialGenerators[pyatspi.ROLE_SPIN_BUTTON] = \
self._getTutorialForSpinButton
self.tutorialGenerators[pyatspi.ROLE_TABLE_CELL] = \
self._getTutorialForTableCellRow
self.tutorialGenerators[pyatspi.ROLE_TEXT] = \
self._getTutorialForText
self.tutorialGenerators[pyatspi.ROLE_TOGGLE_BUTTON] = \
self._getTutorialForCheckBox
self.tutorialGenerators[pyatspi.ROLE_RADIO_BUTTON] = \
self._getTutorialForRadioButton
self.tutorialGenerators[pyatspi.ROLE_MENU] = \
self._getTutorialForMenu
self.tutorialGenerators[pyatspi.ROLE_CHECK_MENU_ITEM] = \
self._getTutorialForCheckBox
self.tutorialGenerators[pyatspi.ROLE_MENU_ITEM] = \
self._getTutorialForMenuItem
self.tutorialGenerators[pyatspi.ROLE_RADIO_MENU_ITEM] = \
self._getTutorialForCheckBox
self.tutorialGenerators[pyatspi.ROLE_SLIDER] = \
self._getTutorialForSlider
def _debugGenerator(self, generatorName, obj, alreadyFocused, utterances):
"""Prints debug.LEVEL_FINER information regarding
the tutorial generator.
Arguments:
- generatorName: the name of the generator
- obj: the object being presented
- alreadyFocused: False if object just received focus
- utterances: the generated text
"""
debug.println(debug.LEVEL_FINER,
"GENERATOR: %s" % generatorName)
debug.println(debug.LEVEL_FINER,
" obj = %s" % obj.name)
debug.println(debug.LEVEL_FINER,
" role = %s" % obj.getRoleName())
debug.println(debug.LEVEL_FINER,
" alreadyFocused = %s" % alreadyFocused)
debug.println(debug.LEVEL_FINER,
" utterances:")
for text in utterances:
debug.println(debug.LEVEL_FINER,
" (%s)" % text)
def _getDefaultTutorial(
self, obj, alreadyFocused, forceTutorial, role=None):
"""The default tutorial generator returns the empty tutorial string
because we have no associated tutorial function for the object.
Arguments:
- obj: an Accessible
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
- role: A role that should be used instead of the Accessible's
possible role.
Returns the empty list []
"""
return []
def _getTutorialForCheckBox(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a check box.
Arguments:
- obj: the check box
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of tutorial utterances to be spoken for the object.
"""
utterances = []
# Translators: this is a tip for the user on how to toggle a checkbox.
msg = _("Press space to toggle.")
if (not alreadyFocused and self.lastTutorial != [msg]) \
or forceTutorial:
utterances.append(msg)
self._debugGenerator("_getTutorialForCheckBox",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForComboBox(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a combobox.
Arguments:
- obj: the combo box
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of tutorial utterances to be spoken for the object.
"""
utterances = []
# Translators: this is a tip for the user on how to interact
# with a combobox.
msg = _("Press space to expand, and use up and down to select an item.")
if (not alreadyFocused and self.lastTutorial != [msg]) \
or forceTutorial:
utterances.append(msg)
self._debugGenerator("_getTutorialForComboBox",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForFrame(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a frame.
Arguments:
- obj: the frame
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of tutorial utterances to be spoken for the object.
"""
utterances = []
name = self._script.utilities.displayedText(obj)
if not name and obj.description:
name = obj.description
# Translators: If this application has more than one unfocused alert or
# dialog window, inform user of how to refocus these.
childWindowsMsg = _("Press alt+f6 to give focus to child windows.")
# If this application has more than one unfocused alert or
# dialog window, tell user how to give them focus.
try:
alertAndDialogCount = \
self._script.utilities.unfocusedAlertAndDialogCount(obj)
except:
alertAndDialogCount = 0
if alertAndDialogCount > 0:
utterances.append(childWindowsMsg)
self._debugGenerator("_getTutorialForFrame",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForIcon(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for an icon.
Arguments:
- obj: the icon
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of tutorial utterances to be spoken for the object.
"""
if obj.parent.getRole() == pyatspi.ROLE_LAYERED_PANE:
utterances = self._getTutorialForLayeredPane(obj.parent,
alreadyFocused,
forceTutorial)
else:
utterances = self._getDefaultTutorial(obj,
alreadyFocused,
forceTutorial)
self._debugGenerator("_getTutorialForIcon",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForLayeredPane(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a layered pane.
Arguments:
- obj: the layered pane
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of tutorial utterances to be spoken for the object.
"""
utterances = []
name = self._script.utilities.displayedText(obj)
if not name and obj.description:
name = obj.description
# Translators: this gives tips on how to navigate items in a
# layered pane.
msg = _("To move to items, use either " \
"the arrow keys or type ahead searching.")
utterances.append(msg)
# Translators: this is the tutorial string for when first landing
# on the desktop, describing how to access the system menus.
desktopMsg = _("To get to the system menus press the alt+f1 key.")
scriptName = self._script.name
try:
sibling = obj.parent.getChildAtIndex(0)
except AttributeError:
sibling = None
if 'nautilus' in scriptName and obj == sibling:
utterances.append(desktopMsg)
if (not alreadyFocused and self.lastTutorial != utterances) \
or forceTutorial:
pass
else:
utterances = []
self._debugGenerator("_getTutorialForLayeredPane",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForList(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a list.
Arguments:
- obj: the list
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of tutorial utterances to be spoken for the object.
"""
utterances = []
# Translators: this is the tutorial string when navigating lists.
msg = _("Use up and down to select an item.")
if (not alreadyFocused and self.lastTutorial != [msg]) \
or forceTutorial:
utterances.append(msg)
self._debugGenerator("_getTutorialForList",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForListItem(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a listItem.
Arguments:
- obj: the listitem
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of tutorial utterances to be spoken for the object.
"""
utterances = []
# Translators: this represents the state of a node in a tree.
# 'expanded' means the children are showing.
# 'collapsed' means the children are not showing.
# this string informs the user how to collapse the node.
expandedMsg = _("To collapse, press shift plus left.")
# Translators: this represents the state of a node in a tree.
# 'expanded' means the children are showing.
# 'collapsed' means the children are not showing.
# this string informs the user how to expand the node.
collapsedMsg = _("To expand, press shift plus right.")
# If already in focus then the tree probably collapsed or expanded
state = obj.getState()
if state.contains(pyatspi.STATE_EXPANDABLE):
if state.contains(pyatspi.STATE_EXPANDED):
if (self.lastTutorial != [expandedMsg]) or forceTutorial:
utterances.append(expandedMsg)
else:
if (self.lastTutorial != [collapsedMsg]) or forceTutorial:
utterances.append(collapsedMsg)
self._debugGenerator("_getTutorialForListItem",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForMenuItem(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a menu item
Arguments:
- obj: the menu item
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of utterances to be spoken for the object.
"""
utterances = []
# Translators: this is the tutorial string for activating a menu item
msg = _("To activate press return.")
if (not alreadyFocused and self.lastTutorial != [msg]) \
or forceTutorial:
utterances.append(msg)
self._debugGenerator("_getTutorialForMenuItem",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForText(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a text object.
Arguments:
- obj: the text component
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of tutorial utterances to be spoken for the object.
"""
if not obj.getState().contains(pyatspi.STATE_EDITABLE):
return []
utterances = []
# Translators: This is the tutorial string for when landing
# on text fields.
msg = _("Type in text.")
if (not alreadyFocused or forceTutorial) and \
not self._script.utilities.isReadOnlyTextArea(obj):
utterances.append(msg)
self._debugGenerator("_getTutorialForText",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForPageTab(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a page tab.
Arguments:
- obj: the page tab
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of tutorial utterances to be spoken for the object.
"""
utterances = []
# Translators: this is the tutorial string for landing
# on a page tab, we are informing the
# user how to navigate these.
msg = _("Use left and right to view other tabs.")
if (self.lastTutorial != [msg]) or forceTutorial:
utterances.append(msg)
self._debugGenerator("_getTutorialForPageTabList",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForPushButton(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a push button
Arguments:
- obj: the push button
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of utterances to be spoken for the object.
"""
utterances = []
# Translators: this is the tutorial string for activating a push button.
msg = _("To activate press space.")
if (not alreadyFocused and self.lastTutorial != [msg]) \
or forceTutorial:
utterances.append(msg)
self._debugGenerator("_getTutorialForPushButton",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForSpinButton(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a spin button. If the object already has
focus, then no tutorial is given.
Arguments:
- obj: the spin button
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of utterances to be spoken for the object.
"""
utterances = []
# Translators: this is the tutorial string for when landing
# on a spin button.
msg = _("Use up or down arrow to select value." \
" Or type in the desired numerical value.")
if (not alreadyFocused and self.lastTutorial != [msg]) \
or forceTutorial:
utterances.append(msg)
self._debugGenerator("_getTutorialForSpinButton",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForTableCell(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial utterances for a single table cell
Arguments:
- obj: the table
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of utterances to be spoken for the object.
"""
utterances = []
# Translators: this represents the state of a node in a tree.
# 'expanded' means the children are showing.
# 'collapsed' means the children are not showing.
# this string informs the user how to collapse the node.
expandedMsg = _("To collapse, press shift plus left.")
# Translators: this represents the state of a node in a tree.
# 'expanded' means the children are showing.
# 'collapsed' means the children are not showing.
# this string informs the user how to expand the node.
collapsedMsg = _("To expand, press shift plus right.")
# If this table cell has 2 children and one of them has a
# 'toggle' action and the other does not, then present this
# as a checkbox where:
# 1) we get the checked state from the cell with the 'toggle' action
# 2) we get the label from the other cell.
# See Orca bug #376015 for more details.
#
if obj.childCount == 2:
cellOrder = []
hasToggle = [ False, False ]
for i, child in enumerate(obj):
if self._script.utilities.hasMeaningfulToggleAction(child):
hasToggle[i] = True
break
if hasToggle[0] and not hasToggle[1]:
cellOrder = [ 1, 0 ]
elif not hasToggle[0] and hasToggle[1]:
cellOrder = [ 0, 1 ]
if cellOrder:
for i in cellOrder:
# Don't speak the label if just the checkbox state has
# changed.
#
if alreadyFocused and not hasToggle[i]:
pass
else:
utterances.extend( \
self._getTutorialForTableCell(obj[i],
alreadyFocused, forceTutorial))
return utterances
# [[[TODO: WDW - Attempt to infer the cell type. There's a
# bunch of stuff we can do here, such as check the EXPANDABLE
# state, check the NODE_CHILD_OF relation, etc. Logged as
# bugzilla bug 319750.]]]
#
if self._script.utilities.hasMeaningfulToggleAction(obj):
utterances = self._getTutorialForCheckBox(
obj, alreadyFocused, forceTutorial)
state = obj.getState()
if state.contains(pyatspi.STATE_EXPANDABLE):
if state.contains(pyatspi.STATE_EXPANDED):
if self.lastTutorial != [expandedMsg] or forceTutorial:
utterances.append(expandedMsg)
else:
if self.lastTutorial != [collapsedMsg] or forceTutorial:
utterances.append(collapsedMsg)
self._debugGenerator("_getTutorialForTableCell",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForTableCellRow(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for the active table cell in the table row.
Arguments:
- obj: the table
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of utterances to be spoken for the object.
"""
utterances = []
if (not alreadyFocused):
try:
parent_table = obj.parent.queryTable()
except NotImplementedError:
parent_table = None
if settings.readTableCellRow and parent_table \
and not self._script.utilities.isLayoutOnly(obj.parent):
parent = obj.parent
index = self._script.utilities.cellIndex(obj)
row = parent_table.getRowAtIndex(index)
column = parent_table.getColumnAtIndex(index)
# This is an indication of whether we should speak all the
# table cells (the user has moved focus up or down a row),
# or just the current one (focus has moved left or right in
# the same row).
#
speakAll = True
if "lastRow" in self._script.pointOfReference and \
"lastColumn" in self._script.pointOfReference:
pointOfReference = self._script.pointOfReference
speakAll = (pointOfReference["lastRow"] != row) or \
((row == 0 or row == parent_table.nRows-1) and \
pointOfReference["lastColumn"] == column)
utterances.extend(self._getTutorialForTableCell(obj,
alreadyFocused, forceTutorial))
else:
utterances = self._getTutorialForTableCell(obj,
alreadyFocused, forceTutorial)
else:
utterances = self._getTutorialForTableCell(obj, alreadyFocused, \
forceTutorial)
self._debugGenerator("_getTutorialForTableCellRow",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForRadioButton(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a radio button.
Arguments:
- obj: the radio button
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of utterances to be spoken for the object.
"""
utterances = []
# Translators: this is a tip for the user, how to navigate radiobuttons.
msg = _("Use arrow keys to change.")
if (not alreadyFocused and self.lastTutorial != [msg]) \
or forceTutorial:
utterances.append(msg)
self._debugGenerator("_getTutorialForRadioButton",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForMenu(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a menu.
Arguments:
- obj: the menu
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of utterances to be spoken for the object.
"""
utterances = []
# Translators: this is a tip for the user, how to navigate menus.
mainMenuMsg = _("To navigate, press left or right arrow. " \
"To move through items press up or down arrow.")
# Translators: this is a tip for the user, how to
# navigate into sub menus.
subMenuMsg = _("To enter sub menu, press right arrow.")
# Checking if we are a submenu,
# we can't rely on our parent being just a menu.
if obj.parent.name != "" and obj.parent.__class__ == obj.__class__:
if (self.lastTutorial != [subMenuMsg]) or forceTutorial:
utterances.append(subMenuMsg)
else:
if (self.lastTutorial != [mainMenuMsg]) or forceTutorial:
utterances.append(mainMenuMsg)
self._debugGenerator("_getTutorialForMenu",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForSlider(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a slider. If the object already has
focus, then no tutorial is given.
Arguments:
- obj: the slider
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of utterances to be spoken for the object.
"""
utterances = []
# Translators: this is the tutorial string for when landing
# on a slider.
msg = _("To decrease press left arrow, to increase press right arrow." \
" To go to minimum press home, and for maximum press end.")
if (not alreadyFocused and self.lastTutorial != [msg]) \
or forceTutorial:
utterances.append(msg)
self._debugGenerator("_getTutorialForSlider",
obj,
alreadyFocused,
utterances)
return utterances
def _getBindingsForHandler(self, handlerName):
handler = self._script.inputEventHandlers.get(handlerName)
if not handler:
return None
bindings = self._script.keyBindings.getBindingsForHandler(handler)
if not bindings:
return None
binding = bindings[0]
return binding.asString()
def _getModeTutorial(self, obj, alreadyFocused, forceTutorial):
return []
def getTutorial(self, obj, alreadyFocused, forceTutorial=False, role=None):
"""Get the tutorial for an Accessible object. This will look
first to the specific tutorial generators and if this
does not exist then return the empty tutorial.
This method is the primary method
that external callers of this class should use.
Arguments:
- obj: the object
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
- role: Alternative role to use
Returns a list of utterances to be spoken.
"""
if not settings.enableTutorialMessages:
return []
if not obj == orca_state.locusOfFocus:
return []
utterances = []
role = role or obj.getRole()
msg = self._getModeTutorial(obj, alreadyFocused, forceTutorial)
if not msg:
if role in self.tutorialGenerators:
generator = self.tutorialGenerators[role]
else:
generator = self._getDefaultTutorial
msg = generator(obj, alreadyFocused, forceTutorial)
if msg == self.lastTutorial and role == self.lastRole \
and not forceTutorial:
msg = []
if msg:
utterances = [" ".join(msg)]
self.lastTutorial = msg
self.lastRole = role
if forceTutorial:
self.lastTutorial = ""
self.lastRole = None
self._debugGenerator("getTutorial",
obj,
alreadyFocused,
utterances)
return utterances
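# Hypothetical extension sketch (not part of Orca itself): as the module
# docstring suggests, a script wanting custom messages can install its own
# generator for a role, using the same (obj, alreadyFocused, forceTutorial)
# signature as the private helpers above:
#
#     def _myPushButtonTutorial(obj, alreadyFocused, forceTutorial):
#         return ["Press space or enter to activate."]
#
#     generator = TutorialGenerator(script)
#     generator.tutorialGenerators[pyatspi.ROLE_PUSH_BUTTON] = \
#         _myPushButtonTutorial
#     utterances = generator.getTutorial(obj, alreadyFocused=False)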
| pvagner/orca | src/orca/tutorialgenerator.py | Python | lgpl-2.1 | 30,615 | ["ORCA"] | 95a58604b9b93745f2698b4cde69f1532fdd7a6950c92878a640b9be72ad3cfa |
# -*- coding: utf-8 -*-
"""Test sequences for graphiness.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from collections import defaultdict
import heapq
import networkx as nx
__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult (dschult@colgate.edu)',
'Joel Miller (joel.c.miller.research@gmail.com)',
'Ben Edwards',
'Brian Cloteaux <brian.cloteaux@nist.gov>'])
__all__ = ['is_graphical',
'is_multigraphical',
'is_pseudographical',
'is_digraphical',
'is_valid_degree_sequence_erdos_gallai',
'is_valid_degree_sequence_havel_hakimi',
'is_valid_degree_sequence', # deprecated
]
def is_graphical(sequence, method='eg'):
"""Returns True if sequence is a valid degree sequence.
A degree sequence is valid if some graph can realize it.
Parameters
----------
sequence : list or iterable container
A sequence of integer node degrees
method : "eg" | "hh"
The method used to validate the degree sequence.
"eg" corresponds to the Erdős-Gallai algorithm, and
"hh" to the Havel-Hakimi algorithm.
Returns
-------
valid : bool
True if the sequence is a valid degree sequence and False if not.
Examples
--------
>>> G = nx.path_graph(4)
>>> sequence = G.degree().values()
>>> nx.is_valid_degree_sequence(sequence)
True
References
----------
Erdős-Gallai
[EG1960]_, [choudum1986]_
Havel-Hakimi
[havel1955]_, [hakimi1962]_, [CL1996]_
"""
if method == 'eg':
valid = is_valid_degree_sequence_erdos_gallai(list(sequence))
elif method == 'hh':
valid = is_valid_degree_sequence_havel_hakimi(list(sequence))
else:
msg = "`method` must be 'eg' or 'hh'"
raise nx.NetworkXException(msg)
return valid
is_valid_degree_sequence = is_graphical
def _basic_graphical_tests(deg_sequence):
# Sort and perform some simple tests on the sequence
if not nx.utils.is_list_of_ints(deg_sequence):
raise nx.NetworkXUnfeasible
p = len(deg_sequence)
num_degs = [0]*p
dmax, dmin, dsum, n = 0, p, 0, 0
for d in deg_sequence:
# Reject if degree is negative or larger than the sequence length
if d<0 or d>=p:
raise nx.NetworkXUnfeasible
# Process only the non-zero integers
elif d>0:
dmax, dmin, dsum, n = max(dmax,d), min(dmin,d), dsum+d, n+1
num_degs[d] += 1
# Reject sequence if it has odd sum or is oversaturated
if dsum%2 or dsum>n*(n-1):
raise nx.NetworkXUnfeasible
return dmax,dmin,dsum,n,num_degs
def is_valid_degree_sequence_havel_hakimi(deg_sequence):
r"""Returns True if deg_sequence can be realized by a simple graph.
The validation proceeds using the Havel-Hakimi theorem.
Worst-case run time is: O(s) where s is the sum of the sequence.
Parameters
----------
deg_sequence : list
A list of integers where each element specifies the degree of a node
in a graph.
Returns
-------
valid : bool
True if deg_sequence is graphical and False if not.
Notes
-----
The ZZ condition says that for the sequence d if
.. math::
|d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)}
then d is graphical. This was shown in Theorem 6 in [1]_.
References
----------
.. [1] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
[havel1955]_, [hakimi1962]_, [CL1996]_
"""
try:
dmax,dmin,dsum,n,num_degs = _basic_graphical_tests(deg_sequence)
except nx.NetworkXUnfeasible:
return False
# Accept if sequence has no non-zero degrees or passes the ZZ condition
if n==0 or 4*dmin*n >= (dmax+dmin+1) * (dmax+dmin+1):
return True
modstubs = [0]*(dmax+1)
# Successively reduce degree sequence by removing the maximum degree
while n > 0:
# Retrieve the maximum degree in the sequence
while num_degs[dmax] == 0:
dmax -= 1;
# If there are not enough stubs to connect to, then the sequence is
# not graphical
if dmax > n-1:
return False
# Remove largest stub in list
num_degs[dmax], n = num_degs[dmax]-1, n-1
# Reduce the next dmax largest stubs
mslen = 0
k = dmax
for i in range(dmax):
while num_degs[k] == 0:
k -= 1
num_degs[k], n = num_degs[k]-1, n-1
if k > 1:
modstubs[mslen] = k-1
mslen += 1
# Add back to the list any non-zero stubs that were removed
for i in range(mslen):
stub = modstubs[i]
num_degs[stub], n = num_degs[stub]+1, n+1
return True
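# Illustrative check (not part of the original module): the Havel-Hakimi
# reduction above accepts [3, 3, 2, 2, 1, 1] (even sum, realizable as a
# simple graph) and rejects [4, 1, 1], since no node of a 3-node simple
# graph can have degree 4.
#
#     >>> is_valid_degree_sequence_havel_hakimi([3, 3, 2, 2, 1, 1])
#     True
#     >>> is_valid_degree_sequence_havel_hakimi([4, 1, 1])
#     False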
def is_valid_degree_sequence_erdos_gallai(deg_sequence):
r"""Returns True if deg_sequence can be realized by a simple graph.
The validation is done using the Erdős-Gallai theorem [EG1960]_.
Parameters
----------
deg_sequence : list
A list of integers
Returns
-------
valid : bool
True if deg_sequence is graphical and False if not.
Notes
-----
This implementation uses an equivalent form of the Erdős-Gallai criterion.
Worst-case run time is: O(n) where n is the length of the sequence.
Specifically, a sequence d is graphical if and only if the
sum of the sequence is even and for all strong indices k in the sequence,
.. math::
\sum_{i=1}^{k} d_i \leq k(k-1) + \sum_{j=k+1}^{n} \min(d_j,k)
= k(n-1) - ( k \sum_{j=0}^{k-1} n_j - \sum_{j=0}^{k-1} j n_j )
A strong index k is any index where `d_k \geq k` and the value `n_j` is the
number of occurrences of j in d. The maximal strong index is called the
Durfee index.
This particular rearrangement comes from the proof of Theorem 3 in [2]_.
The ZZ condition says that for the sequence d if
.. math::
|d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)}
then d is graphical. This was shown in Theorem 6 in [2]_.
References
----------
.. [1] A. Tripathi and S. Vijay. "A note on a theorem of Erdős & Gallai",
Discrete Mathematics, 265, pp. 417-420 (2003).
.. [2] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
[EG1960]_, [choudum1986]_
"""
try:
dmax,dmin,dsum,n,num_degs = _basic_graphical_tests(deg_sequence)
except nx.NetworkXUnfeasible:
return False
# Accept if sequence has no non-zero degrees or passes the ZZ condition
if n==0 or 4*dmin*n >= (dmax+dmin+1) * (dmax+dmin+1):
return True
# Perform the EG checks using the reformulation of Zverovich and Zverovich
k, sum_deg, sum_nj, sum_jnj = 0, 0, 0, 0
for dk in range(dmax, dmin-1, -1):
if dk < k+1: # Check if already past Durfee index
return True
if num_degs[dk] > 0:
run_size = num_degs[dk] # Process a run of identical-valued degrees
if dk < k+run_size: # Check if end of run is past Durfee index
run_size = dk-k # Adjust back to Durfee index
sum_deg += run_size * dk
for v in range(run_size):
sum_nj += num_degs[k+v]
sum_jnj += (k+v) * num_degs[k+v]
k += run_size
if sum_deg > k*(n-1) - k*sum_nj + sum_jnj:
return False
return True
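# Illustrative check (not part of the original module): the Erdős-Gallai
# test agrees with the Havel-Hakimi test on the same small sequences.
#
#     >>> is_valid_degree_sequence_erdos_gallai([3, 3, 2, 2, 1, 1])
#     True
#     >>> is_valid_degree_sequence_erdos_gallai([4, 1, 1])
#     False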
def is_multigraphical(sequence):
"""Returns True if some multigraph can realize the sequence.
Parameters
----------
sequence : list
A list of integers
Returns
-------
valid : bool
True if deg_sequence is a multigraphic degree sequence and False if not.
Notes
-----
The worst-case run time is O(n) where n is the length of the sequence.
References
----------
.. [1] S. L. Hakimi. "On the realizability of a set of integers as
degrees of the vertices of a linear graph", J. SIAM, 10, pp. 496-506
(1962).
"""
deg_sequence = list(sequence)
if not nx.utils.is_list_of_ints(deg_sequence):
return False
dsum, dmax = 0, 0
for d in deg_sequence:
if d<0:
return False
dsum, dmax = dsum+d, max(dmax,d)
if dsum%2 or dsum<2*dmax:
return False
return True
def is_pseudographical(sequence):
"""Returns True if some pseudograph can realize the sequence.
Every nonnegative integer sequence with an even sum is pseudographical
(see [1]_).
Parameters
----------
sequence : list or iterable container
A sequence of integer node degrees
Returns
-------
valid : bool
True if the sequence is a pseudographic degree sequence and False if not.
Notes
-----
The worst-case run time is O(n) where n is the length of the sequence.
References
----------
.. [1] F. Boesch and F. Harary. "Line removal algorithms for graphs
and their degree lists", IEEE Trans. Circuits and Systems, CAS-23(12),
pp. 778-782 (1976).
"""
s = list(sequence)
if not nx.utils.is_list_of_ints(s):
return False
return sum(s)%2 == 0 and min(s) >= 0
def is_digraphical(in_sequence, out_sequence):
r"""Returns True if some directed graph can realize the in- and out-degree
sequences.
Parameters
----------
in_sequence : list or iterable container
A sequence of integer node in-degrees
out_sequence : list or iterable container
A sequence of integer node out-degrees
Returns
-------
valid : bool
True if the in- and out-degree sequences are digraphic, False if not.
Notes
-----
This algorithm is from Kleitman and Wang [1]_.
The worst case runtime is O(s * log n) where s and n are the sum and length
of the sequences respectively.
References
----------
.. [1] D.J. Kleitman and D.L. Wang
Algorithms for Constructing Graphs and Digraphs with Given Valences
and Factors, Discrete Mathematics, 6(1), pp. 79-88 (1973)
"""
in_deg_sequence = list(in_sequence)
out_deg_sequence = list(out_sequence)
if not nx.utils.is_list_of_ints(in_deg_sequence):
return False
if not nx.utils.is_list_of_ints(out_deg_sequence):
return False
# Process the sequences and form two heaps to store degree pairs with
# either zero or non-zero out degrees
sumin, sumout, nin, nout = 0, 0, len(in_deg_sequence), len(out_deg_sequence)
maxn = max(nin, nout)
maxin = 0
if maxn==0:
return True
stubheap, zeroheap = [ ], [ ]
for n in range(maxn):
in_deg, out_deg = 0, 0
if n<nout:
out_deg = out_deg_sequence[n]
if n<nin:
in_deg = in_deg_sequence[n]
if in_deg<0 or out_deg<0:
return False
sumin, sumout, maxin = sumin+in_deg, sumout+out_deg, max(maxin, in_deg)
if in_deg > 0:
stubheap.append((-1*out_deg, -1*in_deg))
elif out_deg > 0:
zeroheap.append(-1*out_deg)
if sumin != sumout:
return False
heapq.heapify(stubheap)
heapq.heapify(zeroheap)
modstubs = [(0,0)]*(maxin+1)
# Successively reduce degree sequence by removing the maximum out degree
while stubheap:
# Take the first value in the sequence with non-zero in degree
(freeout, freein) = heapq.heappop( stubheap )
freein *= -1
if freein > len(stubheap)+len(zeroheap):
return False
# Attach out stubs to the nodes with the most in stubs
mslen = 0
for i in range(freein):
if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0]):
stubout = heapq.heappop(zeroheap)
stubin = 0
else:
(stubout, stubin) = heapq.heappop(stubheap)
if stubout == 0:
return False
# Check if target is now totally connected
if stubout+1<0 or stubin<0:
modstubs[mslen] = (stubout+1, stubin)
mslen += 1
# Add back the nodes to the heap that still have available stubs
for i in range(mslen):
stub = modstubs[i]
if stub[1] < 0:
heapq.heappush(stubheap, stub)
else:
heapq.heappush(zeroheap, stub[0])
if freeout<0:
heapq.heappush(zeroheap, freeout)
return True
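# Illustrative checks (not part of the original module), grounded in the
# definitions above: a triple edge between two nodes realizes [3, 3], a
# single self-loop realizes [2], and a directed 3-cycle realizes in- and
# out-degree sequences of all ones.
#
#     >>> is_multigraphical([3, 3])
#     True
#     >>> is_pseudographical([2])
#     True
#     >>> is_digraphical([1, 1, 1], [1, 1, 1])
#     True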
| LumPenPacK/NetworkExtractionFromImages | win_build/nefi2_win_amd64_msvc_2015/site-packages/networkx/algorithms/graphical.py | Python | bsd-2-clause | 12,990 | ["Brian"] | 2898b48f50c02d053711ddcc3b19ede7d122f77f979a2fa333d50d7c0c142951 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# List of types and corresponding file extensions.
types = [[ 'ImageData', 'vti'],
['RectilinearGrid', 'vtr'],
['StructuredGrid', 'vts'],
['PolyData', 'vtp'],
['UnstructuredGrid', 'vtu']]
# We intentionally cause vtkErrorMacro calls to be made below. Dump
# errors to a file to prevent a window from coming up.
fow = vtk.vtkFileOutputWindow()
fow.SetFileName("TestEmptyXMLErrors.txt")
fow.SetFlush(0)
fow.SetInstance(fow)
# Prepare some test files.
f = open('emptyFile.vtk', 'wt')
f.close()
f = open('junkFile.vtk', 'wt')
f.write("v9np7598mapwcawoiur-,rjpmW9MJV28nun-q38ynq-9.8ugujqvt-8n3-nv8")
f.close()
# Test each writer/reader.
for t in types:
type = t[0]
ext = t[1]
input = eval('vtk.vtk' + type + '()')
writer = eval('vtk.vtkXML' + type + 'Writer()')
writer.SetFileName('empty' + type + '.' + ext)
sys.stdout.write('Attempting ' + type + ' write with no input.\n')
writer.Write()
sys.stdout.write('Attempting ' + type + ' write with empty input.\n')
writer.SetInputData(input)
writer.Write()
reader = eval('vtk.vtkXML' + type + 'Reader()')
reader.SetFileName('empty' + type + '.' + ext)
sys.stdout.write('Attempting read from file with empty ' + type + '.\n')
reader.Update()
pwriter = eval('vtk.vtkXMLP' + type + 'Writer()')
pwriter.SetFileName('emptyP' + type + '.p' + ext)
sys.stdout.write('Attempting P' + type + ' write with no input.\n')
pwriter.Write()
sys.stdout.write('Attempting P' + type + ' write with empty input.\n')
pwriter.SetInputData(input)
pwriter.Write()
preader = eval('vtk.vtkXMLP' + type + 'Reader()')
preader.SetFileName('emptyP' + type + '.p' + ext)
sys.stdout.write('Attempting read from file with empty P' + type + '.\n')
preader.Update()
reader.SetFileName("emptyFile.vtk")
preader.SetFileName("emptyFile.vtk")
sys.stdout.write('Attempting read ' + type + ' from empty file.\n')
reader.Update()
sys.stdout.write('Attempting read P' + type + ' from empty file.\n')
preader.Update()
reader.SetFileName("junkFile.vtk")
preader.SetFileName("junkFile.vtk")
sys.stdout.write('Attempting read ' + type + ' from junk file.\n')
reader.Update()
sys.stdout.write('Attempting read P' + type + ' from junk file.\n')
preader.Update()
del input
del writer
del reader
del pwriter
del preader
# Test the data set writers.
for t in types:
type = t[0]
ext = t[1]
writer = vtk.vtkXMLDataSetWriter()
pwriter = vtk.vtkXMLPDataSetWriter()
input = eval('vtk.vtk' + type + '()')
writer.SetFileName('empty' + type + 'DataSet.' + ext)
sys.stdout.write('Attempting DataSet ' + type + ' write with no input.\n')
writer.Write()
sys.stdout.write('Attempting DataSet ' + type + ' write with empty input.\n')
writer.SetInputData(input)
writer.Write()
pwriter.SetFileName('emptyP' + type + 'DataSet.p' + ext)
sys.stdout.write('Attempting DataSet ' + type + ' write with no input.\n')
pwriter.SetNumberOfPieces(1)
pwriter.Write()
sys.stdout.write('Attempting DataSet ' + type + ' write with empty input.\n')
pwriter.SetInputData(input)
pwriter.Write()
del input
del pwriter
del writer
# Done with the file output window.
fow.SetInstance(None)
del fow
# Delete the test files.
for t in types:
type = t[0]
ext = t[1]
os.remove('empty' + type + '.' + ext)
os.remove('empty' + type + 'DataSet.' + ext)
os.remove('emptyP' + type + '.p' + ext)
os.remove('emptyP' + type + '_0.' + ext)
os.remove('emptyP' + type + 'DataSet.p' + ext)
os.remove('emptyP' + type + 'DataSet_0.' + ext)
os.remove('junkFile.vtk')
os.remove('emptyFile.vtk')
os.remove('TestEmptyXMLErrors.txt')
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/IO/XML/Testing/Python/TestEmptyXML.py | Python | gpl-3.0 | 3,957 | ["VTK"] | 888e688ecead5c71af00b9540e35f4037d5c94fa9856e3ad5ddc5a28b8cd6a6e |
from distutils.core import setup
import clumpy
packages = ['clumpy']
install_requires = ['numpy>=1.8']
ext_modules = []
setup(
name='clumpy',
author="Brian Kimmig",
author_email='brian.kimmig@gmail.com',
url="https://github.com/bkimmig/clumpy",
license="",
description="Python Expectation Maximization for Astronomy",
long_description=open("README.md").read(),
classifiers=["Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Astronomy"],
platforms='any',
# version=clumpy.__version__,
packages=packages,
ext_modules=ext_modules,
install_requires=install_requires,
)
| bkimmig/clumpy | setup.py | Python | mit | 1,055 | ["Brian"] | f0382517148fdbedec06edfb19ea0a4e03e6125ace026ed8d277451f91d1b246 |
"""
Implementation of a language model class.
TODO: write more documentation
"""
__docformat__ = 'restructuredtext en'
__authors__ = ("Razvan Pascanu "
"KyungHyun Cho "
"Caglar Gulcehre ")
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
import numpy
import itertools
import logging
import pickle as pkl
import theano
import theano.tensor as TT
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from groundhog.utils import id_generator
from groundhog.layers.basic import Model
logger = logging.getLogger(__name__)
class LM_Model(Model):
def __init__(self,
cost_layer = None,
sample_fn = None,
valid_fn = None,
noise_fn = None,
clean_before_noise_fn = False,
clean_noise_validation=True,
weight_noise_amount = 0,
indx_word="/data/lisa/data/PennTreebankCorpus/dictionaries.npz",
need_inputs_for_generating_noise=False,
indx_word_src=None,
character_level = False,
exclude_params_for_norm=None,
rng = None):
"""
Constructs a model that respects the interface required by the
trainer class.
:type cost_layer: groundhog layer
:param cost_layer: the cost (last) layer of the model
:type sample_fn: function or None
:param sample_fn: function used to sample from the model
:type valid_fn: function or None
:param valid_fn: function used to compute the validation error on a
minibatch of examples
:type noise_fn: function or None
:param noise_fn: function called to corrupt an input (that
potentially will be denoised by the model)
:type clean_before_noise_fn: bool
:param clean_before_noise_fn: If the weight noise should be removed
before calling the `noise_fn` to corrupt some input
:type clean_noise_validation: bool
:param clean_noise_validation: If the weight noise should be removed
before calling the validation function
:type weight_noise_amount: float or theano scalar
:param weight_noise_amount: weight noise scale (standard deviation
of the Gaussian from which it is sampled)
:type indx_word: string or None
:param indx_word: path to the file describing how to match indices
to words (or characters)
:type need_inputs_for_generating_noise: bool
:param need_inputs_for_generating_noise: flag saying if the shape of
the inputs affect the shape of the weight noise that is generated at
each step
:type indx_word_src: string or None
:param indx_word_src: similar to indx_word (but for the source
language)
:type character_level: bool
:param character_level: flag used when sampling, saying if we are
running the model on characters or words
:type exclude_params_for_norm: None or list of theano variables
:param exclude_params_for_norm: list of parameters that should not
be included when we compute the norm of the gradient (for norm
clipping). Usually the output weights if the output layer is
large
:type rng: numpy random generator
:param rng: numpy random generator
"""
super(LM_Model, self).__init__(output_layer=cost_layer,
sample_fn=sample_fn,
indx_word=indx_word,
indx_word_src=indx_word_src,
rng=rng)
if exclude_params_for_norm is None:
self.exclude_params_for_norm = []
else:
self.exclude_params_for_norm = exclude_params_for_norm
self.need_inputs_for_generating_noise=need_inputs_for_generating_noise
self.cost_layer = cost_layer
self.validate_step = valid_fn
self.clean_noise_validation = clean_noise_validation
self.noise_fn = noise_fn
self.clean_before = clean_before_noise_fn
self.weight_noise_amount = weight_noise_amount
self.character_level = character_level
self.valid_costs = ['cost','ppl']
# Assume a single cost
# We need to merge these lists
state_below = self.cost_layer.state_below
if hasattr(self.cost_layer, 'mask') and self.cost_layer.mask:
num_words = TT.sum(self.cost_layer.mask)
else:
num_words = TT.cast(state_below.shape[0], 'float32')
scale = getattr(self.cost_layer, 'cost_scale', numpy.float32(1))
if not scale:
scale = numpy.float32(1)
scale *= numpy.float32(numpy.log(2))
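# The ln(2) factor folded into `scale` converts the (natural-log) training
# cost into bits, so the properties below report log2 probabilities.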
grad_norm = TT.sqrt(sum(TT.sum(x**2)
for x,p in zip(self.param_grads, self.params) if p not in
self.exclude_params_for_norm))
new_properties = [
('grad_norm', grad_norm),
('log2_p_word', self.train_cost / num_words / scale),
('log2_p_expl', self.cost_layer.cost_per_sample.mean() / scale)]
self.properties += new_properties
if len(self.noise_params) >0 and weight_noise_amount:
if self.need_inputs_for_generating_noise:
inps = self.inputs
else:
inps = []
self.add_noise = theano.function(inps,[],
name='add_noise',
updates = [(p,
self.trng.normal(shp_fn(self.inputs),
avg =0,
std=weight_noise_amount,
dtype=p.dtype))
for p, shp_fn in
zip(self.noise_params,
self.noise_params_shape_fn)],
on_unused_input='ignore')
self.del_noise = theano.function(inps,[],
name='del_noise',
updates=[(p,
TT.zeros(shp_fn(self.inputs),
p.dtype))
for p, shp_fn in
zip(self.noise_params,
self.noise_params_shape_fn)],
on_unused_input='ignore')
else:
self.add_noise = None
self.del_noise = None
def validate(self, data_iterator, train=False):
cost = 0
n_batches = 0
n_steps = 0
# Clean weight noise before validation. When the noise functions need
# inputs, the cleaning is instead done per batch inside the loop below.
if self.del_noise and self.clean_noise_validation and not self.need_inputs_for_generating_noise:
self.del_noise()
for vals in data_iterator:
n_batches += 1
if isinstance(vals, dict):
val = list(vals.values())[0]
if val.ndim ==3:
n_steps += val.shape[0]*val.shape[1]
else:
n_steps += val.shape[0]
_rvals = self.validate_step( **vals)
cost += _rvals
else:
# not dict
if vals[0].ndim ==3:
n_steps += vals[0].shape[0]*vals[1].shape[1]
else:
n_steps += vals[0].shape[0]
if self.del_noise and self.clean_noise_validation:
if self.need_inputs_for_generating_noise:
self.del_noise(*vals)
else:
self.del_noise()
inps = list(vals)
_rvals = self.validate_step(*inps)
cost += _rvals
n_steps = numpy.log(2.)*n_steps
cost = cost / n_steps
entropy = cost  # already in bits per step (n_steps carries the log(2) factor)
ppl = 10**(numpy.log(2)*cost/numpy.log(10))
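# Since cost is already in bits per step, this is equivalent to 2**cost.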
return [('cost',entropy), ('ppl',ppl)]
def load_dict(self, opts):
"""
Loading the dictionary that goes from indices to actual words
"""
if self.indx_word and '.pkl' in self.indx_word[-4:]:
data_dict = pkl.load(open(self.indx_word, "r"))
self.word_indxs = data_dict
self.word_indxs[opts['null_sym_target']] = '<eol>'
self.word_indxs[opts['unk_sym_target']] = opts['oov']
elif self.indx_word and '.np' in self.indx_word[-4:]:
self.word_indxs = numpy.load(self.indx_word)['unique_words']
if self.indx_word_src and '.pkl' in self.indx_word_src[-4:]:
data_dict = pkl.load(open(self.indx_word_src, "r"))
self.word_indxs_src = data_dict
self.word_indxs_src[opts['null_sym_source']] = '<eol>'
self.word_indxs_src[opts['unk_sym_source']] = opts['oov']
elif self.indx_word_src and '.np' in self.indx_word_src[-4:]:
self.word_indxs_src = numpy.load(self.indx_word_src)['unique_words']
# added by Zhaopeng Tu, 2015-11-03
self.maintain_coverage = opts['maintain_coverage']
self.coverage_dim = opts['coverage_dim']
def get_samples(self, length = 30, temp=1, *inps):
if not hasattr(self, 'word_indxs'):
self.load_dict()
self._get_samples(self, length, temp, *inps)
def perturb(self, *args, **kwargs):
if args:
inps = args
assert not kwargs
if kwargs:
inps = kwargs
assert not args
if self.noise_fn:
if self.clean_before and self.del_noise:
if self.need_inputs_for_generating_noise:
self.del_noise(*args, **kwargs)
else:
self.del_noise()
inps = self.noise_fn(*args, **kwargs)
if self.add_noise:
if self.need_inputs_for_generating_noise:
self.add_noise(*args, **kwargs)
else:
self.add_noise()
return inps
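# A minimal, hedged usage sketch (illustrative only; the cost layer, the
# validation function and the data iterators are assumed to come from the
# rest of a groundhog experiment and are not defined in this file):
#   model = LM_Model(cost_layer=cost_layer,
#                    valid_fn=valid_fn,
#                    weight_noise_amount=0.01,
#                    rng=rng)
#   noisy_batch = model.perturb(**batch)    # corrupt input / add weight noise
#   stats = model.validate(valid_iterator)  # [('cost', bits), ('ppl', perplexity)]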
|
neozhangthe1/coverage_model
|
build/lib/groundhog/models/LM_model.py
|
Python
|
bsd-3-clause
| 10,546
|
[
"Gaussian"
] |
5e74658376eea16ff81dc31b9ba9dd609c4670a357a89a76859b9ee9c6ce17fb
|
# Simple program to call pyqrcode to generate a URL based QR code image
# and save the generated image to disk
# The generated QR image was tested with a Samsung Galaxy II running the
# Norton Snap QR Code Reader from the Android Play Store
import os, sys, inspect
#
# Much ugly bother to work with a local copy of pyqrcode
#
curpath = os.path.split(inspect.getfile(inspect.currentframe()))[0]
newpath = os.path.join(curpath, "../pyqrcode-read-only")
cmd_folder = os.path.realpath(os.path.abspath(newpath))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
try:
import pyqrcode
except ImportError:
print "you need to run the script to obtain the pyqrcode module"
sys.exit (1)
URL = 'https://education.10gen.com/courses/10gen/M101/2012_Fall/'
image = 'image/generate_QR_image.png'
qr_image = pyqrcode.MakeQRImage(URL, rounding = 0, fg = "black", bg = "burlywood", br = False)
qr_image.save(image)
print "Test QR image saved to", image
# the following command runs ImageMagick on my system to display the image in a Window
# qr_image.show()
|
astokes/SynVinQR
|
etude/generate_QR_image.py
|
Python
|
bsd-2-clause
| 1,119
|
[
"Galaxy"
] |
16a8712638a063e80e882bed01703ffcb64288d2d3e6e9b47dd0b3c63442ce58
|
# Copyright 2017 Hugh Salimbeni
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from gpflow import settings
from gpflow import params_as_tensors, Parameterized
from gpflow.likelihoods import Gaussian
def reparameterize(mean, var, z, full_cov=False):
"""
Implements the 'reparameterization trick' for the Gaussian, either full rank or diagonal
If the z is a sample from N(0, 1), the output is a sample from N(mean, var)
If full_cov=True then var must be of shape S,N,N,D and the full covariance is used. Otherwise
var must be S,N,D and the operation is elementwise
:param mean: mean of shape S,N,D
:param var: covariance of shape S,N,D or S,N,N,D
:param z: samples from unit Gaussian of shape S,N,D
:param full_cov: bool to indicate whether var is of shape S,N,N,D or S,N,D
:return sample from N(mean, var) of shape S,N,D
"""
if var is None:
return mean
if full_cov is False:
return mean + z * (var + settings.jitter) ** 0.5
else:
S, N, D = tf.shape(mean)[0], tf.shape(mean)[1], tf.shape(mean)[2] # var is SNND
mean = tf.transpose(mean, (0, 2, 1)) # SND -> SDN
var = tf.transpose(var, (0, 3, 1, 2)) # SNND -> SDNN
I = settings.jitter * tf.eye(N, dtype=settings.float_type)[None, None, :, :] # 11NN
chol = tf.cholesky(var + I) # SDNN
z_SDN1 = tf.transpose(z, [0, 2, 1])[:, :, :, None] # SND->SDN1
f = mean + tf.matmul(chol, z_SDN1)[:, :, :, 0] # SDN(1)
return tf.transpose(f, (0, 2, 1)) # SND
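# A minimal usage sketch (comments only, not executed here). Assuming `mean`
# and `var` are S x N x D tensors produced by a DGP layer:
#   z = tf.random_normal(tf.shape(mean), dtype=settings.float_type)
#   f = reparameterize(mean, var, z)                            # diagonal covariance
#   f_full = reparameterize(mean, var_SNND, z, full_cov=True)   # full covariance
# Each call returns a differentiable sample from N(mean, var) of shape S x N x D.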
class BroadcastingLikelihood(Parameterized):
"""
A wrapper for the likelihood to broadcast over the samples dimension. The Gaussian doesn't
need this, but for the others we can apply reshaping and tiling.
With this wrapper all likelihood functions behave correctly with inputs of shape S,N,D,
but with Y still of shape N,D
"""
def __init__(self, likelihood):
Parameterized.__init__(self)
self.likelihood = likelihood
if isinstance(likelihood, Gaussian):
self.needs_broadcasting = False
else:
self.needs_broadcasting = True
def _broadcast(self, f, vars_SND, vars_ND):
if self.needs_broadcasting is False:
return f(vars_SND, [tf.expand_dims(v, 0) for v in vars_ND])
else:
S, N, D = [tf.shape(vars_SND[0])[i] for i in range(3)]
vars_tiled = [tf.tile(x[None, :, :], [S, 1, 1]) for x in vars_ND]
flattened_SND = [tf.reshape(x, [S*N, D]) for x in vars_SND]
flattened_tiled = [tf.reshape(x, [S*N, -1]) for x in vars_tiled]
flattened_result = f(flattened_SND, flattened_tiled)
if isinstance(flattened_result, tuple):
return [tf.reshape(x, [S, N, -1]) for x in flattened_result]
else:
return tf.reshape(flattened_result, [S, N, -1])
@params_as_tensors
def variational_expectations(self, Fmu, Fvar, Y):
f = lambda vars_SND, vars_ND: self.likelihood.variational_expectations(vars_SND[0],
vars_SND[1],
vars_ND[0])
return self._broadcast(f,[Fmu, Fvar], [Y])
@params_as_tensors
def logp(self, F, Y):
f = lambda vars_SND, vars_ND: self.likelihood.logp(vars_SND[0], vars_ND[0])
return self._broadcast(f, [F], [Y])
@params_as_tensors
def conditional_mean(self, F):
f = lambda vars_SND, vars_ND: self.likelihood.conditional_mean(vars_SND[0])
return self._broadcast(f,[F], [])
@params_as_tensors
def conditional_variance(self, F):
f = lambda vars_SND, vars_ND: self.likelihood.conditional_variance(vars_SND[0])
return self._broadcast(f,[F], [])
@params_as_tensors
def predict_mean_and_var(self, Fmu, Fvar):
f = lambda vars_SND, vars_ND: self.likelihood.predict_mean_and_var(vars_SND[0],
vars_SND[1])
return self._broadcast(f,[Fmu, Fvar], [])
@params_as_tensors
def predict_density(self, Fmu, Fvar, Y):
f = lambda vars_SND, vars_ND: self.likelihood.predict_density(vars_SND[0],
vars_SND[1],
vars_ND[0])
return self._broadcast(f,[Fmu, Fvar], [Y])
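# Hedged usage sketch of the wrapper (names below are illustrative and not
# part of this module):
#   from gpflow.likelihoods import Bernoulli
#   lik = BroadcastingLikelihood(Bernoulli())
#   ve = lik.variational_expectations(Fmu, Fvar, Y)  # Fmu, Fvar: S,N,D  Y: N,D
# For non-Gaussian likelihoods the wrapper tiles Y over the sample dimension S,
# flattens to S*N rows, delegates to the wrapped likelihood and restores the
# S,N,-1 shape of the result.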
|
ICL-SML/Doubly-Stochastic-DGP
|
doubly_stochastic_dgp/utils.py
|
Python
|
apache-2.0
| 5,060
|
[
"Gaussian"
] |
2f7c07f57bcf2a00ec86798a0d0768e3e0343451bf7bddcdfee40fa9f394e92b
|
# -*- coding: utf-8 -*-
# Tokyo Cabinet Python ctypes binding.
from ctypes import CDLL, CFUNCTYPE, POINTER
from ctypes import c_int, c_int8, c_int32, c_int64
from ctypes import c_uint, c_uint8, c_uint32, c_uint64
from ctypes import c_bool, c_size_t
from ctypes import c_double
from ctypes import c_char_p, c_void_p
from ctypes import cast
from ctypes.util import find_library
c_int_p = POINTER(c_int)
c_uint64_p = POINTER(c_uint64)
c_double_p = POINTER(c_double)
c_time = c_uint64 # FIX: This is valid in 64 bit architecture.
class tc_char_p(c_char_p):
"""Automatic garbage collectable ctypes.c_char_p type."""
def __del__(self):
if self and libtc:
libtc.tcfree(self)
class tc_void_p(c_void_p):
"""Automatic garbage collectable ctypes.c_void_p type."""
def __del__(self):
if self and libtc:
libtc.tcfree(self)
# Load Tokyo Cabinet library
libtc = CDLL(find_library('tokyocabinet'))
__version__ = c_char_p.in_dll(libtc, 'tcversion').value
# Every XXX_errmsg() message is driven by this class
class TCException(Exception):
pass
# Extracted from 'cxcore.py' file
# ctypes-opencv - A Python wrapper for OpenCV using ctypes
# Copyright (c) 2008, Minh-Tri Pham
def cfunc(name, dll, result, *args):
"""Build and apply a ctypes prototype complete with parameter
flags.
e.g.
cvMinMaxLoc = cfunc('cvMinMaxLoc', _cxDLL, None,
('image', IplImage_p, 1),
('min_val', c_double_p, 2),
('max_val', c_double_p, 2),
('min_loc', CvPoint_p, 2),
('max_loc', CvPoint_p, 2),
('mask', IplImage_p, 1, None))
Means locate cvMinMaxLoc in dll _cxDLL, it returns nothing.
The first argument is an input parameter. The next 4 arguments
are output, and the last argument is input with an optional value.
A typical call might look like:
min_val,max_val,min_loc,max_loc = cvMinMaxLoc(img)
"""
atypes = []
aflags = []
for arg in args:
atypes.append(arg[1])
aflags.append((arg[2], arg[0]) + arg[3:])
return CFUNCTYPE(result, *atypes)((name, dll), tuple(aflags))
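# Note on the parameter flags used throughout this module: in a ctypes
# prototype, flag 1 marks an input argument and flag 2 an output argument.
# Output arguments (e.g. the 'sp' size pointers below) are filled in by the C
# call; the errcheck hooks defined later surface them as Python return values,
# so e.g. tclistval(lst, 0) yields the value pointer together with its size.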
def _ctypes(type_):
"""Convert a Python type to a ctypes type."""
types = { int: c_int,
float: c_double,
str: c_char_p }
return types[type_]
def cfunc_va(name, dll, result, *args):
"""Build and apply a ctypes prototype complete with variable
arguments.
This function is similar to cfunc, but uses a closure to convert the
variable arguments to ctypes types at call time.
"""
def create_closure(func, fix_args):
def call(*args):
var_args = [_ctypes(type(arg)) for arg in args[len(fix_args):]]
func.argtypes = fix_args + var_args
return func(*args)
return call
fix_args = [arg[1] for arg in args]
func = getattr(dll, name)
func.restype = result
return create_closure(func, fix_args)
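# Illustrative call through a cfunc_va binding (tclistprintf is defined near
# the end of this module); the trailing Python arguments are mapped to ctypes
# types by _ctypes() at call time:
#   tclistprintf(lst, 'count=%d name=%s', 7, 'seven')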
def cfunc_fast(name, dll, result, *args):
"""Build and apply a ctypes prototype complete without parameter
flags.
This functions is similar to cfunc, but the call is faster.
"""
func = getattr(dll, name)
func.argtypes = [arg[1] for arg in args]
func.restype = result
return func
class ListPOINTER(object):
"""Just like a POINTER but accept a list of ctype as an
argument."""
def __init__(self, etype):
self.etype = etype
def from_param(self, param):
if isinstance(param, (list, tuple)):
return (self.etype * len(param))(*param)
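# Sketch of what from_param produces: passing [1, 2, 3] to a parameter
# declared as ListPOINTER(c_int) builds the equivalent of
#   (c_int * 3)(1, 2, 3)
# i.e. a C array that can be handed directly to the library call.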
def py_list(data, size, type_):
"""Convert a C array to a Python list.
data -- is a tc_void_p data type.
size -- is a c_int_p data type.
type_ -- a Python data type.
"""
ptr = cast(data, POINTER(type_))
values = [ptr[i] for i in range(size.value)]
return values
#
# Functions from tcutil.h
#
tcmalloc = cfunc('tcmalloc', libtc, c_void_p,
('size', c_size_t, 1))
tcmalloc.__doc__ =\
"""Allocate a region on memory.
size -- specifies the size of the region.
The return value is the pointer to the allocated region.
This function handles failure of memory allocation implicitly.
Because the region of the return value is allocated with the 'malloc'
call, it should be released with the 'free' call when it is no longer
in use.
"""
tccalloc = cfunc('tccalloc', libtc, c_void_p,
('nmemb', c_size_t, 1),
('size', c_size_t, 1))
tccalloc.__doc__ =\
"""Allocate a nullified region on memory.
nmemb -- specifies the number of elements.
size -- specifies the size of each element.
The return value is the pointer to the allocated nullified region.
This function handles failure of memory allocation implicitly.
Because the region of the return value is allocated with the 'calloc'
call, it should be released with the 'free' call when it is no longer
in use.
"""
tcrealloc = cfunc('tcrealloc', libtc, c_void_p,
('ptr', c_void_p, 1),
('size', c_size_t, 1))
tcrealloc.__doc__ =\
"""Re-allocate a region on memory.
ptr -- specifies the pointer to the region.
size -- specifies the size of the region.
The return value is the pointer to the re-allocated region.
This function handles failure of memory allocation implicitly.
Because the region of the return value is allocated with the 'realloc'
call, it should be released with the 'free' call when it is no longer
in use.
"""
tcmemdup = cfunc('tcmemdup', libtc, c_void_p,
('ptr', c_void_p, 1),
('size', c_size_t, 1))
tcmemdup.__doc__ =\
"""Duplicate a region on memory.
ptr -- specifies the pointer to the region.
size -- specifies the size of the region.
The return value is the pointer to the allocated region of the
duplicate.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. Because the region of the return value is allocated with the
'malloc' call, it should be released with the 'free' call when it is no
longer in use.
"""
tcstrdup = cfunc('tcstrdup', libtc, c_char_p,
('str', c_char_p, 1))
tcstrdup.__doc__ =\
"""Duplicate a string on memory.
str -- specifies the string.
The return value is the allocated string equivalent to the specified
string.
Because the region of the return value is allocated with the 'malloc'
call, it should be released with the 'free' call when it is no longer
in use.
"""
tcfree = cfunc('tcfree', libtc, None,
('ptr', c_void_p, 1))
tcfree.__doc__ =\
"""Free a region on memory.
ptr -- specifies the pointer to the region. If it is 'NULL', this
function has no effect.
Although this function is just a wrapper of 'free' call, this is
useful in applications using another package of the 'malloc' series.
"""
# basic utilities (for experts)
TCCMP = CFUNCTYPE(c_int, c_char_p, c_int, c_char_p, c_int, c_void_p)
TCCMP.__doc__ =\
"""Type of the pointer to a comparison function.
aptr -- specifies the pointer to the region of one key.
asiz -- specifies the size of the region of one key.
bptr -- specifies the pointer to the region of the other key.
bsiz -- specifies the size of the region of the other key.
op -- specifies the pointer to the optional opaque object.
The return value is positive if the former is big, negative if the
latter is big, 0 if both are equivalent.
"""
TCCMP_P = POINTER(TCCMP)
TCCODEC = CFUNCTYPE(c_void_p, c_void_p, c_int, c_int_p, c_void_p)
TCCODEC.__doc__ =\
"""Type of the pointer to a encoding or decoding function.
ptr -- specifies the pointer to the region.
size -- specifies the size of the region.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
op -- specifies the pointer to the optional opaque object.
If successful, the return value is the pointer to the result object
allocated with 'malloc' call, else, it is 'NULL'.
"""
TCCODEC_P = POINTER(TCCODEC)
TCPDPROC = CFUNCTYPE(c_void_p, c_void_p, c_int, c_int_p, c_void_p)
TCPDPROC.__doc__ =\
"""Type of the pointer to a callback function to process record
duplication.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
op -- specifies the pointer to the optional opaque object.
The return value is the pointer to the result object allocated with
'malloc'. It is released by the caller. If it is 'NULL', the record
is not modified.
"""
TCPDPROC_P = POINTER(TCPDPROC)
TCITER = CFUNCTYPE(c_bool, c_void_p, c_int, c_void_p, c_int, c_void_p)
TCITER.__doc__ =\
"""Type of the pointer to a iterator function.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value.
op -- specifies the pointer to the optional opaque object.
The return value is true to continue iteration or false to stop
iteration.
"""
TCITER_P = POINTER(TCITER)
# extensible string
class TCXSTR_P(c_void_p):
"""Type of structure for an extensible string object."""
# We treat it as an opaque structure. We can use ctypes.Structure
# if needed.
def __del__(self):
if self and libtc:
libtc.tcxstrdel(self)
tcxstrnew = cfunc('tcxstrnew', libtc, TCXSTR_P)
tcxstrnew.__doc__ =\
"""Create an extensible string object.
The return value is the new extensible string object.
"""
tcxstrnew2 = cfunc('tcxstrnew2', libtc, TCXSTR_P,
('str', c_char_p, 1))
tcxstrnew2.__doc__ =\
"""Create an extensible string object from a character string.
str -- specifies the string of the initial content.
The return value is the new extensible string object containing the
specified string.
"""
tcxstrnew3 = cfunc('tcxstrnew3', libtc, TCXSTR_P,
('asiz', c_int, 1))
tcxstrnew3.__doc__ =\
"""Create an extensible string object with the initial allocation
size.
asiz -- specifies the initial allocation size.
The return value is the new extensible string object.
"""
tcxstrdup = cfunc('tcxstrdup', libtc, TCXSTR_P,
('xstr', TCXSTR_P, 1))
tcxstrdup.__doc__ =\
"""Copy an extensible string object.
xstr -- specifies the extensible string object.
The return value is the new extensible string object equivalent to the
specified object.
"""
tcxstrdel = cfunc('tcxstrdel', libtc, None,
('xstr', TCXSTR_P, 1))
tcxstrdel.__doc__ =\
"""Delete an extensible string object.
xstr -- specifies the extensible string object.
Note that the deleted object and its derivatives can not be used
anymore.
"""
tcxstrcat = cfunc('tcxstrcat', libtc, None,
('xstr', TCXSTR_P, 1),
('ptr', c_void_p, 1),
('size', c_int, 1))
tcxstrcat.__doc__ =\
"""Concatenate a region to the end of an extensible string object.
xstr -- specifies the extensible string object.
ptr -- specifies the pointer to the region to be appended.
size -- specifies the size of the region.
"""
tcxstrcat2 = cfunc('tcxstrcat2', libtc, None,
('xstr', TCXSTR_P, 1),
('str', c_char_p, 1))
tcxstrcat2.__doc__ =\
"""Concatenate a character string to the end of an extensible string
object.
xstr -- specifies the extensible string object.
str -- specifies the string to be appended.
"""
tcxstrptr = cfunc('tcxstrptr', libtc, c_void_p,
('xstr', TCXSTR_P, 1))
tcxstrptr.__doc__ =\
"""Get the pointer of the region of an extensible string object.
xstr -- specifies the extensible string object.
The return value is the pointer of the region of the object.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string.
"""
tcxstrsize = cfunc('tcxstrsize', libtc, c_int,
('xstr', TCXSTR_P, 1))
tcxstrsize.__doc__ =\
"""Get the size of the region of an extensible string object.
xstr -- specifies the extensible string object.
The return value is the size of the region of the object.
"""
tcxstrclear = cfunc('tcxstrclear', libtc, None,
('xstr', TCXSTR_P, 1))
tcxstrclear.__doc__ =\
"""Clear an extensible string object.
xstr -- specifies the extensible string object.
The internal buffer of the object is cleared and the size is set zero.
"""
# extensible string (for experts)
tcxstrtomalloc = cfunc('tcxstrtomalloc', libtc, tc_void_p,
('xstr', TCXSTR_P, 1))
tcxstrtomalloc.__doc__ =\
"""Convert an extensible string object into a usual allocated region.
xstr -- specifies the extensible string object.
The return value is the pointer to the allocated region of the object.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. Because the region of the return value is allocated with the
'malloc' call, it should be released with the 'free' call when it is
no longer in use. Because the region of the original object is
deleted, it should not be deleted again.
"""
tcxstrfrommalloc = cfunc('tcxstrfrommalloc', libtc, TCXSTR_P,
('ptr', c_void_p, 1),
('size', c_int, 1))
tcxstrfrommalloc.__doc__ =\
"""Create an extensible string object from an allocated region.
ptr -- specifies the pointer to the region allocated with 'malloc'
call.
size -- specifies the size of the region.
The return value is the new extensible string object wrapping the
specified region.
Note that the specified region is released when the object is deleted.
"""
# array list
class TCLIST_P(c_void_p):
"""Type of structure for an array list."""
# We treat it as an opaque structure. We can use ctypes.Structure
# if needed.
def __del__(self):
if self and libtc:
libtc.tclistdel(self)
tclistnew = cfunc('tclistnew', libtc, TCLIST_P)
tclistnew.__doc__ =\
"""Create a list object.
The return value is the new list object.
"""
tclistnew2 = cfunc('tclistnew2', libtc, TCLIST_P,
('anum', c_int, 1))
tclistnew2.__doc__ =\
"""Create a list object with expecting the number of elements.
anum -- specifies the number of elements expected to be stored in the
list.
The return value is the new list object.
"""
tclistnew3 = cfunc('tclistnew3', libtc, TCLIST_P,
('str', c_char_p, 1))
tclistnew3.__doc__ =\
"""Create a list object with initial string elements.
str -- specifies the string of the first element.
The other arguments are other elements. They should be trailed by a
'NULL' argument.
The return value is the new list object.
"""
tclistdup = cfunc('tclistdup', libtc, TCLIST_P,
('list', TCLIST_P, 1))
tclistdup.__doc__ =\
"""Copy a list object.
list -- specifies the list object.
The return value is the new list object equivalent to the specified
object.
"""
tclistdel = cfunc('tclistdel', libtc, None,
('list', TCLIST_P, 1))
tclistdel.__doc__ =\
"""Delete a list object.
list -- specifies the list object.
Note that the deleted object and its derivatives can not be used
anymore.
"""
tclistnum = cfunc('tclistnum', libtc, c_int,
('list', TCLIST_P, 1))
tclistnum.__doc__ =\
"""Get the number of elements of a list object.
list -- specifies the list object.
The return value is the number of elements of the list.
"""
tclistval = cfunc('tclistval', libtc, c_void_p,
('list', TCLIST_P, 1),
('index', c_int, 1),
('sp', c_int_p, 2))
tclistval.errcheck = lambda result, func, arguments : (result, arguments[2])
tclistval.__doc__ =\
"""Get the pointer to the region of an element of a list object.
list -- specifies the list object.
index -- specifies the index of the element.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
The return value is the pointer to the region of the value.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. If 'index' is equal to or more than the number of elements,
the return value is 'NULL'.
"""
tclistval2 = cfunc('tclistval2', libtc, c_char_p,
('list', TCLIST_P, 1),
('index', c_int, 1))
tclistval2.__doc__ =\
"""Get the string of an element of a list object.
list -- specifies the list object.
index -- specifies the index of the element.
The return value is the string of the value.
If 'index' is equal to or more than the number of elements, the return
value is 'NULL'.
"""
tclistpush = cfunc('tclistpush', libtc, None,
('list', TCLIST_P, 1),
('ptr', c_void_p, 1),
('size', c_int, 1))
tclistpush.__doc__ =\
"""Add an element at the end of a list object.
list -- specifies the list object.
ptr -- specifies the pointer to the region of the new element.
size -- specifies the size of the region.
"""
tclistpush2 = cfunc('tclistpush2', libtc, None,
('list', TCLIST_P, 1),
('str', c_char_p, 1))
tclistpush2.__doc__ =\
"""Add a string element at the end of a list object.
list -- specifies the list object.
str -- specifies the string of the new element.
"""
tclistpop = cfunc('tclistpop', libtc, tc_void_p,
('list', TCLIST_P, 1),
('sp', c_int_p, 2))
tclistpop.errcheck = lambda result, func, arguments : (result, arguments[1])
tclistpop.__doc__ =\
"""Remove an element of the end of a list object.
list -- specifies the list object.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
The return value is the pointer to the region of the removed element.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. Because the region of the return value is allocated with the
'malloc' call, it should be released with the 'free' call when it is
no longer in use. If the list is empty, the return value is 'NULL'.
"""
tclistpop2 = cfunc('tclistpop2', libtc, tc_char_p,
('list', TCLIST_P, 1))
tclistpop2.__doc__ =\
"""Remove a string element of the end of a list object.
list -- specifies the list object.
The return value is the string of the removed element.
Because the region of the return value is allocated with the 'malloc'
call, it should be released with the 'free' call when it is no longer
in use. If the list is empty, the return value is 'NULL'.
"""
tclistunshift = cfunc('tclistunshift', libtc, None,
('list', TCLIST_P, 1),
('ptr', c_void_p, 1),
('size', c_int, 1))
tclistunshift.__doc__ =\
"""Add an element at the top of a list object.
list -- specifies the list object.
ptr -- specifies the pointer to the region of the new element.
size -- specifies the size of the region.
"""
tclistunshift2 = cfunc('tclistunshift2', libtc, None,
('list', TCLIST_P, 1),
('str', c_char_p, 1))
tclistunshift2.__doc__ =\
"""Add a string element at the top of a list object.
list -- specifies the list object.
str -- specifies the string of the new element.
"""
tclistshift = cfunc('tclistshift', libtc, tc_void_p,
('list', TCLIST_P, 1),
('sp', c_int_p, 2))
tclistshift.errcheck = lambda result, func, arguments : (result, arguments[1])
tclistshift.__doc__ =\
"""Remove an element of the top of a list object.
list -- specifies the list object.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
The return value is the pointer to the region of the removed element.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. Because the region of the return value is allocated with the
'malloc' call, it should be released with the 'free' call when it is
no longer in use. If the list is empty, the return value is 'NULL'.
"""
tclistshift2 = cfunc('tclistshift2', libtc, tc_char_p,
('list', TCLIST_P, 1))
tclistshift2.__doc__ =\
"""Remove a string element of the top of a list object.
list -- specifies the list object.
The return value is the string of the removed element.
Because the region of the return value is allocated with the 'malloc'
call, it should be released with the 'free' call when it is no longer
in use. If the list is empty, the return value is 'NULL'.
"""
tclistinsert = cfunc('tclistinsert', libtc, None,
('list', TCLIST_P, 1),
('index', c_int, 1),
('ptr', c_void_p, 1),
('size', c_int, 1))
tclistinsert.__doc__ =\
"""Add an element at the specified location of a list object.
list -- specifies the list object.
index -- specifies the index of the new element.
ptr -- specifies the pointer to the region of the new element.
size -- specifies the size of the region.
If 'index' is equal to or more than the number of elements, this
function has no effect.
"""
tclistinsert2 = cfunc('tclistinsert2', libtc, None,
('list', TCLIST_P, 1),
('index', c_int, 1),
('str', c_char_p, 1))
tclistinsert2.__doc__ =\
"""Add a string element at the specified location of a list object.
list -- specifies the list object.
index -- specifies the index of the new element.
str -- specifies the string of the new element.
If 'index' is equal to or more than the number of elements, this
function has no effect.
"""
tclistremove = cfunc('tclistremove', libtc, tc_void_p,
('list', TCLIST_P, 1),
('index', c_int, 1),
('sp', c_int_p, 2))
tclistremove.errcheck = lambda result, func, arguments : (result, arguments[2])
tclistremove.__doc__ =\
"""Remove an element at the specified location of a list object.
list -- specifies the list object.
index -- specifies the index of the element to be removed.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
The return value is the pointer to the region of the removed element.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. Because the region of the return value is allocated with the
'malloc' call, it should be released with the 'free' call when it is
no longer in use. If 'index' is equal to or more than the number of
elements, no element is removed and the return value is 'NULL'.
"""
tclistremove2 = cfunc('tclistremove2', libtc, tc_char_p,
('list', TCLIST_P, 1),
('index', c_int, 1))
tclistremove2.__doc__ =\
"""Remove a string element at the specified location of a list object.
list -- specifies the list object.
index -- specifies the index of the element to be removed.
The return value is the string of the removed element.
Because the region of the return value is allocated with the 'malloc'
call, it should be released with the 'free' call when it is no longer
in use. If 'index' is equal to or more than the number of elements,
no element is removed and the return value is 'NULL'.
"""
tclistover = cfunc('tclistover', libtc, None,
('list', TCLIST_P, 1),
('index', c_int, 1),
('ptr', c_void_p, 1),
('size', c_int, 1))
tclistover.__doc__ =\
"""Overwrite an element at the specified location of a list object.
list -- specifies the list object.
index -- specifies the index of the element to be overwritten.
ptr -- specifies the pointer to the region of the new content.
size -- specifies the size of the new content.
If 'index' is equal to or more than the number of elements, this
function has no effect.
"""
tclistover2 = cfunc('tclistover2', libtc, None,
('list', TCLIST_P, 1),
('index', c_int, 1),
('str', c_char_p, 1))
tclistover2.__doc__ =\
"""Overwrite a string element at the specified location of a list
object.
list -- specifies the list object.
index -- specifies the index of the element to be overwritten.
str -- specifies the string of the new content.
If 'index' is equal to or more than the number of elements, this
function has no effect.
"""
tclistsort = cfunc('tclistsort', libtc, None,
('list', TCLIST_P, 1))
tclistsort.__doc__ =\
"""Sort elements of a list object in lexical order.
list -- specifies the list object.
"""
tclistlsearch = cfunc('tclistlsearch', libtc, c_int,
('list', TCLIST_P, 1),
('ptr', c_void_p, 1),
('size', c_int, 1))
tclistlsearch.__doc__ =\
"""Search a list object for an element using liner search.
list -- specifies the list object.
ptr -- specifies the pointer to the region of the key.
size -- specifies the size of the region.
The return value is the index of a corresponding element or -1 if
there is no corresponding element.
If two or more elements correspond, the former is returned.
"""
tclistbsearch = cfunc('tclistbsearch', libtc, c_int,
('list', TCLIST_P, 1),
('ptr', c_void_p, 1),
('size', c_int, 1))
tclistbsearch.__doc__ =\
"""Search a list object for an element using binary search.
list -- specifies the list object. It should be sorted in lexical
order.
ptr -- specifies the pointer to the region of the key.
size -- specifies the size of the region.
The return value is the index of a corresponding element or -1 if
there is no corresponding element.
If two or more elements correspond, which one is returned is not defined.
"""
tclistclear = cfunc('tclistclear', libtc, None,
('list', TCLIST_P, 1))
tclistclear.__doc__ =\
"""Clear a list object.
list -- specifies the list object.
All elements are removed.
"""
tclistdump = cfunc('tclistdump', libtc, tc_void_p,
('list', TCLIST_P, 1),
('sp', c_int_p, 2))
tclistdump.errcheck = lambda result, func, arguments : (result, arguments[1])
tclistdump.__doc__ =\
"""Serialize a list object into a byte array.
list -- specifies the list object.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
The return value is the pointer to the region of the result serial
region.
Because the region of the return value is allocated with the 'malloc'
call, it should be released with the 'free' call when it is no longer
in use.
"""
tclistload = cfunc('tclistload', libtc, TCLIST_P,
('ptr', c_void_p, 1),
('size', c_int, 1))
tclistload.__doc__ =\
"""Create a list object from a serialized byte array.
ptr -- specifies the pointer to the region of serialized byte array.
size -- specifies the size of the region.
The return value is a new list object.
Because the object of the return value is created with the function
'tclistnew', it should be deleted with the function 'tclistdel' when
it is no longer in use.
"""
# array list (for experts)
tclistpushmalloc = cfunc('tclistpushmalloc', libtc, None,
('list', TCLIST_P, 1),
('ptr', c_void_p, 1),
('size', c_int, 1))
tclistpushmalloc.__doc__ =\
"""Add an allocated element at the end of a list object.
list -- specifies the list object.
ptr -- specifies the pointer to the region allocated with 'malloc'
call.
size -- specifies the size of the region.
Note that the specified region is released when the object is deleted.
"""
tclistsortci = cfunc('tclistsortci', libtc, None,
('list', TCLIST_P, 1))
tclistsortci.__doc__ =\
"""Sort elements of a list object in case-insensitive lexical order.
list -- specifies the list object.
"""
# NOT-TO-BE-IMPLEMENTED: use TCLISTDATUM as 'cmp' param
# tclistsortex = cfunc('tclistsortex', libtc, None,
# ('list', TCLIST_P, 1),
# ('cmp', <to-be-defined>, 1))
# tclistsortex.__doc__ =\
# """Sort elements of a list object by an arbitrary comparison function.
# list -- specifies the list object.
# cmp -- specifies the pointer to the comparison function. The
# structure TCLISTDATUM has the member "ptr" which is the
# pointer to the region of the element, and the member "size"
# which is the size of the region.
# """
tclistinvert = cfunc('tclistinvert', libtc, None,
('list', TCLIST_P, 1))
tclistinvert.__doc__ =\
"""Invert elements of a list object.
list -- specifies the list object.
"""
tclistprintf = cfunc_va('tclistprintf', libtc, None,
('list', TCLIST_P, 1),
('format', c_char_p, 1))
tclistprintf.__doc__ =\
"""Perform formatted output into a list object.
list -- specifies the list object.
format -- specifies the printf-like format string. The conversion
character '%' can be used with such flag characters as 's',
'd', 'o', 'u', 'x', 'X', 'c', 'e', 'E', 'f', 'g', 'G', '@',
'?', 'b', and '%'. '@' works as with 's' but escapes meta
characters of XML. '?' works as with 's' but escapes meta
characters of URL. 'b' converts an integer to the string as
binary numbers. The other conversion characters work as in the
standard printf.
The other arguments are used according to the format string.
"""
# hash map
class TCMAP_P(c_void_p):
"""Type of structure for a map."""
# We treat it as an opaque structure. We can use ctypes.Structure
# if needed.
def __del__(self):
if self and libtc:
libtc.tcmapdel(self)
tcmapnew = cfunc('tcmapnew', libtc, TCMAP_P)
tcmapnew.__doc__ =\
"""Create a map object.
The return value is the new map object.
"""
tcmapnew2 = cfunc('tcmapnew2', libtc, TCMAP_P,
('bnum', c_uint32, 1))
tcmapnew2.__doc__ =\
"""Create a map object with specifying the number of the buckets.
bnum -- specifies the number of the buckets.
The return value is the new map object.
"""
tcmapnew3 = cfunc('tcmapnew3', libtc, TCMAP_P,
('str', c_char_p, 1))
tcmapnew3.__doc__ =\
"""Create a map object with initial string elements.
str -- specifies the string of the first element.
The other arguments are other elements. They should be trailed by a
'NULL' argument.
The return value is the new map object.
The key and the value of each record are situated one after the other.
"""
tcmapdup = cfunc('tcmapdup', libtc, TCMAP_P,
('map', TCMAP_P, 1))
tcmapdup.__doc__ =\
"""Copy a map object.
map -- specifies the map object.
The return value is the new map object equivalent to the specified
object.
"""
tcmapdel = cfunc('tcmapdel', libtc, None,
('map', TCMAP_P, 1))
tcmapdel.__doc__ =\
"""Delete a map object.
map -- specifies the map object.
Note that the deleted object and its derivatives can not be used
anymore.
"""
tcmapput = cfunc('tcmapput', libtc, None,
('map', TCMAP_P, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
tcmapput.__doc__ =\
"""Store a record into a map object.
map -- specifies the map object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value.
If a record with the same key exists in the map, it is overwritten.
"""
tcmapput2 = cfunc('tcmapput2', libtc, None,
('map', TCMAP_P, 1),
('kstr', c_char_p, 1),
('vstr', c_char_p, 1))
tcmapput2.__doc__ =\
"""Store a string record into a map object.
map -- specifies the map object.
kstr -- specifies the string of the key.
vstr -- specifies the string of the value.
If a record with the same key exists in the map, it is overwritten.
"""
tcmapputkeep = cfunc('tcmapputkeep', libtc, c_bool,
('map', TCMAP_P, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
tcmapputkeep.__doc__ =\
"""Store a new record into a map object.
map -- specifies the map object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the map, this function has no effect.
"""
tcmapputkeep2 = cfunc('tcmapputkeep2', libtc, c_bool,
('map', TCMAP_P, 1),
('kstr', c_char_p, 1),
('vstr', c_char_p, 1))
tcmapputkeep2.__doc__ =\
"""Store a new string record into a map object.
map -- specifies the map object.
kstr -- specifies the string of the key.
vstr -- specifies the string of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the map, this function has no
effect.
"""
tcmapputcat = cfunc('tcmapputcat', libtc, None,
('map', TCMAP_P, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
tcmapputcat.__doc__ =\
"""Concatenate a value at the end of the value of the existing record
in a map object.
map -- specifies the map object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value.
If there is no corresponding record, a new record is created.
"""
tcmapputcat2 = cfunc('tcmapputcat2', libtc, None,
('map', TCMAP_P, 1),
('kstr', c_char_p, 1),
('vstr', c_char_p, 1))
tcmapputcat2.__doc__ =\
"""Concatenate a string value at the end of the value of the existing
record in a map object.
map -- specifies the map object.
kstr -- specifies the string of the key.
vstr -- specifies the string of the value.
If there is no corresponding record, a new record is created.
"""
tcmapout = cfunc('tcmapout', libtc, c_bool,
('map', TCMAP_P, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1))
tcmapout.__doc__ =\
"""Remove a record of a map object.
map -- specifies the map object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
If successful, the return value is true. False is returned when no
record corresponds to the specified key.
"""
tcmapout2 = cfunc('tcmapout2', libtc, c_bool,
('map', TCMAP_P, 1),
('kstr', c_char_p, 1))
tcmapout2.__doc__ =\
"""Remove a string record of a map object.
map -- specifies the map object.
kstr -- specifies the string of the key.
If successful, the return value is true. False is returned when no
record corresponds to the specified key.
"""
tcmapget = cfunc('tcmapget', libtc, c_void_p,
('map', TCMAP_P, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('sp', c_int_p, 2))
tcmapget.errcheck = lambda result, func, arguments: (result, arguments[3])
tcmapget.__doc__ =\
"""Retrieve a record in a map object.
map -- specifies the map object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
If successful, the return value is the pointer to the region of the
value of the corresponding record. 'NULL' is returned when no record
corresponds.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string.
"""
tcmapget2 = cfunc('tcmapget2', libtc, c_char_p,
('map', TCMAP_P, 1),
('kstr', c_char_p, 1))
tcmapget2.__doc__ =\
"""Retrieve a string record in a map object.
map -- specifies the map object.
kstr -- specifies the string of the key.
If successful, the return value is the string of the value of the
corresponding record. 'NULL' is returned when no record corresponds.
"""
tcmapmove = cfunc('tcmapmove', libtc, c_bool,
('map', TCMAP_P, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('head', c_bool, 1))
tcmapmove.__doc__ =\
"""Move a record to the edge of a map object.
map -- specifies the map object.
kbuf -- specifies the pointer to the region of a key.
ksiz -- specifies the size of the region of the key.
head -- specifies the destination, which is the head if it is true or
the tail otherwise.
If successful, the return value is true. False is returned when no
record corresponds to the specified key.
"""
tcmapmove2 = cfunc('tcmapmove2', libtc, c_bool,
('map', TCMAP_P, 1),
('kstr', c_char_p, 1),
('head', c_bool, 1))
tcmapmove2.__doc__ =\
"""Move a string record to the edge of a map object.
map -- specifies the map object.
kstr -- specifies the string of a key.
head -- specifies the destination, which is the head if it is true or
the tail otherwise.
If successful, the return value is true. False is returned when no
record corresponds to the specified key.
"""
tcmapiterinit = cfunc('tcmapiterinit', libtc, None,
('map', TCMAP_P, 1))
tcmapiterinit.__doc__ =\
"""Initialize the iterator of a map object.
map -- specifies the map object.
The iterator is used in order to access the key of every record stored
in the map object.
"""
tcmapiternext = cfunc('tcmapiternext', libtc, c_void_p,
('map', TCMAP_P, 1),
('sp', c_int_p, 2))
tcmapiternext.errcheck = lambda result, func, arguments : (result, arguments[1])
tcmapiternext.__doc__ =\
"""Get the next key of the iterator of a map object.
map -- specifies the map object.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
If successful, the return value is the pointer to the region of the
next key, else, it is 'NULL'. 'NULL' is returned when no record can
be fetched from the iterator.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string.
The order of iteration is assured to be the same as the stored order.
"""
tcmapiternext2 = cfunc('tcmapiternext2', libtc, c_char_p,
('map', TCMAP_P, 1))
tcmapiternext2.__doc__ =\
"""Get the next key string of the iterator of a map object.
map -- specifies the map object.
If successful, the return value is the pointer to the region of the
next key, else, it is 'NULL'. 'NULL' is returned when no record can
be fetched from the iterator.
The order of iteration is assured to be the same as the stored order.
"""
tcmaprnum = cfunc('tcmaprnum', libtc, c_uint64,
('map', TCMAP_P, 1))
tcmaprnum.__doc__ =\
"""Get the number of records stored in a map object.
map -- specifies the map object.
The return value is the number of the records stored in the map
object.
"""
tcmapmsiz = cfunc('tcmapmsiz', libtc, c_uint64,
('map', TCMAP_P, 1))
tcmapmsiz.__doc__ =\
"""Get the total size of memory used in a map object.
map -- specifies the map object.
The return value is the total size of memory used in a map object.
"""
tcmapkeys = cfunc('tcmapkeys', libtc, TCLIST_P,
('map', TCMAP_P, 1))
tcmapkeys.__doc__ =\
"""Create a list object containing all keys in a map object.
map -- specifies the map object.
The return value is the new list object containing all keys in the map
object.
Because the object of the return value is created with the function
'tclistnew', it should be deleted with the function 'tclistdel' when
it is no longer in use.
"""
tcmapvals = cfunc('tcmapvals', libtc, TCLIST_P,
('map', TCMAP_P, 1))
tcmapvals.__doc__ =\
"""Create a list object containing all values in a map object.
map -- specifies the map object.
The return value is the new list object containing all values in the
map object.
Because the object of the return value is created with the function
'tclistnew', it should be deleted with the function 'tclistdel' when
it is no longer in use.
"""
tcmapaddint = cfunc('tcmapaddint', libtc, c_int,
('map', TCMAP_P, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('num', c_int, 1))
tcmapaddint.__doc__ =\
"""Add an integer to a record in a map object.
map -- specifies the map object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
num -- specifies the additional value.
The return value is the summation value.
If the corresponding record exists, the value is treated as an integer
and is added to. If no record corresponds, a new record of the
additional value is stored.
"""
tcmapadddouble = cfunc('tcmapadddouble', libtc, c_double,
('map', TCMAP_P, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('num', c_int, 1))
tcmapadddouble.__doc__ =\
"""Add a real number to a record in a map object.
map -- specifies the map object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
num -- specifies the additional value.
The return value is the summation value.
If the corresponding record exists, the value is treated as a real
number and is added to. If no record corresponds, a new record of the
additional value is stored.
"""
tcmapclear = cfunc('tcmapclear', libtc, None,
('map', TCMAP_P, 1))
tcmapclear.__doc__ =\
"""Clear a map object.
map -- specifies the map object.
All records are removed.
"""
tcmapcutfront = cfunc('tcmapcutfront', libtc, None,
('map', TCMAP_P, 1),
('num', c_int, 1))
tcmapcutfront.__doc__ =\
"""Remove front records of a map object.
map -- specifies the map object.
num -- specifies the number of records to be removed.
"""
tcmapdump = cfunc('tcmapdump', libtc, tc_void_p,
('map', TCMAP_P, 1),
('sp', c_int_p, 2))
tcmapdump.errcheck = lambda result, func, arguments : (result, arguments[1])
tcmapdump.__doc__ =\
"""Serialize a map object into a byte array.
map -- specifies the map object.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
The return value is the pointer to the region of the result serial
region.
Because the region of the return value is allocated with the 'malloc'
call, it should be released with the 'free' call when it is no longer
in use.
"""
tcmapload = cfunc('tcmapload', libtc, TCMAP_P,
('ptr', c_void_p, 1),
('size', c_int, 1))
tcmapload.__doc__ =\
"""Create a map object from a serialized byte array.
ptr -- specifies the pointer to the region of serialized byte array.
size -- specifies the size of the region.
The return value is a new map object.
Because the object of the return value is created with the function
'tcmapnew', it should be deleted with the function 'tcmapdel' when it
is no longer in use.
"""
# hash map (for experts)
tcmapput3 = cfunc('tcmapput3', libtc, None,
('map', TCMAP_P, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
tcmapput3.__doc__ =\
"""Store a record and make it semivolatile in a map object.
map -- specifies the map object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value.
If a record with the same key exists in the map, it is overwritten.
The record is moved to the tail.
"""
tcmapput4 = cfunc('tcmapput4', libtc, None,
('map', TCMAP_P, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('fvbuf', c_void_p, 1),
('fvsiz', c_int, 1),
('lvbuf', c_void_p, 1),
('lvsiz', c_int, 1))
tcmapput4.__doc__ =\
"""Store a record of the value of two regions into a map object.
map -- specifies the map object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
fvbuf -- specifies the pointer to the former region of the value.
fvsiz -- specifies the size of the former region of the value.
lvbuf -- specifies the pointer to the latter region of the value.
lvsiz -- specifies the size of the latter region of the value.
If a record with the same key exists in the map, it is overwritten.
"""
tcmapputcat3 = cfunc('tcmapputcat3', libtc, None,
('map', TCMAP_P, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
tcmapputcat3.__doc__ =\
"""Concatenate a value at the existing record and make it semivolatile
in a map object.
map -- specifies the map object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value.
If there is no corresponding record, a new record is created.
"""
tcmapputproc = cfunc('tcmapputproc', libtc, c_bool,
('map', TCMAP_P, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1),
('proc', TCPDPROC, 1),
('op', c_void_p, 1))
tcmapputproc.__doc__ =\
"""Store a record into a map object with a duplication handler.
map -- specifies the map object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value. 'NULL'
means that record addition is omitted if there is no
corresponding record.
vsiz -- specifies the size of the region of the value.
proc -- specifies the pointer to the callback function to process
duplication. It receives four parameters. The first
parameter is the pointer to the region of the value. The
second parameter is the size of the region of the value. The
third parameter is the pointer to the variable into which the
size of the region of the return value is assigned. The
fourth parameter is the pointer to the optional opaque object.
It returns the pointer to the result object allocated with
'malloc'. It is released by the caller. If it is 'NULL', the
record is not modified. If it is '(void *)-1', the record is
removed.
op -- specifies an arbitrary pointer to be given as a parameter of
the callback function. If it is not needed, 'NULL' can be
specified.
If successful, the return value is true, else, it is false.
"""
tcmapget3 = cfunc('tcmapget3', libtc, c_void_p,
('map', TCMAP_P, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('sp', c_int_p, 2))
tcmapget3.errcheck = lambda result, func, arguments: (result, arguments[3])
tcmapget3.__doc__ =\
"""Retrieve a semivolatile record in a map object.
map -- specifies the map object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
If successful, the return value is the pointer to the region of the
value of the corresponding record. 'NULL' is returned when no record
corresponds.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. The internal region of the returned record is moved to the
tail so that the record will survive for a time under LRU cache
algorithm removing records from the head.
"""
tcmapget4 = cfunc('tcmapget4', libtc, c_char_p,
('map', TCMAP_P, 1),
('kstr', c_char_p, 1),
('dstr', c_char_p, 1))
tcmapget4.__doc__ =\
"""Retrieve a string record in a map object with specifying the
default value string.
map -- specifies the map object.
kstr -- specifies the string of the key.
dstr -- specifies the string of the default value.
The return value is the string of the value of the corresponding
record or the default value string.
"""
tcmapiterinit2 = cfunc('tcmapiterinit2', libtc, None,
('map', TCMAP_P, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1))
tcmapiterinit2.__doc__ =\
"""Initialize the iterator of a map object at the record corresponding
a key.
map -- specifies the map object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
If there is no record corresponding to the condition, the iterator is not
modified.
"""
tcmapiterinit3 = cfunc('tcmapiterinit3', libtc, None,
('map', TCMAP_P, 1),
('kstr', c_char_p, 1))
tcmapiterinit3.__doc__ =\
"""Initialize the iterator of a map object at the record corresponding
to a key string.
map -- specifies the map object.
kstr -- specifies the string of the key.
If there is no record corresponding to the condition, the iterator is not
modified.
"""
tcmapiterval = cfunc('tcmapiterval', libtc, c_void_p,
('kbuf', c_void_p, 1),
('sp', c_int_p, 2))
tcmapiterval.errcheck = lambda result, func, arguments : (result, arguments[1])
tcmapiterval.__doc__ =\
"""Get the value bound to the key fetched from the iterator of a map object.
kbuf -- specifies the pointer to the region of the iteration key.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
The return value is the pointer to the region of the value of the
corresponding record.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string.
"""
tcmapiterval2 = cfunc('tcmapiterval2', libtc, c_char_p,
('kstr', c_char_p, 1))
tcmapiterval2.__doc__ =\
"""Get the value string bound to the key fetched from the iterator of
a map object.
kstr -- specifies the string of the iteration key.
The return value is the pointer to the region of the value of the
corresponding record.
"""
tcmapkeys2 = cfunc('tcmapkeys2', libtc, tc_void_p,
('map', TCMAP_P, 1),
('np', c_int_p, 2))
tcmapkeys2.errcheck = lambda result, func, arguments :\
py_list(result, arguments[1], c_char_p)
tcmapkeys2.__doc__ =\
"""Create an array of strings of all keys in a map object.
map -- specifies the map object.
np -- specifies the pointer to a variable into which the number of
elements of the return value is assigned.
The return value is the pointer to the array of all string keys in the
map object.
Because the region of the return value is allocated with the 'malloc'
call, it should be released with the 'free' call when it is no longer
in use. Note that elements of the array point to the inner objects,
whose life duration is synchronous with the map object.
"""
tcmapvals2 = cfunc('tcmapvals2', libtc, tc_void_p,
('map', TCMAP_P, 1),
('np', c_int_p, 2))
tcmapvals2.errcheck = lambda result, func, arguments :\
py_list(result, arguments[1], c_char_p)
tcmapvals2.__doc__ =\
"""Create an array of strings of all values in a map object.
map -- specifies the map object.
np -- specifies the pointer to a variable into which the number of
elements of the return value is assigned.
The return value is the pointer to the array of all string values in
the map object.
Because the region of the return value is allocated with the 'malloc'
call, it should be released with the 'free' call when it is no longer
in use. Note that elements of the array point to the inner objects,
whose life duration is synchronous with the map object.
"""
tcmaploadone = cfunc('tcmaploadone', libtc, c_void_p,
('ptr', c_void_p, 1),
('size', c_int, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('sp', c_int_p, 2))
tcmaploadone.errcheck = lambda result, func, arguments : (result, arguments[4])
tcmaploadone.__doc__ =\
"""Extract a map record from a serialized byte array.
ptr -- specifies the pointer to the region of serialized byte array.
size -- specifies the size of the region.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
If successful, the return value is the pointer to the region of the
value of the corresponding record. 'NULL' is returned when no record
corresponds.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string.
"""
tcmapprintf = cfunc_va('tcmapprintf', libtc, None,
('map', TCMAP_P, 1),
('format', c_char_p, 1))
tcmapprintf.__doc__ =\
"""Perform formatted output into a map object.
map -- specifies the map object.
kstr -- specifies the string of the key.
format -- specifies the printf-like format string. The conversion
character '%' can be used with such flag characters as 's',
'd', 'o', 'u', 'x', 'X', 'c', 'e', 'E', 'f', 'g', 'G', '@',
'?', 'b', and '%'. '@' works as with 's' but escapes meta
characters of XML. '?' works as with 's' but escapes meta
characters of URL. 'b' converts an integer to the string as
binary numbers. The other conversion characters work as with
the originals.
The other arguments are used according to the format string.
"""
# enumeration for database type
THASH = 0 # hash table
TBTREE = 1 # B+ tree
TFIXED = 2 # fixed-length
TTABLE = 3 # table
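
# Hedged usage sketch for the map wrappers above (tcmapget4, tcmapkeys2 and
# friends). It assumes that tcmapnew, tcmapput2 and tcmapdel are bound
# earlier in this module, and that the py_list errcheck turns the key array
# into a plain Python list; byte strings are used because the parameters are
# declared as 'c_char_p'.
def _example_tcmap_usage():
    tcmap = tcmapnew()                         # assumed wrapper for tcmapnew
    try:
        tcmapput2(tcmap, b'lang', b'python')   # assumed wrapper for tcmapput2
        hit = tcmapget4(tcmap, b'lang', b'unknown')
        miss = tcmapget4(tcmap, b'editor', b'unknown')
        keys = tcmapkeys2(tcmap)               # e.g. [b'lang']
        return hit, miss, keys                 # expected (b'python', b'unknown', ...)
    finally:
        tcmapdel(tcmap)                        # assumed wrapper for tcmapdel
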
tccmplexical = cfunc('tccmplexical', libtc, c_int,
('aptr', c_char_p, 1),
('asiz', c_int, 1),
('bptr', c_char_p, 1),
('bsiz', c_int, 1),
('op', c_void_p, 1))
tccmplexical.__doc__ =\
"""Compare two keys by lexical order.
aptr -- specifies the pointer to the region of one key.
asiz -- specifies the size of the region of one key.
bptr -- specifies the pointer to the region of the other key.
bsiz -- specifies the size of the region of the other key.
op -- is ignored.
The return value is positive if the former is bigger, negative if the
latter is bigger, or 0 if both are equivalent.
"""
tccmpdecimal = cfunc('tccmpdecimal', libtc, c_int,
('aptr', c_char_p, 1),
('asiz', c_int, 1),
('bptr', c_char_p, 1),
('bsiz', c_int, 1),
('op', c_void_p, 1))
tccmpdecimal.__doc__ =\
"""Compare two keys as decimal strings of real numbers.
aptr -- specifies the pointer to the region of one key.
asiz -- specifies the size of the region of one key.
bptr -- specifies the pointer to the region of the other key.
bsiz -- specifies the size of the region of the other key.
op -- is ignored.
The return value is positive if the former is bigger, negative if the
latter is bigger, or 0 if both are equivalent.
"""
tccmpint32 = cfunc('tccmpint32', libtc, c_int,
('aptr', c_char_p, 1),
('asiz', c_int, 1),
('bptr', c_char_p, 1),
('bsiz', c_int, 1),
('op', c_void_p, 1))
tccmpint32.__doc__ =\
"""Compare two keys as 32-bit integers in the native byte order.
aptr -- specifies the pointer to the region of one key.
asiz -- specifies the size of the region of one key.
bptr -- specifies the pointer to the region of the other key.
bsiz -- specifies the size of the region of the other key.
op -- is ignored.
The return value is positive if the former is bigger, negative if the
latter is bigger, or 0 if both are equivalent.
"""
tccmpint64 = cfunc('tccmpint64', libtc, c_int,
('aptr', c_char_p, 1),
('asiz', c_int, 1),
('bptr', c_char_p, 1),
('bsiz', c_int, 1),
('op', c_void_p, 1))
tccmpint64.__doc__ =\
"""Compare two keys as 64-bit integers in the native byte order.
aptr -- specifies the pointer to the region of one key.
asiz -- specifies the size of the region of one key.
bptr -- specifies the pointer to the region of the other key.
bsiz -- specifies the size of the region of the other key.
op -- is ignored.
The return value is positive if the former is bigger, negative if the
latter is bigger, or 0 if both are equivalent.
"""
#
# Functions from tcadb.h
#
adb_new = cfunc('tcadbnew', libtc, c_void_p)
adb_new.__doc__ =\
"""Create an abstract database object.
The return value is the new abstract database object.
"""
adb_del = cfunc('tcadbdel', libtc, None,
('adb', c_void_p, 1))
adb_del.__doc__ =\
"""Delete an abstract database object.
adb -- specifies the abstract database object.
"""
adb_open = cfunc('tcadbopen', libtc, c_bool,
('adb', c_void_p, 1),
('name', c_char_p, 1))
adb_open.__doc__ =\
"""Open an abstract database.
adb -- specifies the abstract database object.
name -- specifies the name of the database. If it is "*", the
database will be an on-memory hash database. If it is "+",
the database will be an on-memory tree database. If its
suffix is ".tch", the database will be a hash database. If
its suffix is ".tcb", the database will be a B+ tree database.
If its suffix is ".tcf", the database will be a fixed-length
database. If its suffix is ".tct", the database will be a
table database. Otherwise, this function fails. Tuning
parameters can trail the name, separated by "#". Each
parameter is composed of the name and the value, separated by
"=". On-memory hash database supports "bnum", "capnum", and
"capsiz". On-memory tree database supports "capnum" and
"capsiz". Hash database supports "mode", "bnum", "apow",
"fpow", "opts", "rcnum", "xmsiz", and "dfunit". B+ tree
database supports "mode", "lmemb", "nmemb", "bnum", "apow",
"fpow", "opts", "lcnum", "ncnum", "xmsiz", and "dfunit".
Fixed-length database supports "mode", "width", and "limsiz".
Table database supports "mode", "bnum", "apow", "fpow",
"opts", "rcnum", "lcnum", "ncnum", "xmsiz", "dfunit", and
"idx".
If successful, the return value is true, else, it is false.
The tuning parameter "capnum" specifies the capacity number of
records. "capsiz" specifies the capacity size of memory usage.
Records that spill over the capacity are removed in storing order. "mode"
can contain "w" of writer, "r" of reader, "c" of creating, "t" of
truncating, "e" of no locking, and "f" of non-blocking lock. The
default mode is equivalent to "wc". "opts" can contain "l" of large
option, "d" of Deflate option, "b" of BZIP2 option, and "t" of TCBS
option. "idx" specifies the column name of an index and its type
separated by ":".
For example, "casket.tch#bnum=1000000#opts=ld" means that the name of
the database file is "casket.tch", and the bucket number is 1000000,
and the options are large and Deflate.
"""
adb_close = cfunc('tcadbclose', libtc, c_bool,
('adb', c_void_p, 1))
adb_close.__doc__ =\
"""Close an abstract database object.
adb -- specifies the abstract database object.
If successful, the return value is true, else, it is false.
Update of a database is assured to be written when the database is
closed. If a writer opens a database but does not close it
appropriately, the database will be broken.
"""
adb_put = cfunc('tcadbput', libtc, c_bool,
('adb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
adb_put.__doc__ =\
"""Store a record into an abstract database object.
adb -- specifies the abstract database object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, it is
overwritten.
"""
adb_put2 = cfunc_fast('tcadbput2', libtc, c_bool,
('adb', c_void_p, 1),
('kstr', c_char_p, 1),
('vstr', c_char_p, 1))
adb_put2.__doc__ =\
"""Store a string record into an abstract database object.
adb -- specifies the abstract database object.
kstr -- specifies the string of the key.
vstr -- specifies the string of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, it is
overwritten.
"""
adb_putkeep = cfunc('tcadbputkeep', libtc, c_bool,
('adb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
adb_putkeep.__doc__ =\
"""Store a new record into an abstract database object.
adb -- specifies the abstract database object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, this function
has no effect.
"""
adb_putkeep2 = cfunc_fast('tcadbputkeep2', libtc, c_bool,
('adb', c_void_p, 1),
('kstr', c_char_p, 1),
('vstr', c_char_p, 1))
adb_putkeep2.__doc__ =\
"""Store a new string record into an abstract database object.
adb -- specifies the abstract database object.
kstr -- specifies the string of the key.
vstr -- specifies the string of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, this function
has no effect.
"""
adb_putcat = cfunc('tcadbputcat', libtc, c_bool,
('adb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
adb_putcat.__doc__ =\
"""Concatenate a value at the end of the existing record in an
abstract database object.
adb -- specifies the abstract database object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value.
If successful, the return value is true, else, it is false.
If there is no corresponding record, a new record is created.
"""
adb_putcat2 = cfunc_fast('tcadbputcat2', libtc, c_bool,
('adb', c_void_p, 1),
('kstr', c_char_p, 1),
('vstr', c_char_p, 1))
adb_putcat2.__doc__ =\
"""Concatenate a string value at the end of the existing record in an
abstract database object.
adb -- specifies the abstract database object.
kstr -- specifies the string of the key.
vstr -- specifies the string of the value.
If successful, the return value is true, else, it is false.
If there is no corresponding record, a new record is created.
"""
adb_out = cfunc('tcadbout', libtc, c_bool,
('adb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1))
adb_out.__doc__ =\
"""Remove a record of an abstract database object.
adb -- specifies the abstract database object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
If successful, the return value is true, else, it is false.
"""
adb_out2 = cfunc_fast('tcadbout2', libtc, c_bool,
('adb', c_void_p, 1),
('kstr', c_char_p, 1))
adb_out2.__doc__ =\
"""Remove a string record of an abstract database object.
adb -- specifies the abstract database object.
kstr -- specifies the string of the key.
If successful, the return value is true, else, it is false.
"""
adb_get = cfunc('tcadbget', libtc, tc_void_p,
('adb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('sp', c_int_p, 2))
adb_get.errcheck = lambda result, func, arguments: (result, arguments[3])
adb_get.__doc__ =\
"""Retrieve a record in an abstract database object.
adb -- specifies the abstract database object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
If successful, the return value is the pointer to the region of the
value of the corresponding record. 'NULL' is returned if no record
corresponds.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. Because the region of the return value is allocated with the
'malloc' call, it should be released with the 'free' call when it is
no longer in use.
"""
adb_get2 = cfunc_fast('tcadbget2', libtc, tc_char_p,
('adb', c_void_p, 1),
('kstr', c_char_p, 1))
adb_get2.__doc__ =\
"""Retrieve a string record in an abstract database object.
adb -- specifies the abstract database object.
kstr -- specifies the string of the key.
If successful, the return value is the string of the value of the
corresponding record. 'NULL' is returned if no record corresponds.
Because the region of the return value is allocated with the 'malloc'
call, it should be released with the 'free' call when it is no longer
in use.
"""
adb_vsiz = cfunc('tcadbvsiz', libtc, c_int,
('adb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1))
adb_vsiz.__doc__ =\
"""Get the size of the value of a record in an abstract database
object.
adb -- specifies the abstract database object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
If successful, the return value is the size of the value of the
corresponding record, else, it is -1.
"""
adb_vsiz2 = cfunc_fast('tcadbvsiz2', libtc, c_int,
('adb', c_void_p, 1),
('kstr', c_char_p, 1))
adb_vsiz2.__doc__ =\
"""Get the size of the value of a string record in an abstract
database object.
adb -- specifies the abstract database object.
kstr -- specifies the string of the key.
If successful, the return value is the size of the value of the
corresponding record, else, it is -1.
"""
adb_iterinit = cfunc('tcadbiterinit', libtc, c_bool,
('adb', c_void_p, 1))
adb_iterinit.__doc__ =\
"""Initialize the iterator of an abstract database object.
adb -- specifies the abstract database object.
If successful, the return value is true, else, it is false.
The iterator is used in order to access the key of every record stored
in a database.
"""
adb_iternext = cfunc('tcadbiternext', libtc, tc_void_p,
('adb', c_void_p, 1),
('sp', c_int_p, 2))
adb_iternext.errcheck = lambda result, func, arguments : (result, arguments[1])
adb_iternext.__doc__ =\
"""Get the next key of the iterator of an abstract database object.
adb -- specifies the abstract database object.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
If successful, the return value is the pointer to the region of the
next key, else, it is 'NULL'. 'NULL' is returned when no record can
be fetched from the iterator.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. Because the region of the return value is allocated with the
'malloc' call, it should be released with the 'free' call when it is
no longer in use. It is possible to access every record by calling
this function iteratively. It is allowed to update or remove records
whose keys have already been fetched during the iteration. However,
consistent results are not assured if the database is otherwise
updated while the iteration is in progress. Besides, the order of
this traversal access method is arbitrary, so it is not assured that
the order of storing matches the order of the traversal access.
"""
adb_iternext2 = cfunc_fast('tcadbiternext2', libtc, tc_char_p,
('adb', c_void_p, 1))
adb_iternext2.__doc__ =\
"""Get the next key string of the iterator of an abstract database
object.
adb -- specifies the abstract database object.
If successful, the return value is the string of the next key, else,
it is 'NULL'. 'NULL' is returned when no record can be fetched from
the iterator.
Because the region of the return value is allocated with the 'malloc'
call, it should be released with the 'free' call when it is no longer
in use. It is possible to access every record by calling this
function iteratively. However, consistent results are not assured if
the database is updated while the iteration is in progress. Besides,
the order of this traversal access method is arbitrary, so it is not
assured that the order of storing matches the order of the traversal
access.
"""
adb_fwmkeys = cfunc('tcadbfwmkeys', libtc, TCLIST_P,
('adb', c_void_p, 1),
('pbuf', c_void_p, 1),
('psiz', c_int, 1),
('max', c_int, 1, -1))
adb_fwmkeys.__doc__ =\
"""Get forward matching keys in an abstract database object.
adb -- specifies the abstract database object.
pbuf -- specifies the pointer to the region of the prefix.
psiz -- specifies the size of the region of the prefix.
max -- specifies the maximum number of keys to be fetched. If it is
negative, no limit is specified.
The return value is a list object of the corresponding keys. This
function never fails. It returns an empty list even if no key
corresponds.
Because the object of the return value is created with the function
'tclistnew', it should be deleted with the function 'tclistdel' when
it is no longer in use. Note that this function may be very slow
because every key in the database is scanned.
"""
adb_fwmkeys2 = cfunc_fast('tcadbfwmkeys2', libtc, TCLIST_P,
('adb', c_void_p, 1),
('pstr', c_char_p, 1),
('max', c_int, 1, -1))
adb_fwmkeys2.__doc__ =\
"""Get forward matching string keys in an abstract database object.
adb -- specifies the abstract database object.
pstr -- specifies the string of the prefix.
max -- specifies the maximum number of keys to be fetched. If it is
negative, no limit is specified.
The return value is a list object of the corresponding keys. This
function never fails. It returns an empty list even if no key
corresponds.
Because the object of the return value is created with the function
'tclistnew', it should be deleted with the function 'tclistdel' when
it is no longer in use. Note that this function may be very slow
because every key in the database is scanned.
"""
adb_addint = cfunc('tcadbaddint', libtc, c_int,
('adb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('num', c_int, 1))
adb_addint.__doc__ =\
"""Add an integer to a record in an abstract database object.
adb -- specifies the abstract database object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
num -- specifies the additional value.
If successful, the return value is the summation value, else, it is
'INT_MIN'.
If the corresponding record exists, the value is treated as an integer
and is added to. If no record corresponds, a new record of the
additional value is stored.
"""
adb_adddouble = cfunc('tcadbadddouble', libtc, c_double,
('adb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('num', c_double, 1))
adb_adddouble.__doc__ =\
"""Add a real number to a record in an abstract database object.
adb -- specifies the abstract database object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
num -- specifies the additional value.
If successful, the return value is the summation value, else, it is
Not-a-Number.
If the corresponding record exists, the value is treated as a real
number and is added to. If no record corresponds, a new record of the
additional value is stored.
"""
adb_sync = cfunc('tcadbsync', libtc, c_bool,
('adb', c_void_p, 1))
adb_sync.__doc__ =\
"""Synchronize updated contents of an abstract database object with
the file and the device.
adb -- specifies the abstract database object.
If successful, the return value is true, else, it is false.
"""
adb_optimize = cfunc('tcadboptimize', libtc, c_bool,
('adb', c_void_p, 1),
('params', c_char_p, 1))
adb_optimize.__doc__ =\
"""Optimize the storage of an abstract database object.
adb -- specifies the abstract database object.
params -- specifies the string of the tuning parameters, which works
as with the tuning parameters of the function 'tcadbopen'.
If it is 'NULL', it is not used.
If successful, the return value is true, else, it is false.
This function is useful to reduce the size of the database storage
fragmented by successive updates.
"""
adb_vanish = cfunc('tcadbvanish', libtc, c_bool,
('adb', c_void_p, 1))
adb_vanish.__doc__ =\
"""Remove all records of an abstract database object.
adb -- specifies the abstract database object.
If successful, the return value is true, else, it is false.
"""
adb_copy = cfunc('tcadbcopy', libtc, c_bool,
('adb', c_void_p, 1),
('path', c_char_p, 1))
adb_copy.__doc__ =\
"""Copy the database file of an abstract database object.
adb -- specifies the abstract database object.
path -- specifies the path of the destination file. If it begins with
'@', the trailing substring is executed as a command line.
If successful, the return value is true, else, it is false. False is
returned if the executed command returns non-zero code.
The database file is assured to be kept synchronized and not modified
while the copying or executing operation is in progress. So, this
function is useful to create a backup file of the database file.
"""
adb_tranbegin = cfunc('tcadbtranbegin', libtc, c_bool,
('adb', c_void_p, 1))
adb_tranbegin.__doc__ =\
"""Begin the transaction of an abstract database object.
adb -- specifies the abstract database object.
If successful, the return value is true, else, it is false.
The database is locked by the thread during the transaction so that
only one transaction can be activated with a database object at the
same time. Thus, the serializable isolation level is assumed if every
database operation is performed in the transaction. All updated
regions are tracked by write-ahead logging during the transaction.
If the database is closed during a transaction, the transaction is
aborted implicitly.
"""
adb_trancommit = cfunc('tcadbtrancommit', libtc, c_bool,
('adb', c_void_p, 1))
adb_trancommit.__doc__ =\
"""Commit the transaction of an abstract database object.
adb -- specifies the abstract database object.
If successful, the return value is true, else, it is false.
Update in the transaction is fixed when it is committed successfully.
"""
adb_tranabort = cfunc('tcadbtranabort', libtc, c_bool,
('adb', c_void_p, 1))
adb_tranabort.__doc__ =\
"""Abort the transaction of an abstract database object.
adb -- specifies the abstract database object.
If successful, the return value is true, else, it is false.
Updates in the transaction are discarded when it is aborted. The state
of the database is rolled back to what it was before the transaction.
"""
adb_path = cfunc('tcadbpath', libtc, c_char_p,
('adb', c_void_p, 1))
adb_path.__doc__ =\
"""Get the file path of an abstract database object.
adb -- specifies the abstract database object.
The return value is the path of the database file or 'NULL' if the
object does not connect to any database. "*" stands for on-memory
hash database. "+" stands for on-memory tree database.
"""
adb_rnum = cfunc('tcadbrnum', libtc, c_uint64,
('adb', c_void_p, 1))
adb_rnum.__doc__ =\
"""Get the number of records of an abstract database object.
adb -- specifies the abstract database object.
The return value is the number of records or 0 if the object does not
connect to any database instance.
"""
adb_size = cfunc('tcadbsize', libtc, c_uint64,
('adb', c_void_p, 1))
adb_size.__doc__ =\
"""Get the size of the database of an abstract database object.
adb -- specifies the abstract database object.
The return value is the size of the database or 0 if the object does
not connect to any database instance.
"""
adb_misc = cfunc('tcadbmisc', libtc, TCLIST_P,
('adb', c_void_p, 1),
('name', c_char_p, 1),
('args', c_void_p, 1))
adb_misc.__doc__ =\
"""Call a versatile function for miscellaneous operations of an
abstract database object.
adb -- specifies the abstract database object.
name -- specifies the name of the function. All databases support
"put", "out", "get", "putlist", "outlist", "getlist", and
"getpart". "put" is to store a record. It receives a key and
a value, and returns an empty list. "out" is to remove a
record. It receives a key, and returns an empty list. "get"
is to retrieve a record. It receives a key, and returns a
list of the values. "putlist" is to store records. It
receives keys and values one after the other, and returns an
empty list. "outlist" is to remove records. It receives
keys, and returns an empty list. "getlist" is to retrieve
records. It receives keys, and returns keys and values of
corresponding records one after the other. "getpart" is to
retrieve the partial value of a record. It receives a key,
the offset of the region, and the length of the region.
args -- specifies a list object containing arguments.
If successful, the return value is a list object of the result.
'NULL' is returned on failure.
Because the object of the return value is created with the function
'tclistnew', it should be deleted with the function 'tclistdel' when
it is no longer in use.
"""
# features for experts
# NOT-TO-BE-IMPLEMENTED: very esoteric function
# adb_setskel = cfunc('tcadbsetskel', libtc, c_bool,
# ('adb', c_void_p, 1),
# ('skel', c_void_p, 1))
# adb_setskel.__doc__ =\
# """Set an extra database skeleton to an abstract database object.
# adb -- specifies the abstract database object.
# skel -- specifies the extra database skeleton.
# If successful, the return value is true, else, it is false.
# """
# adb_setskelmulti = cfunc('tcadbsetskelmulti', libtc, c_bool,
# ('adb', c_void_p, 1),
# ('num', c_int, 1))
# adb_setskelmulti.__doc__ =\
# """Set the multiple database skeleton to an abstract database object.
# adb -- specifies the abstract database object.
# num -- specifies the number of inner databases.
# If successful, the return value is true, else, it is false.
# """
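
# Hedged sketch for adb_fwmkeys2 defined above: collect the keys that share a
# common prefix. Reading the returned list object relies on tclistnum,
# tclistval2 and tclistdel wrappers, which are assumed to be bound earlier in
# this module; if they are named differently there, adjust accordingly.
def _example_adb_prefix_keys(adb, prefix=b'user:', limit=10):
    tclist = adb_fwmkeys2(adb, prefix, limit)
    try:
        return [tclistval2(tclist, i)          # assumed tclist wrappers
                for i in range(tclistnum(tclist))]
    finally:
        tclistdel(tclist)                      # release the list object
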
adb_omode = cfunc('tcadbomode', libtc, c_int,
('adb', c_void_p, 1))
adb_omode.__doc__ =\
"""Get the open mode of an abstract database object.
adb -- specifies the abstract database object.
The return value is 'ADBOVOID' for not opened database, 'ADBOMDB' for
on-memory hash database, 'ADBONDB' for on-memory tree database,
'ADBOHDB' for hash database, 'ADBOBDB' for B+ tree database, 'ADBOFDB'
for fixed-length database, 'ADBOTDB' for table database.
"""
adb_reveal = cfunc('tcadbreveal', libtc, c_void_p,
('adb', c_void_p, 1))
adb_reveal.__doc__ =\
"""Get the concrete database object of an abstract database object.
adb -- specifies the abstract database object.
The return value is the concrete database object depending on the open
mode or 0 if the object does not connect to any database instance.
"""
adb_putproc = cfunc('tcadbputproc', libtc, c_bool,
('adb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1),
('proc', TCPDPROC, 1),
('op', c_void_p, 1))
adb_putproc.__doc__ =\
"""Store a record into an abstract database object with a duplication
handler.
adb -- specifies the abstract database object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value.
proc -- specifies the pointer to the callback function to process
duplication.
op -- specifies an arbitrary pointer to be given as a parameter of
the callback function. If it is not needed, 'NULL' can be
specified.
If successful, the return value is true, else, it is false.
This function does not work for the table database.
"""
adb_foreach = cfunc('tcadbforeach', libtc, c_bool,
('adb', c_void_p, 1),
('iter', TCITER, 1),
('op', c_char_p, 1))
adb_foreach.__doc__ =\
"""Process each record atomically of an abstract database object.
adb -- specifies the abstract database object.
iter -- specifies the pointer to the iterator function called for each
record.
op -- specifies an arbitrary pointer to be given as a parameter of
the iterator function. If it is not needed, 'NULL' can be
specified.
If successful, the return value is true, else, it is false.
"""
# adb_mapbdb = cfunc('tcadbmapbdb', libtc, c_bool,
# ('adb', c_void_p, 1),
# ('keys', TCLIST_P, 1),
# ('bdb', c_void_p, 1),
# ('op', c_void_p, 1),
# ('csiz', c_int64, 1, -1))
# adb_mapbdb.__doc__ =\
# """Map records of an abstract database object into another B+ tree database.
# adb -- specifies the abstract database object.
# keys -- specifies a list object of the keys of the target records. If
# it is 'NULL', every record is processed.
# bdb -- specifies the B+ tree database object into which records
# emitted by the mapping function are stored.
# proc -- specifies the pointer to the mapping function called for each
# record.
# op     -- specifies the pointer to the optional opaque object
# for the mapping function.
# csiz -- specifies the size of the cache to sort emitted records. If
# it is negative, the default size is specified. The default
# size is 268435456.
# If successful, the return value is true, else, it is false.
# """
# adb_mapbdbemit = cfunc('tcadbmapbdbemit', libtc, c_bool,
# ('map', c_void_p, 1),
# ('kbuf', c_void_p, 1),
# ('ksiz', c_int, 1),
# ('vbuf', c_void_p, 1),
# ('vsiz', c_int, 1))
# adb_mapbdbemit.__doc__ =\
# """Emit records generated by the mapping function into the result map.
# kbuf -- specifies the pointer to the region of the key.
# ksiz -- specifies the size of the region of the key.
# vbuf -- specifies the pointer to the region of the value.
# vsiz -- specifies the size of the region of the value.
# If successful, the return value is true, else, it is false.
# """
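
# Before moving on to the hash database API, a hedged sketch of walking every
# key with adb_iterinit/adb_iternext2 from above. How the terminating 'NULL'
# surfaces in Python (None versus an empty tc_char_p wrapper) depends on the
# tc_char_p definition earlier in this module, so the loop checks for
# falsiness rather than comparing against None.
def _example_adb_walk_keys(adb):
    keys = []
    if adb_iterinit(adb):
        while True:
            key = adb_iternext2(adb)
            if not key:                        # 'NULL': iteration exhausted
                break
            keys.append(key)
    return keys
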
#
# Functions from tchdb.h
#
hdb_errmsg = cfunc('tchdberrmsg', libtc, c_char_p,
('ecode', c_int, 1))
hdb_errmsg.__doc__ =\
"""Get the message string corresponding to an error code.
ecode -- specifies the error code.
The return value is the message string of the error code.
"""
hdb_new = cfunc('tchdbnew', libtc, c_void_p)
hdb_new.__doc__ =\
"""Create a hash database object.
The return value is the new hash database object.
"""
hdb_del = cfunc('tchdbdel', libtc, None,
('hdb', c_void_p, 1))
hdb_del.__doc__ =\
"""Delete a hash database object.
hdb -- specifies the hash database object.
If the database is not closed, it is closed implicitly. Note that the
deleted object and its derivatives can not be used anymore.
"""
hdb_ecode = cfunc('tchdbecode', libtc, c_int,
('hdb', c_void_p, 1))
hdb_ecode.__doc__ =\
"""Get the last happened error code of a hash database object.
hdb -- specifies the hash database object.
The return value is the last happened error code.
The following error codes are defined: 'ESUCCESS' for success,
'ETHREAD' for threading error, 'EINVALID' for invalid operation,
'ENOFILE' for file not found, 'ENOPERM' for no permission, 'EMETA' for
invalid meta data, 'ERHEAD' for invalid record header, 'EOPEN' for
open error, 'ECLOSE' for close error, 'ETRUNC' for trunc error,
'ESYNC' for sync error, 'ESTAT' for stat error, 'ESEEK' for seek
error, 'EREAD' for read error, 'EWRITE' for write error, 'EMMAP' for
mmap error, 'ELOCK' for lock error, 'EUNLINK' for unlink error,
'ERENAME' for rename error, 'EMKDIR' for mkdir error, 'ERMDIR' for
rmdir error, 'EKEEP' for existing record, 'ENOREC' for no record
found, and 'EMISC' for miscellaneous error.
"""
hdb_setmutex = cfunc('tchdbsetmutex', libtc, c_bool,
('hdb', c_void_p, 1))
hdb_setmutex.__doc__ =\
"""Set mutual exclusion control of a hash database object for
threading.
hdb -- specifies the hash database object which is not opened.
If successful, the return value is true, else, it is false.
Note that the mutual exclusion control is needed if the object is
shared by plural threads and this function should be called before the
database is opened.
"""
hdb_tune = cfunc('tchdbtune', libtc, c_bool,
('hdb', c_void_p, 1),
('bnum', c_int64, 1, 0),
('apow', c_int8, 1, -1),
('fpow', c_int8, 1, -1),
('opts', c_uint8, 1, 0))
hdb_tune.__doc__ =\
"""Set the tuning parameters of a hash database object.
hdb -- specifies the hash database object which is not opened.
bnum -- specifies the number of elements of the bucket array. If it
is not more than 0, the default value is specified. The
default value is 131071. Suggested size of the bucket array
is about from 0.5 to 4 times of the number of all records to
be stored.
apow -- specifies the size of record alignment by power of 2. If it
is negative, the default value is specified. The default
value is 4 standing for 2^4=16.
fpow -- specifies the maximum number of elements of the free block pool
by power of 2. If it is negative, the default value is
specified. The default value is 10 standing for 2^10=1024.
opts -- specifies options by bitwise-or: 'TLARGE' specifies that the
size of the database can be larger than 2GB by using 64-bit
bucket array, 'TDEFLATE' specifies that each record is
compressed with Deflate encoding, 'TBZIP' specifies that each
record is compressed with BZIP2 encoding, 'TTCBS' specifies
that each record is compressed with TCBS encoding.
If successful, the return value is true, else, it is false.
Note that the tuning parameters should be set before the database is
opened.
"""
hdb_setcache = cfunc('tchdbsetcache', libtc, c_bool,
('hdb', c_void_p, 1),
('rcnum', c_int32, 1, 0))
hdb_setcache.__doc__ =\
"""Set the caching parameters of a hash database object.
hdb -- specifies the hash database object which is not opened.
rcnum -- specifies the maximum number of records to be cached. If it
is not more than 0, the record cache is disabled. It is
disabled by default.
If successful, the return value is true, else, it is false.
Note that the caching parameters should be set before the database is
opened.
"""
hdb_setxmsiz = cfunc('tchdbsetxmsiz', libtc, c_bool,
('hdb', c_void_p, 1),
('xmsiz', c_int64, 1, 67108864))
hdb_setxmsiz.__doc__ =\
"""Set the size of the extra mapped memory of a hash database object.
hdb -- specifies the hash database object which is not opened.
xmsiz -- specifies the size of the extra mapped memory. If it is not
more than 0, the extra mapped memory is disabled. The
default size is 67108864.
If successful, the return value is true, else, it is false.
Note that the mapping parameters should be set before the database is
opened.
"""
hdb_setdfunit = cfunc('tchdbsetdfunit', libtc, c_bool,
('hdb', c_void_p, 1),
('dfunit', c_int32, 1, 0))
hdb_setdfunit.__doc__ =\
"""Set the unit step number of auto defragmentation of a hash database
object.
hdb -- specifies the hash database object which is not opened.
dfunit -- specifies the unit step number. If it is not more than 0, the
auto defragmentation is disabled. It is disabled by
default.
If successful, the return value is true, else, it is false.
Note that the defragmentation parameters should be set before the
database is opened.
"""
hdb_open = cfunc('tchdbopen', libtc, c_bool,
('hdb', c_void_p, 1),
('path', c_char_p, 1),
('omode', c_int, 1))
hdb_open.__doc__ =\
"""Open a database file and connect a hash database object.
hdb -- specifies the hash database object which is not opened.
path -- specifies the path of the database file.
omode -- specifies the connection mode: 'OWRITER' as a writer,
'OREADER' as a reader. If the mode is 'OWRITER', the
following may be added by bitwise-or: 'OCREAT', which means
it creates a new database if it does not exist, 'OTRUNC', which means
it creates a new database regardless of whether one exists, 'OTSYNC',
which means every transaction synchronizes updated contents
with the device. Both of 'OREADER' and 'OWRITER' can be
added to by bitwise-or: 'ONOLCK', which means it opens the
database file without file locking, or 'OLCKNB', which means
locking is performed without blocking.
If successful, the return value is true, else, it is false.
"""
hdb_close = cfunc('tchdbclose', libtc, c_bool,
('hdb', c_void_p, 1))
hdb_close.__doc__ =\
"""Close a hash database object.
hdb -- specifies the hash database object.
If successful, the return value is true, else, it is false.
Update of a database is assured to be written when the database is
closed. If a writer opens a database but does not close it
appropriately, the database will be broken.
"""
hdb_put = cfunc('tchdbput', libtc, c_bool,
('hdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
hdb_put.__doc__ =\
"""Store a record into a hash database object.
hdb -- specifies the hash database object connected as a writer.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, it is
overwritten.
"""
hdb_put2 = cfunc_fast('tchdbput2', libtc, c_bool,
('hdb', c_void_p, 1),
('kstr', c_char_p, 1),
('vstr', c_char_p, 1))
hdb_put2.__doc__ =\
"""Store a string record into a hash database object.
hdb -- specifies the hash database object connected as a writer.
kstr -- specifies the string of the key.
vstr -- specifies the string of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, it is
overwritten.
"""
hdb_putkeep = cfunc('tchdbputkeep', libtc, c_bool,
('hdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
hdb_putkeep.__doc__ =\
"""Store a new record into a hash database object.
hdb -- specifies the hash database object connected as a writer.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, this function
has no effect.
"""
hdb_putkeep2 = cfunc_fast('tchdbputkeep2', libtc, c_bool,
('hdb', c_void_p, 1),
('kstr', c_char_p, 1),
('vstr', c_char_p, 1))
hdb_putkeep2.__doc__ =\
"""Store a new string record into a hash database object.
hdb -- specifies the hash database object connected as a writer.
kstr -- specifies the string of the key.
vstr -- specifies the string of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, this function
has no effect.
"""
hdb_putcat = cfunc('tchdbputcat', libtc, c_bool,
('hdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
hdb_putcat.__doc__ =\
"""Concatenate a value at the end of the existing record in a hash
database object.
hdb -- specifies the hash database object connected as a writer.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value.
If successful, the return value is true, else, it is false.
If there is no corresponding record, a new record is created.
"""
hdb_putcat2 = cfunc_fast('tchdbputcat2', libtc, c_bool,
('hdb', c_void_p, 1),
('kstr', c_char_p, 1),
('vstr', c_char_p, 1))
hdb_putcat2.__doc__ =\
"""Concatenate a string value at the end of the existing record in a
hash database object.
hdb -- specifies the hash database object connected as a writer.
kstr -- specifies the string of the key.
vstr -- specifies the string of the value.
If successful, the return value is true, else, it is false.
If there is no corresponding record, a new record is created.
"""
hdb_putasync = cfunc('tchdbputasync', libtc, c_bool,
('hdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
hdb_putasync.__doc__ =\
"""Store a record into a hash database object in asynchronous fashion.
hdb -- specifies the hash database object connected as a writer.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, it is
overwritten. Records passed to this function are accumulated in the
inner buffer and written to the file at once.
"""
hdb_putasync2 = cfunc_fast('tchdbputasync2', libtc, c_bool,
('hdb', c_void_p, 1),
('kstr', c_char_p, 1),
('vstr', c_char_p, 1))
hdb_putasync2.__doc__ =\
"""Store a string record into a hash database object in asynchronous
fashion.
hdb -- specifies the hash database object connected as a writer.
kstr -- specifies the string of the key.
vstr -- specifies the string of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, it is
overwritten. Records passed to this function are accumulated in the
inner buffer and written to the file at once.
"""
hdb_out = cfunc('tchdbout', libtc, c_bool,
('hdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1))
hdb_out.__doc__ =\
"""Remove a record of a hash database object.
hdb -- specifies the hash database object connected as a writer.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
If successful, the return value is true, else, it is false.
"""
hdb_out2 = cfunc_fast('tchdbout2', libtc, c_bool,
('hdb', c_void_p, 1),
('kstr', c_char_p, 1))
hdb_out2.__doc__ =\
"""Remove a string record of a hash database object.
hdb -- specifies the hash database object connected as a writer.
kstr -- specifies the string of the key.
If successful, the return value is true, else, it is false.
"""
hdb_get = cfunc('tchdbget', libtc, tc_void_p,
('hdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('sp', c_int_p, 2))
hdb_get.errcheck = lambda result, func, arguments : (result, arguments[3])
hdb_get.__doc__ =\
"""Retrieve a record in a hash database object.
hdb -- specifies the hash database object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
If successful, the return value is the pointer to the region of the
value of the corresponding record. 'NULL' is returned if no record
corresponds.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. Because the region of the return value is allocated with the
'malloc' call, it should be released with the 'free' call when it is
no longer in use.
"""
hdb_get2 = cfunc_fast('tchdbget2', libtc, tc_char_p,
('hdb', c_void_p, 1),
('kstr', c_char_p, 1))
hdb_get2.__doc__ =\
"""Retrieve a string record in a hash database object.
hdb -- specifies the hash database object.
kstr -- specifies the string of the key.
If successful, the return value is the string of the value of the
corresponding record. 'NULL' is returned if no record corresponds.
Because the region of the return value is allocated with the 'malloc'
call, it should be released with the 'free' call when it is no longer
in use.
"""
hdb_get3 = cfunc('tchdbget3', libtc, c_int,
('hdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('max', c_int, 1))
hdb_get3.__doc__ =\
"""Retrieve a record in a hash database object and write the value
into a buffer.
hdb -- specifies the hash database object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the buffer into which the value of
the corresponding record is written.
max -- specifies the size of the buffer.
If successful, the return value is the size of the written data, else,
it is -1. -1 is returned if no record corresponds to the specified
key.
Note that an additional zero code is not appended at the end of the
region of the writing buffer.
"""
hdb_vsiz = cfunc('tchdbvsiz', libtc, c_int,
('hdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1))
hdb_vsiz.__doc__ =\
"""Get the size of the value of a record in a hash database object.
hdb -- specifies the hash database object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
If successful, the return value is the size of the value of the
corresponding record, else, it is -1.
"""
hdb_vsiz2 = cfunc_fast('tchdbvsiz2', libtc, c_int,
('hdb', c_void_p, 1),
('kstr', c_char_p, 1))
hdb_vsiz2.__doc__ =\
"""Get the size of the value of a string record in a hash database
object.
hdb -- specifies the hash database object.
kstr -- specifies the string of the key.
If successful, the return value is the size of the value of the
corresponding record, else, it is -1.
"""
hdb_iterinit = cfunc('tchdbiterinit', libtc, c_bool,
('hdb', c_void_p, 1))
hdb_iterinit.__doc__ =\
"""Initialize the iterator of a hash database object.
hdb -- specifies the hash database object.
If successful, the return value is true, else, it is false.
The iterator is used in order to access the key of every record stored
in a database.
"""
hdb_iternext = cfunc('tchdbiternext', libtc, tc_void_p,
('hdb', c_void_p, 1),
('sp', c_int_p, 2))
hdb_iternext.errcheck = lambda result, func, arguments : (result, arguments[1])
hdb_iternext.__doc__ =\
"""Get the next key of the iterator of a hash database object.
hdb -- specifies the hash database object.
sp -- specifies the pointer to the variable into which the size of the
region of the return value is assigned.
If successful, the return value is the pointer to the region of the
next key, else, it is 'NULL'. 'NULL' is returned when no record can
be fetched from the iterator.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. Because the region of the return value is allocated with the
'malloc' call, it should be released with the 'free' call when it is
no longer in use. It is possible to access every record by calling
this function iteratively. It is allowed to update or remove records
whose keys have already been fetched during the iteration. However,
consistent results are not assured if the database is otherwise
updated while the iteration is in progress. Besides, the order of
this traversal access method is arbitrary, so it is not assured that
the order of storing matches the order of the traversal access.
"""
hdb_iternext2 = cfunc_fast('tchdbiternext2', libtc, tc_char_p,
('hdb', c_void_p, 1))
hdb_iternext2.__doc__ =\
"""Get the next key string of the iterator of a hash database object.
hdb -- specifies the hash database object.
If successful, the return value is the string of the next key, else,
it is 'NULL'. 'NULL' is returned when no record can be fetched from
the iterator.
Because the region of the return value is allocated with the 'malloc'
call, it should be released with the 'free' call when it is no longer
in use. It is possible to access every record by calling this
function iteratively. However, consistent results are not assured if
the database is updated while the iteration is in progress. Besides,
the order of this traversal access method is arbitrary, so it is not
assured that the order of storing matches the order of the traversal
access.
"""
hdb_iternext3 = cfunc('tchdbiternext3', libtc, c_bool,
('hdb', c_void_p, 1),
('kxstr', TCXSTR_P, 1),
('vxstr', TCXSTR_P, 1))
hdb_iternext3.__doc__ =\
"""Get the next extensible objects of the iterator of a hash database
object.
hdb -- specifies the hash database object.
kxstr -- specifies the object into which the next key is written.
vxstr -- specifies the object into which the next value is written.
If successful, the return value is true, else, it is false. False is
returned when no record can be fetched from the iterator.
"""
hdb_fwmkeys = cfunc('tchdbfwmkeys', libtc, TCLIST_P,
('hdb', c_void_p, 1),
('pbuf', c_void_p, 1),
('psiz', c_int, 1),
('max', c_int, 1, -1))
hdb_fwmkeys.__doc__ =\
"""Get forward matching keys in a hash database object.
hdb -- specifies the hash database object.
pbuf -- specifies the pointer to the region of the prefix.
psiz -- specifies the size of the region of the prefix.
max -- specifies the maximum number of keys to be fetched. If it is
negative, no limit is specified.
The return value is a list object of the corresponding keys. This
function never fails. It returns an empty list even if no key
corresponds.
Because the object of the return value is created with the function
'tclistnew', it should be deleted with the function 'tclistdel' when
it is no longer in use. Note that this function may be very slow
because every key in the database is scanned.
"""
hdb_fwmkeys2 = cfunc_fast('tchdbfwmkeys2', libtc, TCLIST_P,
('hdb', c_void_p, 1),
('pstr', c_char_p, 1),
('max', c_int, 1, -1))
hdb_fwmkeys2.__doc__ =\
"""Get forward matching string keys in a hash database object.
hdb -- specifies the hash database object.
pstr -- specifies the string of the prefix.
max -- specifies the maximum number of keys to be fetched. If it is
negative, no limit is specified.
The return value is a list object of the corresponding keys. This
function never fails. It returns an empty list even if no key
corresponds.
Because the object of the return value is created with the function
'tclistnew', it should be deleted with the function 'tclistdel' when
it is no longer in use. Note that this function may be very slow
because every key in the database is scanned.
"""
hdb_addint = cfunc('tchdbaddint', libtc, c_int,
('hdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('num', c_int, 1))
hdb_addint.__doc__ =\
"""Add an integer to a record in a hash database object.
hdb -- specifies the hash database object connected as a writer.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
num -- specifies the additional value.
If successful, the return value is the summation value, else, it is
'INT_MIN'.
If the corresponding record exists, the value is treated as an integer
and is added to. If no record corresponds, a new record of the
additional value is stored.
"""
hdb_adddouble = cfunc('tchdbadddouble', libtc, c_double,
('hdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('num', c_double, 1))
hdb_adddouble.__doc__ =\
"""Add a real number to a record in a hash database object.
hdb -- specifies the hash database object connected as a writer.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
num -- specifies the additional value.
If successful, the return value is the summation value, else, it is
Not-a-Number.
If the corresponding record exists, the value is treated as a real
number and is added to. If no record corresponds, a new record of the
additional value is stored.
"""
hdb_sync = cfunc('tchdbsync', libtc, c_bool,
('hdb', c_void_p, 1))
hdb_sync.__doc__ =\
"""Synchronize updated contents of a hash database object with the
file and the device.
hdb -- specifies the hash database object connected as a writer.
If successful, the return value is true, else, it is false.
This function is useful when another process connects to the same
database file.
"""
hdb_optimize = cfunc('tchdboptimize', libtc, c_bool,
('hdb', c_void_p, 1),
('bnum', c_int64, 1, 0),
('apow', c_int8, 1, -1),
('fpow', c_int8, 1, -1),
('opts', c_uint8, 1, 255))
hdb_optimize.__doc__ =\
"""Optimize the file of a hash database object.
hdb -- specifies the hash database object connected as a writer.
bnum -- specifies the number of elements of the bucket array. If it
is not more than 0, the default value is specified. The
default value is two times of the number of records.
apow -- specifies the size of record alignment by power of 2. If it
is negative, the current setting is not changed.
fpow -- specifies the maximum number of elements of the free block
pool by power of 2. If it is negative, the current setting is
not changed.
opts -- specifies options by bitwise-or: 'TLARGE' specifies that the
size of the database can be larger than 2GB by using 64-bit
bucket array, 'TDEFLATE' specifies that each record is
compressed with Deflate encoding, 'TBZIP' specifies that each
record is compressed with BZIP2 encoding, 'TTCBS' specifies
that each record is compressed with TCBS encoding. If it is
'UINT8_MAX', the current setting is not changed.
If successful, the return value is true, else, it is false.
This function is useful to reduce the size of the database file
fragmented by successive updates.
"""
hdb_vanish = cfunc('tchdbvanish', libtc, c_bool,
('hdb', c_void_p, 1))
hdb_vanish.__doc__ =\
"""Remove all records of a hash database object.
hdb -- specifies the hash database object connected as a writer.
If successful, the return value is true, else, it is false.
"""
hdb_copy = cfunc('tchdbcopy', libtc, c_bool,
('hdb', c_void_p, 1),
('path', c_char_p, 1))
hdb_copy.__doc__ =\
"""Copy the database file of a hash database object.
hdb -- specifies the hash database object.
path -- specifies the path of the destination file. If it begins with
'@', the trailing substring is executed as a command line.
If successful, the return value is true, else, it is false. False is
returned if the executed command returns non-zero code.
The database file is assured to be kept synchronized and not modified
while the copying or executing operation is in progress. So, this
function is useful to create a backup file of the database file.
"""
hdb_tranbegin = cfunc('tchdbtranbegin', libtc, c_bool,
('hdb', c_void_p, 1))
hdb_tranbegin.__doc__ =\
"""Begin the transaction of a hash database object.
hdb -- specifies the hash database object connected as a writer.
If successful, the return value is true, else, it is false.
The database is locked by the thread during the transaction so that
only one transaction can be activated with a database object at the
same time. Thus, the serializable isolation level is assumed if every
database operation is performed in the transaction. All updated
regions are tracked by write-ahead logging during the transaction.
If the database is closed during a transaction, the transaction is
aborted implicitly.
"""
hdb_trancommit = cfunc('tchdbtrancommit', libtc, c_bool,
('hdb', c_void_p, 1))
hdb_trancommit.__doc__ =\
"""Commit the transaction of a hash database object.
hdb -- specifies the hash database object connected as a writer.
If successful, the return value is true, else, it is false.
Update in the transaction is fixed when it is committed successfully.
"""
hdb_tranabort = cfunc('tchdbtranabort', libtc, c_bool,
('hdb', c_void_p, 1))
hdb_tranabort.__doc__ =\
"""Abort the transaction of a hash database object.
hdb -- specifies the hash database object connected as a writer.
If successful, the return value is true, else, it is false.
Update in the transaction is discarded when it is aborted. The state
of the database is rolled back to before the transaction.
"""
hdb_path = cfunc('tchdbpath', libtc, c_char_p,
('hdb', c_void_p, 1))
hdb_path.__doc__ =\
"""Get the file path of a hash database object.
hdb -- specifies the hash database object.
The return value is the path of the database file or 'NULL' if the
object does not connect to any database file.
"""
hdb_rnum = cfunc('tchdbrnum', libtc, c_uint64,
('hdb', c_void_p, 1))
hdb_rnum.__doc__ =\
"""Get the number of records of a hash database object.
hdb -- specifies the hash database object.
The return value is the number of records or 0 if the object does not
connect to any database file.
"""
hdb_fsiz = cfunc('tchdbfsiz', libtc, c_uint64,
('hdb', c_void_p, 1))
hdb_fsiz.__doc__ =\
"""Get the size of the database file of a hash database object.
hdb -- specifies the hash database object.
The return value is the size of the database file or 0 if the object
does not connect to any database file.
"""
# features for experts
hdb_setecode = cfunc('tchdbsetecode', libtc, None,
('hdb', c_void_p, 1),
('ecode', c_int, 1),
('filename', c_char_p, 1),
('line', c_int, 1),
('func', c_char_p, 1))
hdb_setecode.__doc__ =\
"""Set the error code of a hash database object.
hdb -- specifies the hash database object.
ecode -- specifies the error code.
filename -- specifies the file name of the code.
line -- specifies the line number of the code.
func -- specifies the function name of the code.
"""
hdb_settype = cfunc('tchdbsettype', libtc, None,
('hdb', c_void_p, 1),
('type', c_uint8, 1))
hdb_settype.__doc__ =\
"""Set the type of a hash database object.
hdb -- specifies the hash database object.
type -- specifies the database type.
"""
hdb_setdbgfd = cfunc('tchdbsetdbgfd', libtc, None,
('hdb', c_void_p, 1),
('fd', c_int, 1))
hdb_setdbgfd.__doc__ =\
"""Set the file descriptor for debugging output.
hdb -- specifies the hash database object.
fd -- specifies the file descriptor for debugging output.
"""
hdb_dbgfd = cfunc('tchdbdbgfd', libtc, c_int,
('hdb', c_void_p, 1))
hdb_dbgfd.__doc__ =\
"""Get the file descriptor for debugging output.
hdb -- specifies the hash database object.
The return value is the file descriptor for debugging output.
"""
hdb_hasmutex = cfunc('tchdbhasmutex', libtc, c_bool,
('hdb', c_void_p, 1))
hdb_hasmutex.__doc__ =\
"""Check whether mutual exclusion control is set to a hash database
object.
hdb -- specifies the hash database object.
If mutual exclusion control is set, it is true, else it is false.
"""
hdb_memsync = cfunc('tchdbmemsync', libtc, c_bool,
('hdb', c_void_p, 1),
('phys', c_bool, 1))
hdb_memsync.__doc__ =\
"""Synchronize updating contents on memory of a hash database object.
hdb -- specifies the hash database object connected as a writer.
phys -- specifies whether to synchronize physically.
If successful, the return value is true, else, it is false.
"""
hdb_bnum = cfunc('tchdbbnum', libtc, c_uint64,
('hdb', c_void_p, 1))
hdb_bnum.__doc__ =\
"""Get the number of elements of the bucket array of a hash database
object.
hdb -- specifies the hash database object.
The return value is the number of elements of the bucket array or 0 if
the object does not connect to any database file.
"""
hdb_align = cfunc('tchdbalign', libtc, c_uint32,
('hdb', c_void_p, 1))
hdb_align.__doc__ =\
"""Get the record alignment of a hash database object.
hdb -- specifies the hash database object.
The return value is the record alignment or 0 if the object does not
connect to any database file.
"""
hdb_fbpmax = cfunc('tchdbfbpmax', libtc, c_uint32,
('hdb', c_void_p, 1))
hdb_fbpmax.__doc__ =\
"""Get the maximum number of the free block pool of a a hash database
object.
hdb -- specifies the hash database object.
The return value is the maximum number of the free block pool or 0 if
the object does not connect to any database file.
"""
hdb_xmsiz = cfunc('tchdbxmsiz', libtc, c_uint64,
('hdb', c_void_p, 1))
hdb_xmsiz.__doc__ =\
"""Get the size of the extra mapped memory of a hash database object.
hdb -- specifies the hash database object.
The return value is the size of the extra mapped memory or 0 if the
object does not connect to any database file.
"""
hdb_inode = cfunc('tchdbinode', libtc, c_uint64,
('hdb', c_void_p, 1))
hdb_inode.__doc__ =\
"""Get the inode number of the database file of a hash database
object.
hdb -- specifies the hash database object.
The return value is the inode number of the database file or 0 if the
object does not connect to any database file.
"""
hdb_mtime = cfunc('tchdbmtime', libtc, c_time,
('hdb', c_void_p, 1))
hdb_mtime.__doc__ =\
"""Get the modification time of the database file of a hash database
object.
hdb -- specifies the hash database object.
The return value is the modification time of the database file or 0
if the object does not connect to any database file.
"""
hdb_omode = cfunc('tchdbomode', libtc, c_int,
('hdb', c_void_p, 1))
hdb_omode.__doc__ =\
"""Get the connection mode of a hash database object.
hdb -- specifies the hash database object.
The return value is the connection mode.
"""
hdb_type = cfunc('tchdbtype', libtc, c_uint8,
('hdb', c_void_p, 1))
hdb_type.__doc__ =\
"""Get the database type of a hash database object.
hdb -- specifies the hash database object.
The return value is the database type.
"""
hdb_flags = cfunc('tchdbflags', libtc, c_uint8,
('hdb', c_void_p, 1))
hdb_flags.__doc__ =\
"""Get the additional flags of a hash database object.
hdb -- specifies the hash database object.
The return value is the additional flags.
"""
hdb_opts = cfunc('tchdbopts', libtc, c_uint8,
('hdb', c_void_p, 1))
hdb_opts.__doc__ =\
"""Get the options of a hash database object.
hdb -- specifies the hash database object.
The return value is the options.
"""
hdb_opaque = cfunc('tchdbopaque', libtc, c_char_p,
('hdb', c_void_p, 1))
hdb_opaque.__doc__ =\
"""Get the pointer to the opaque field of a hash database object.
hdb -- specifies the hash database object.
The return value is the pointer to the opaque field whose size is 128
bytes.
"""
hdb_bnumused = cfunc('tchdbbnumused', libtc, c_uint64,
('hdb', c_void_p, 1))
hdb_bnumused.__doc__ =\
"""Get the number of used elements of the bucket array of a hash
database object.
hdb -- specifies the hash database object.
The return value is the number of used elements of the bucket array or
0 if the object does not connect to any database file.
"""
hdb_setcodecfunc = cfunc('tchdbsetcodecfunc', libtc, c_bool,
('hdb', c_void_p, 1),
('enc', TCCODEC, 1),
('encop', c_void_p, 1),
('dec', TCCODEC, 1),
('decop', c_void_p, 1))
hdb_setcodecfunc.__doc__ =\
"""Set the custom codec functions of a hash database object.
hdb -- specifies the hash database object.
enc -- specifies the pointer to the custom encoding function. It
receives four parameters. The first parameter is the pointer
to the region. The second parameter is the size of the
region. The third parameter is the pointer to the variable
into which the size of the region of the return value is
assigned. The fourth parameter is the pointer to the
optional opaque object. It returns the pointer to the result
object allocated with 'malloc' call if successful, else, it
returns 'NULL'.
encop -- specifies an arbitrary pointer to be given as a parameter of
the encoding function. If it is not needed, 'NULL' can be
specified.
dec -- specifies the pointer to the custom decoding function.
decop -- specifies an arbitrary pointer to be given as a parameter of
the decoding function. If it is not needed, 'NULL' can be
specified.
If successful, the return value is true, else, it is false.
Note that the custom codec functions should be set before the database
is opened and should be set every time the database is being opened.
"""
hdb_codecfunc = cfunc('tchdbcodecfunc', libtc, None,
('hdb', c_void_p, 1),
('ep', TCCODEC_P, 2),
('eop', c_void_p, 2),
('dp', TCCODEC_P, 2),
('dop', c_void_p, 2))
hdb_codecfunc.errcheck = lambda result, func, arguments :\
(result, arguments[1], arguments[2], arguments[3], arguments[4])
hdb_codecfunc.__doc__ =\
"""Get the custom codec functions of a hash database object.
hdb -- specifies the hash database object.
ep -- specifies the pointer to a variable into which the pointer to
      the custom encoding function is assigned.
eop -- specifies the pointer to a variable into which the arbitrary
pointer to be given to the encoding function is assigned.
dp -- specifies the pointer to a variable into which the pointer to
      the custom decoding function is assigned.
dop -- specifies the pointer to a variable into which the arbitrary
pointer to be given to the decoding function is assigned.
"""
hdb_dfunit = cfunc('tchdbdfunit', libtc, c_uint32,
('hdb', c_void_p, 1))
hdb_dfunit.__doc__ =\
"""Get the unit step number of auto defragmentation of a hash database
object.
hdb -- specifies the hash database object.
The return value is the unit step number of auto defragmentation.
"""
hdb_defrag = cfunc('tchdbdefrag', libtc, c_bool,
('hdb', c_void_p, 1),
('step', c_int64, 1))
hdb_defrag.__doc__ =\
"""Perform dynamic defragmentation of a hash database object.
hdb -- specifies the hash database object connected as a writer.
step -- specifies the number of steps. If it is not more than 0, the
whole file is defragmented gradually without keeping a
continuous lock.
If successful, the return value is true, else, it is false.
"""
hdb_cacheclear = cfunc('tchdbcacheclear', libtc, c_bool,
('hdb', c_void_p, 1))
hdb_cacheclear.__doc__ =\
"""Clear the cache of a hash tree database object.
hdb -- specifies the hash tree database object.
If successful, the return value is true, else, it is false.
"""
hdb_putproc = cfunc('tchdbputproc', libtc, c_bool,
('hdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1),
('proc', TCPDPROC, 1),
('op', c_void_p, 1))
hdb_putproc.__doc__ =\
"""Store a record into a hash database object with a duplication
handler.
hdb -- specifies the hash database object connected as a writer.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value. 'NULL'
        means that record addition is omitted if there is no
corresponding record.
vsiz -- specifies the size of the region of the value.
proc -- specifies the pointer to the callback function to process
duplication. It receives four parameters. The first
parameter is the pointer to the region of the value. The
second parameter is the size of the region of the value. The
third parameter is the pointer to the variable into which the
size of the region of the return value is assigned. The
fourth parameter is the pointer to the optional opaque object.
It returns the pointer to the result object allocated with
'malloc'. It is released by the caller. If it is 'NULL', the
record is not modified. If it is '(void *)-1', the record is
removed.
op -- specifies an arbitrary pointer to be given as a parameter of
the callback function. If it is not needed, 'NULL' can be
specified.
If successful, the return value is true, else, it is false.
Note that the callback function cannot perform any database operation
because the function is called in the critical section guarded by the
same locks of database operations.
"""
hdb_getnext = cfunc('tchdbgetnext', libtc, tc_void_p,
('hdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('sp', c_int_p, 2))
hdb_getnext.errcheck = lambda result, func, arguments : (result, arguments[3])
hdb_getnext.__doc__ =\
"""Retrieve the next record of a record in a hash database object.
hdb -- specifies the hash database object.
kbuf -- specifies the pointer to the region of the key. If it is
'NULL', the first record is retrieved.
ksiz -- specifies the size of the region of the key.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
If successful, the return value is the pointer to the region of the
key of the next record. 'NULL' is returned if no record corresponds.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. Because the region of the return value is allocated with the
'malloc' call, it should be released with the 'free' call when it is
no longer in use.
"""
hdb_getnext2 = cfunc('tchdbgetnext2', libtc, tc_char_p,
('hdb', c_void_p, 1),
('kstr', c_char_p, 1))
hdb_getnext2.__doc__ =\
"""Retrieve the next string record in a hash database object.
hdb -- specifies the hash database object.
kstr -- specifies the string of the key. If it is 'NULL', the first
record is retrieved.
If successful, the return value is the string of the key of the next
record. 'NULL' is returned if no record corresponds.
Because the region of the return value is allocated with the 'malloc'
call, it should be released with the 'free' call when it is no longer
in use.
"""
hdb_getnext3 = cfunc('tchdbgetnext3', libtc, tc_char_p,
('hdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('sp', c_int_p, 2),
('vbp', POINTER(tc_char_p), 2),
('vsp', c_int_p, 2))
hdb_getnext3.errcheck = lambda result, func, arguments :\
(result, arguments[3], arguments[4], arguments[5])
hdb_getnext3.__doc__ =\
"""Retrieve the key and the value of the next record of a record in a
hash database object.
hdb -- specifies the hash database object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
vbp -- specifies the pointer to the variable into which the pointer
to the value is assigned.
vsp -- specifies the pointer to the variable into which the size of
the value is assigned.
If successful, the return value is the pointer to the region of the
key of the next record.
Because the region of the return value is allocated with the 'malloc'
call, it should be released with the 'free' call when it is no longer
in use. The region pointed to by 'vbp' should not be released.
"""
hdb_iterinit2 = cfunc('tchdbiterinit2', libtc, c_bool,
('hdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1))
hdb_iterinit2.__doc__ =\
"""Move the iterator to the record corresponding a key of a hash
database object.
hdb -- specifies the hash database object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
If successful, the return value is true, else, it is false. False is
returned if there is no record corresponding to the condition.
"""
hdb_iterinit3 = cfunc('tchdbiterinit3', libtc, c_bool,
('hdb', c_void_p, 1),
('kstr', c_char_p, 1))
hdb_iterinit3.__doc__ =\
"""Move the iterator to the record corresponding a key string of a
hash database object.
hdb -- specifies the hash database object.
kstr -- specifies the string of the key.
If successful, the return value is true, else, it is false. False is
returned if there is no record corresponding to the condition.
"""
hdb_foreach = cfunc('tchdbforeach', libtc, c_bool,
('hdb', c_void_p, 1),
('iter', TCITER, 1),
('op', c_char_p, 1))
hdb_foreach.__doc__ =\
"""Process each record atomically of a hash database object.
hdb -- specifies the hash database object.
iter -- specifies the pointer to the iterator function called for
each record. It receives five parameters. The first
parameter is the pointer to the region of the key. The
second parameter is the size of the region of the key. The
third parameter is the pointer to the region of the value.
The fourth parameter is the size of the region of the value.
The fifth parameter is the pointer to the optional opaque
object. It returns true to continue iteration or false to
stop iteration.
op -- specifies an arbitrary pointer to be given as a parameter of
the iterator function. If it is not needed, 'NULL' can be
specified.
If successful, the return value is true, else, it is false.
Note that the callback function cannot perform any database operation
because the function is called in the critical section guarded by the
same locks of database operations.
"""
hdb_tranvoid = cfunc('tchdbtranvoid', libtc, c_bool,
('hdb', c_void_p, 1))
hdb_tranvoid.__doc__ =\
"""Void the transaction of a hash database object.
hdb -- specifies the hash database object connected as a writer.
If successful, the return value is true, else, it is false.
This function should be called only when there is no update in the
transaction.
"""
#
# Functions from tcbdb.h
#
bdb_errmsg = cfunc('tcbdberrmsg', libtc, c_char_p,
('ecode', c_int, 1))
bdb_errmsg.__doc__ =\
"""Get the message string corresponding to an error code.
ecode -- specifies the error code.
The return value is the message string of the error code.
"""
bdb_new = cfunc('tcbdbnew', libtc, c_void_p)
bdb_new.__doc__ =\
"""Create a B+ tree database object.
The return value is the new B+ tree database object.
"""
bdb_del = cfunc('tcbdbdel', libtc, None,
('bdb', c_void_p, 1))
bdb_del.__doc__ =\
"""Delete a B+ tree database object.
bdb -- specifies the B+ tree database object.
If the database is not closed, it is closed implicitly. Note that the
deleted object and its derivatives can not be used anymore.
"""
bdb_ecode = cfunc('tcbdbecode', libtc, c_int,
('bdb', c_void_p, 1))
bdb_ecode.__doc__ =\
"""Get the last happened error code of a B+ tree database object.
bdb -- specifies the B+ tree database object.
The return value is the last happened error code.
The following error codes are defined: 'ESUCCESS' for success,
'ETHREAD' for threading error, 'EINVALID' for invalid operation,
'ENOFILE' for file not found, 'ENOPERM' for no permission, 'EMETA' for
invalid meta data, 'ERHEAD' for invalid record header, 'EOPEN' for
open error, 'ECLOSE' for close error, 'ETRUNC' for trunc error,
'ESYNC' for sync error, 'ESTAT' for stat error, 'ESEEK' for seek
error, 'EREAD' for read error, 'EWRITE' for write error, 'EMMAP' for
mmap error, 'ELOCK' for lock error, 'EUNLINK' for unlink error,
'ERENAME' for rename error, 'EMKDIR' for mkdir error, 'ERMDIR' for
rmdir error, 'EKEEP' for existing record, 'ENOREC' for no record
found, and 'EMISC' for miscellaneous error.
"""
bdb_setmutex = cfunc('tcbdbsetmutex', libtc, c_bool,
('bdb', c_void_p, 1))
bdb_setmutex.__doc__ =\
"""Set mutual exclusion control of a B+ tree database object for
threading.
bdb -- specifies the B+ tree database object which is not opened.
If successful, the return value is true, else, it is false.
Note that the mutual exclusion control is needed if the object is
shared by multiple threads, and this function should be called
before the database is opened.
"""
bdb_setcmpfunc = cfunc('tcbdbsetcmpfunc', libtc, c_bool,
('bdb', c_void_p, 1),
('cmp', TCCMP, 1),
('cmpop', c_void_p, 1))
bdb_setcmpfunc.__doc__=\
"""Set the custom comparison function of a B+ tree database object.
bdb -- specifies the B+ tree database object which is not opened.
cmp -- specifies the pointer to the custom comparison function. It
receives five parameters. The first parameter is the pointer
to the region of one key. The second parameter is the size
of the region of one key. The third parameter is the pointer
to the region of the other key. The fourth parameter is the
size of the region of the other key. The fifth parameter is
       the pointer to the optional opaque object. It returns
       positive if the former is greater, negative if the latter is
       greater, and 0 if both are equal.
cmpop -- specifies an arbitrary pointer to be given as a parameter of
the comparison function. If it is not needed, 'NULL' can be
specified.
If successful, the return value is true, else, it is false.
The default comparison function compares keys of two records by
lexical order. The functions 'tctccmplexical' (default),
'tctccmpdecimal', 'tctccmpint32', and 'tctccmpint64' are built-in.
Note that the comparison function should be set before the database is
opened. Moreover, user-defined comparison functions should be set
every time the database is being opened.
"""
bdb_tune = cfunc('tcbdbtune', libtc, c_bool,
('bdb', c_void_p, 1),
('lmemb', c_int32, 1, 0),
('nmemb', c_int32, 1, 0),
('bnum', c_int64, 1, 0),
('apow', c_int8, 1, -1),
('fpow', c_int8, 1, -1),
('opts', c_uint8, 1, 0))
bdb_tune.__doc__ =\
"""Set the tuning parameters of a B+ tree database object.
bdb -- specifies the B+ tree database object which is not opened.
lmemb -- specifies the number of members in each leaf page. If it is
not more than 0, the default value is specified. The default
value is 128.
nmemb -- specifies the number of members in each non-leaf page. If it
is not more than 0, the default value is specified. The
default value is 256.
bnum -- specifies the number of elements of the bucket array. If it
is not more than 0, the default value is specified. The
        default value is 32749. The suggested size of the bucket
        array is from about 1 to 4 times the number of all pages to
        be stored.
apow -- specifies the size of record alignment by power of 2. If it
is negative, the default value is specified. The default
value is 8 standing for 2^8=256.
fpow -- specifies the maximum number of elements of the free block
pool by power of 2. If it is negative, the default value is
specified. The default value is 10 standing for 2^10=1024.
opts -- specifies options by bitwise-or: 'TLARGE' specifies that the
size of the database can be larger than 2GB by using 64-bit
bucket array, 'TDEFLATE' specifies that each page is
compressed with Deflate encoding, 'TBZIP' specifies that each
page is compressed with BZIP2 encoding, 'TTCBS' specifies
that each page is compressed with TCBS encoding.
If successful, the return value is true, else, it is false.
Note that the tuning parameters should be set before the database is
opened.
"""
bdb_setcache = cfunc('tcbdbsetcache', libtc, c_bool,
('bdb', c_void_p, 1),
('lcnum', c_int32, 1, 0),
('ncnum', c_int32, 1, 0))
bdb_setcache.__doc__ =\
"""Set the caching parameters of a B+ tree database object.
bdb -- specifies the B+ tree database object which is not opened.
lcnum -- specifies the maximum number of leaf nodes to be cached. If
it is not more than 0, the default value is specified. The
default value is 1024.
ncnum -- specifies the maximum number of non-leaf nodes to be cached.
If it is not more than 0, the default value is specified.
The default value is 512.
If successful, the return value is true, else, it is false.
Note that the caching parameters should be set before the database is
opened.
"""
bdb_setxmsiz = cfunc('tcbdbsetxmsiz', libtc, c_bool,
('bdb', c_void_p, 1),
('xmsiz', c_int64, 1, 0))
bdb_setxmsiz.__doc__ =\
"""Set the size of the extra mapped memory of a B+ tree database
object.
bdb -- specifies the B+ tree database object which is not opened.
xmsiz -- specifies the size of the extra mapped memory. If it is not
more than 0, the extra mapped memory is disabled. It is
disabled by default.
If successful, the return value is true, else, it is false.
Note that the mapping parameters should be set before the database is
opened.
"""
bdb_setdfunit = cfunc('tcbdbsetdfunit', libtc, c_bool,
('bdb', c_void_p, 1),
('dfunit', c_int32, 1, 0))
bdb_setdfunit.__doc__ =\
"""Set the unit step number of auto defragmentation of a B+ tree
database object.
bdb -- specifies the B+ tree database object which is not opened.
dfunit -- specifies the unit step number. If it is not more than 0,
auto defragmentation is disabled. It is disabled by
default.
If successful, the return value is true, else, it is false.
Note that the defragmentation parameters should be set before the
database is opened.
"""
bdb_open = cfunc('tcbdbopen', libtc, c_bool,
('bdb', c_void_p, 1),
('path', c_char_p, 1),
('omode', c_int, 1))
bdb_open.__doc__ =\
"""Open a database file and connect a B+ tree database object.
bdb -- specifies the B+ tree database object which is not opened.
path -- specifies the path of the database file.
omode -- specifies the connection mode: 'OWRITER' as a writer,
'OREADER' as a reader. If the mode is 'OWRITER', the
         following may be added by bitwise-or: 'OCREAT', which means
         it creates a new database if one does not exist, 'OTRUNC',
         which means it creates a new database regardless of whether
         one exists,
'OTSYNC', which means every transaction synchronizes updated
contents with the device. Both of 'OREADER' and 'OWRITER'
can be added to by bitwise-or: 'ONOLCK', which means it
opens the database file without file locking, or 'OLCKNB',
which means locking is performed without blocking.
If successful, the return value is true, else, it is false.
"""
bdb_close = cfunc('tcbdbclose', libtc, c_bool,
('bdb', c_void_p, 1))
bdb_close.__doc__ =\
"""Close a B+ tree database object.
bdb -- specifies the B+ tree database object.
If successful, the return value is true, else, it is false.
Update of a database is assured to be written when the database is
closed. If a writer opens a database but does not close it
appropriately, the database will be broken.
"""
bdb_put = cfunc('tcbdbput', libtc, c_bool,
('bdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
bdb_put.__doc__ =\
"""Store a record into a B+ tree database object.
bdb -- specifies the B+ tree database object connected as a writer.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, it is
overwritten.
"""
bdb_put2 = cfunc_fast('tcbdbput2', libtc, c_bool,
('bdb', c_void_p, 1),
('kstr', c_char_p, 1),
('vstr', c_char_p, 1))
bdb_put2.__doc__ =\
"""Store a string record into a B+ tree database object.
bdb -- specifies the B+ tree database object connected as a writer.
kstr -- specifies the string of the key.
vstr -- specifies the string of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, it is
overwritten.
"""
bdb_putkeep = cfunc('tcbdbputkeep', libtc, c_bool,
('bdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
bdb_putkeep.__doc__ =\
"""Store a new record into a B+ tree database object.
bdb -- specifies the B+ tree database object connected as a writer.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, this function
has no effect.
"""
bdb_putkeep2 = cfunc_fast('tcbdbputkeep2', libtc, c_bool,
('bdb', c_void_p, 1),
('kstr', c_char_p, 1),
('vstr', c_char_p, 1))
bdb_putkeep2.__doc__ =\
"""Store a new string record into a B+ tree database object.
bdb -- specifies the B+ tree database object connected as a writer.
kstr -- specifies the string of the key.
vstr -- specifies the string of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, this function
has no effect.
"""
bdb_putcat = cfunc('tcbdbputcat', libtc, c_bool,
('bdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
bdb_putcat.__doc__ =\
"""Concatenate a value at the end of the existing record in a B+ tree
database object.
bdb -- specifies the B+ tree database object connected as a writer.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value.
If successful, the return value is true, else, it is false.
If there is no corresponding record, a new record is created.
"""
bdb_putcat2 = cfunc_fast('tcbdbputcat2', libtc, c_bool,
('bdb', c_void_p, 1),
('kstr', c_char_p, 1),
('vstr', c_char_p, 1))
bdb_putcat2.__doc__ =\
"""Concatenate a string value at the end of the existing record in a
B+ tree database object.
bdb -- specifies the B+ tree database object connected as a writer.
kstr -- specifies the string of the key.
vstr -- specifies the string of the value.
If successful, the return value is true, else, it is false.
If there is no corresponding record, a new record is created.
"""
bdb_putdup = cfunc('tcbdbputdup', libtc, c_bool,
('bdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
bdb_putdup.__doc__ =\
"""Store a record into a B+ tree database object with allowing
duplication of keys.
bdb -- specifies the B+ tree database object connected as a writer.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, the new record
is placed after the existing one.
"""
bdb_putdup2 = cfunc_fast('tcbdbputdup2', libtc, c_bool,
('bdb', c_void_p, 1),
('kstr', c_char_p, 1),
('vstr', c_char_p, 1))
bdb_putdup2.__doc__ =\
"""Store a string record into a B+ tree database object with allowing
duplication of keys.
bdb -- specifies the B+ tree database object connected as a writer.
kstr -- specifies the string of the key.
vstr -- specifies the string of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, the new record
is placed after the existing one.
"""
bdb_putdup3 = cfunc('tcbdbputdup3', libtc, c_bool,
('bdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vals', TCLIST_P, 1))
bdb_putdup3.__doc__ =\
"""Store records into a B+ tree database object with allowing
duplication of keys.
bdb -- specifies the B+ tree database object connected as a writer.
kbuf -- specifies the pointer to the region of the common key.
ksiz -- specifies the size of the region of the common key.
vals -- specifies a list object containing values.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, the new records
are placed after the existing one.
"""
bdb_out = cfunc('tcbdbout', libtc, c_bool,
('bdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1))
bdb_out.__doc__ =\
"""Remove a record of a B+ tree database object.
bdb -- specifies the B+ tree database object connected as a writer.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
If successful, the return value is true, else, it is false.
If the key of duplicated records is specified, the first one is
selected.
"""
bdb_out2 = cfunc_fast('tcbdbout2', libtc, c_bool,
('bdb', c_void_p, 1),
('kstr', c_char_p, 1))
bdb_out2.__doc__ =\
"""Remove a string record of a B+ tree database object.
bdb -- specifies the B+ tree database object connected as a writer.
kstr -- specifies the string of the key.
If successful, the return value is true, else, it is false.
If the key of duplicated records is specified, the first one is
selected.
"""
bdb_out3 = cfunc('tcbdbout3', libtc, c_bool,
('bdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1))
bdb_out3.__doc__ =\
"""Remove records of a B+ tree database object.
bdb -- specifies the B+ tree database object connected as a writer.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
If successful, the return value is true, else, it is false.
If the key of duplicated records is specified, all of them are
removed.
"""
bdb_get = cfunc('tcbdbget', libtc, tc_void_p,
('bdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('sp', c_int_p, 2))
bdb_get.errcheck = lambda result, func, arguments : (result, arguments[3])
bdb_get.__doc__ =\
"""Retrieve a record in a B+ tree database object.
bdb -- specifies the B+ tree database object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
If successful, the return value is the pointer to the region of the
value of the corresponding record. 'NULL' is returned if no record
corresponds.
If the key of duplicated records is specified, the first one is
selected. Because an additional zero code is appended at the end of
the region of the return value, the return value can be treated as a
character string. Because the region of the return value is allocated
with the 'malloc' call, it should be released with the 'free' call
when it is no longer in use.
"""
bdb_get2 = cfunc_fast('tcbdbget2', libtc, tc_char_p,
('bdb', c_void_p, 1),
('kstr', c_char_p, 1))
bdb_get2.__doc__ =\
"""Retrieve a string record in a B+ tree database object.
bdb -- specifies the B+ tree database object.
kstr -- specifies the string of the key.
If successful, the return value is the string of the value of the
corresponding record. 'NULL' is returned if no record corresponds.
If the key of duplicated records is specified, the first one is
selected. Because the region of the return value is allocated with
the 'malloc' call, it should be released with the 'free' call when it
is no longer in use.
"""
bdb_get3 = cfunc('tcbdbget3', libtc, c_void_p,
('bdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('sp', c_int_p, 2))
bdb_get3.errcheck = lambda result, func, arguments : (result, arguments[3])
bdb_get3.__doc__ =\
"""Retrieve a record in a B+ tree database object as a volatile
buffer.
bdb -- specifies the B+ tree database object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
If successful, the return value is the pointer to the region of the
value of the corresponding record. 'NULL' is returned if no record
corresponds.
If the key of duplicated records is specified, the first one is
selected. Because an additional zero code is appended at the end of
the region of the return value, the return value can be treated as a
character string. Because the region of the return value is volatile
and it may be spoiled by another operation of the database, the data
should be copied into another non-volatile buffer immediately.
"""
bdb_get4 = cfunc('tcbdbget4', libtc, TCLIST_P,
('bdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1))
bdb_get4.__doc__ =\
"""Retrieve records in a B+ tree database object.
bdb -- specifies the B+ tree database object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
If successful, the return value is a list object of the values of the
corresponding records. 'NULL' is returned if no record corresponds.
Because the object of the return value is created with the function
'tclistnew', it should be deleted with the function 'tclistdel' when
it is no longer in use.
"""
bdb_vnum = cfunc('tcbdbvnum', libtc, c_int,
('bdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1))
bdb_vnum.__doc__ =\
"""Get the number of records corresponding a key in a B+ tree database
object.
bdb -- specifies the B+ tree database object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
If successful, the return value is the number of the corresponding
records, else, it is 0.
"""
bdb_vnum2 = cfunc_fast('tcbdbvnum2', libtc, c_int,
('bdb', c_void_p, 1),
('kstr', c_char_p, 1))
bdb_vnum2.__doc__ =\
"""Get the number of records corresponding a string key in a B+ tree
database object.
bdb -- specifies the B+ tree database object.
kstr -- specifies the string of the key.
If successful, the return value is the number of the corresponding
records, else, it is 0.
"""
bdb_vsiz = cfunc('tcbdbvsiz', libtc, c_int,
('bdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1))
bdb_vsiz.__doc__ =\
"""Get the size of the value of a record in a B+ tree database object.
bdb -- specifies the B+ tree database object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
If successful, the return value is the size of the value of the
corresponding record, else, it is -1.
If the key of duplicated records is specified, the first one is
selected.
"""
bdb_vsiz2 = cfunc_fast('tcbdbvsiz2', libtc, c_int,
('bdb', c_void_p, 1),
('kstr', c_char_p, 1))
bdb_vsiz2.__doc__ =\
"""Get the size of the value of a string record in a B+ tree database
object.
bdb -- specifies the B+ tree database object.
kstr -- specifies the string of the key.
If successful, the return value is the size of the value of the
corresponding record, else, it is -1.
If the key of duplicated records is specified, the first one is
selected.
"""
bdb_range = cfunc('tcbdbrange', libtc, TCLIST_P,
('bdb', c_void_p, 1),
('bkbuf', c_void_p, 1),
('bksiz', c_int, 1),
('binc', c_bool, 1),
('ekbuf', c_void_p, 1),
('eksiz', c_int, 1),
('einc', c_bool, 1),
('max', c_int, 1, -1))
bdb_range.__doc__ =\
"""Get keys of ranged records in a B+ tree database object.
bdb -- specifies the B+ tree database object.
bkbuf -- specifies the pointer to the region of the key of the
beginning border. If it is 'NULL', the first record is
specified.
bksiz -- specifies the size of the region of the beginning key.
binc -- specifies whether the beginning border is inclusive or not.
ekbuf -- specifies the pointer to the region of the key of the ending
border. If it is 'NULL', the last record is specified.
eksiz -- specifies the size of the region of the ending key.
einc -- specifies whether the ending border is inclusive or not.
max -- specifies the maximum number of keys to be fetched. If it is
negative, no limit is specified.
The return value is a list object of the keys of the corresponding
records. This function never fails; it returns an empty list
even if no record corresponds.
Because the object of the return value is created with the function
'tclistnew', it should be deleted with the function 'tclistdel' when
it is no longer in use.
"""
bdb_range2 = cfunc_fast('tcbdbrange2', libtc, TCLIST_P,
('bdb', c_void_p, 1),
('bkstr', c_char_p, 1),
('binc', c_bool, 1),
('ekstr', c_char_p, 1),
('einc', c_bool, 1),
('max', c_int, 1, -1))
bdb_range2.__doc__ =\
"""Get string keys of ranged records in a B+ tree database object.
bdb -- specifies the B+ tree database object.
bkstr -- specifies the string of the key of the beginning border. If
it is 'NULL', the first record is specified.
binc -- specifies whether the beginning border is inclusive or not.
ekstr -- specifies the string of the key of the ending border. If it
is 'NULL', the last record is specified.
einc -- specifies whether the ending border is inclusive or not.
max -- specifies the maximum number of keys to be fetched. If it is
negative, no limit is specified.
The return value is a list object of the keys of the corresponding
records. This function never fails; it returns an empty list
even if no record corresponds.
Because the object of the return value is created with the function
'tclistnew', it should be deleted with the function 'tclistdel' when
it is no longer in use.
"""
bdb_fwmkeys = cfunc('tcbdbfwmkeys', libtc, TCLIST_P,
('bdb', c_void_p, 1),
('pbuf', c_void_p, 1),
('psiz', c_int, 1),
('max', c_int, 1, -1))
bdb_fwmkeys.__doc__ =\
"""Get forward matching keys in a B+ tree database object.
bdb -- specifies the B+ tree database object.
pbuf -- specifies the pointer to the region of the prefix.
psiz -- specifies the size of the region of the prefix.
max -- specifies the maximum number of keys to be fetched. If it is
negative, no limit is specified.
The return value is a list object of the corresponding keys. This
function never fails; it returns an empty list even if no key
corresponds.
Because the object of the return value is created with the function
'tclistnew', it should be deleted with the function 'tclistdel' when
it is no longer in use.
"""
bdb_fwmkeys2 = cfunc_fast('tcbdbfwmkeys2', libtc, TCLIST_P,
('bdb', c_void_p, 1),
('pstr', c_char_p, 1),
('max', c_int, 1, -1))
bdb_fwmkeys2.__doc__ =\
"""Get forward matching string keys in a B+ tree database object.
bdb -- specifies the B+ tree database object.
pstr -- specifies the string of the prefix.
max -- specifies the maximum number of keys to be fetched. If it is
negative, no limit is specified.
The return value is a list object of the corresponding keys. This
function never fails; it returns an empty list even if no key
corresponds.
Because the object of the return value is created with the function
'tclistnew', it should be deleted with the function 'tclistdel' when
it is no longer in use.
"""
bdb_addint = cfunc('tcbdbaddint', libtc, c_int,
('bdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('num', c_int, 1))
bdb_addint.__doc__ =\
"""Add an integer to a record in a B+ tree database object.
bdb -- specifies the B+ tree database object connected as a writer.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
num -- specifies the additional value.
If successful, the return value is the summation value, else, it is
'INT_MIN'.
If the corresponding record exists, the value is treated as an integer
and is added to. If no record corresponds, a new record of the
additional value is stored.
"""
bdb_adddouble = cfunc('tcbdbadddouble', libtc, c_double,
('bdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('num', c_double, 1))
bdb_adddouble.__doc__ =\
"""Add a real number to a record in a B+ tree database object.
bdb -- specifies the B+ tree database object connected as a writer.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
num -- specifies the additional value.
If successful, the return value is the summation value, else, it is
Not-a-Number.
If the corresponding record exists, the value is treated as a real
number and is added to. If no record corresponds, a new record of the
additional value is stored.
"""
bdb_sync = cfunc('tcbdbsync', libtc, c_bool,
('bdb', c_void_p, 1))
bdb_sync.__doc__ =\
"""Synchronize updated contents of a B+ tree database object with the
file and the device.
bdb -- specifies the B+ tree database object connected as a writer.
If successful, the return value is true, else, it is false.
This function is useful when another process connects to the same
database file.
"""
bdb_optimize = cfunc('tcbdboptimize', libtc, c_bool,
('bdb', c_void_p, 1),
('lmemb', c_int32, 1, 0),
('nmemb', c_int32, 1, 0),
('bnum', c_int64, 1, 0),
('apow', c_int8, 1, -1),
('fpow', c_int8, 1, -1),
('opts', c_uint8, 1, 255))
bdb_optimize.__doc__ =\
"""Optimize the file of a B+ tree database object.
bdb -- specifies the B+ tree database object connected as a writer.
lmemb -- specifies the number of members in each leaf page. If it is
not more than 0, the current setting is not changed.
nmemb -- specifies the number of members in each non-leaf page. If it
is not more than 0, the current setting is not changed.
bnum -- specifies the number of elements of the bucket array. If it
is not more than 0, the default value is specified. The
        default value is twice the number of pages.
apow -- specifies the size of record alignment by power of 2. If it
is negative, the current setting is not changed.
fpow -- specifies the maximum number of elements of the free block
pool by power of 2. If it is negative, the current setting
is not changed.
opts -- specifies options by bitwise-or: 'TLARGE' specifies that the
        size of the database can be larger than 2GB by using 64-bit
        bucket array, 'TDEFLATE' specifies that each page is
        compressed with Deflate encoding, 'TBZIP' specifies that each
        page is compressed with BZIP2 encoding, 'TTCBS' specifies
        that each page is compressed with TCBS encoding. If it is
        'UINT8_MAX', the current setting is not changed.
If successful, the return value is true, else, it is false.
This function is useful to reduce the size of the database file with
data fragmentation by successive updating.
"""
bdb_vanish = cfunc('tcbdbvanish', libtc, c_bool,
('bdb', c_void_p, 1))
bdb_vanish.__doc__ =\
"""Remove all records of a B+ tree database object.
bdb -- specifies the B+ tree database object connected as a writer.
If successful, the return value is true, else, it is false.
"""
bdb_copy = cfunc('tcbdbcopy', libtc, c_bool,
('bdb', c_void_p, 1),
('path', c_char_p, 1))
bdb_copy.__doc__ =\
"""Copy the database file of a B+ tree database object.
bdb -- specifies the B+ tree database object.
path -- specifies the path of the destination file. If it begins with
'@', the trailing substring is executed as a command line.
If successful, the return value is true, else, it is false. False is
returned if the executed command returns a non-zero code.
The database file is assured to be kept synchronized and not modified
while the copying or executing operation is in progress. So, this
function is useful to create a backup file of the database file.
"""
bdb_tranbegin = cfunc('tcbdbtranbegin', libtc, c_bool,
('bdb', c_void_p, 1))
bdb_tranbegin.__doc__ =\
"""Begin the transaction of a B+ tree database object.
bdb -- specifies the B+ tree database object connected as a writer.
If successful, the return value is true, else, it is false.
The database is locked by the thread during the transaction so that
only one transaction can be activated with a database object at the
same time. Thus, the serializable isolation level is assumed if every
database operation is performed in the transaction. Because all pages
are cached in memory during the transaction, the amount of referred
records is limited by the memory capacity. If the database is closed
during a transaction, the transaction is aborted implicitly.
"""
bdb_trancommit = cfunc('tcbdbtrancommit', libtc, c_bool,
('bdb', c_void_p, 1))
bdb_trancommit.__doc__ =\
"""Commit the transaction of a B+ tree database object.
bdb -- specifies the B+ tree database object connected as a writer.
If successful, the return value is true, else, it is false.
Update in the transaction is fixed when it is committed successfully.
"""
bdb_tranabort = cfunc('tcbdbtranabort', libtc, c_bool,
('bdb', c_void_p, 1))
bdb_tranabort.__doc__ =\
"""Abort the transaction of a B+ tree database object.
bdb -- specifies the B+ tree database object connected as a writer.
If successful, the return value is true, else, it is false.
Update in the transaction is discarded when it is aborted. The state
of the database is rolled back to before the transaction.
"""
bdb_path = cfunc('tcbdbpath', libtc, c_char_p,
('bdb', c_void_p, 1))
bdb_path.__doc__ =\
"""Get the file path of a B+ tree database object.
bdb -- specifies the B+ tree database object.
The return value is the path of the database file or 'NULL' if the
object does not connect to any database file.
"""
bdb_rnum = cfunc('tcbdbrnum', libtc, c_uint64,
('bdb', c_void_p, 1))
bdb_rnum.__doc__ =\
"""Get the number of records of a B+ tree database object.
bdb -- specifies the B+ tree database object.
The return value is the number of records or 0 if the object does not
connect to any database file.
"""
bdb_fsiz = cfunc('tcbdbfsiz', libtc, c_uint64,
('bdb', c_void_p, 1))
bdb_fsiz.__doc__ =\
"""Get the size of the database file of a B+ tree database object.
bdb -- specifies the B+ tree database object.
The return value is the size of the database file or 0 if the object
does not connect to any database file.
"""
bdb_curnew = cfunc('tcbdbcurnew', libtc, c_void_p,
('bdb', c_void_p, 1))
bdb_curnew.__doc__ =\
"""Create a cursor object.
bdb -- specifies the B+ tree database object.
The return value is the new cursor object.
Note that the cursor is available only after initialization with the
'tcbdbcurfirst' or the 'tcbdbcurjump' functions and so on. Moreover,
the position of the cursor will be indefinite when the database is
updated after the initialization of the cursor.
"""
bdb_curdel = cfunc('tcbdbcurdel', libtc, None,
('cur', c_void_p, 1))
bdb_curdel.__doc__ =\
"""Delete a cursor object.
cur -- specifies the cursor object.
"""
bdb_curfirst = cfunc('tcbdbcurfirst', libtc, c_bool,
('cur', c_void_p, 1))
bdb_curfirst.__doc__ =\
"""Move a cursor object to the first record.
cur -- specifies the cursor object.
If successful, the return value is true, else, it is false. False is
returned if there is no record in the database.
"""
bdb_curlast = cfunc('tcbdbcurlast', libtc, c_bool,
('cur', c_void_p, 1))
bdb_curlast.__doc__ =\
"""Move a cursor object to the last record.
cur -- specifies the cursor object.
If successful, the return value is true, else, it is false. False is
returned if there is no record in the database.
"""
bdb_curjump = cfunc('tcbdbcurjump', libtc, c_bool,
('cur', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1))
bdb_curjump.__doc__ =\
"""Move a cursor object to the front of records corresponding a key.
cur -- specifies the cursor object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
If successful, the return value is true, else, it is false. False is
returned if there is no record corresponding to the condition.
The cursor is set to the first record corresponding to the key or the
next substitute if no completely matching record exists.
"""
bdb_curjump2 = cfunc_fast('tcbdbcurjump2', libtc, c_bool,
('cur', c_void_p, 1),
('kstr', c_char_p, 1))
bdb_curjump2.__doc__ =\
"""Move a cursor object to the front of records corresponding a key
string.
cur -- specifies the cursor object.
kstr -- specifies the string of the key.
If successful, the return value is true, else, it is false. False is
returned if there is no record corresponding to the condition.
The cursor is set to the first record corresponding to the key or the
next substitute if no completely matching record exists.
"""
bdb_curprev = cfunc('tcbdbcurprev', libtc, c_bool,
('cur', c_void_p, 1))
bdb_curprev.__doc__ =\
"""Move a cursor object to the previous record.
cur -- specifies the cursor object.
If successful, the return value is true, else, it is false. False is
returned if there is no previous record.
"""
bdb_curnext = cfunc('tcbdbcurnext', libtc, c_bool,
('cur', c_void_p, 1))
bdb_curnext.__doc__ =\
"""Move a cursor object to the next record.
cur -- specifies the cursor object.
If successful, the return value is true, else, it is false. False is
returned if there is no next record.
"""
bdb_curput = cfunc('tcbdbcurput', libtc, c_bool,
('cur', c_void_p, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1),
('cpmode', c_int, 1))
bdb_curput.__doc__ =\
"""Insert a record around a cursor object.
cur -- specifies the cursor object of writer connection.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value.
cpmode -- specifies detail adjustment:
'BDBCPCURRENT', which means that the value of the current
record is overwritten,
'BDBCPBEFORE', which means that the new record is inserted
before the current record,
'BDBCPAFTER', which means that the new record is inserted
after the current record.
If successful, the return value is true, else, it is false. False is
returned when the cursor is at an invalid position.
After insertion, the cursor is moved to the inserted record.
"""
bdb_curput2 = cfunc_fast('tcbdbcurput2', libtc, c_bool,
('cur', c_void_p, 1),
('vstr', c_char_p, 1),
('cpmode', c_int, 1))
bdb_curput2.__doc__ =\
"""Insert a string record around a cursor object.
cur -- specifies the cursor object of writer connection.
vstr -- specifies the string of the value.
cpmode -- specifies detail adjustment:
'BDBCPCURRENT', which means that the value of the current
record is overwritten,
'BDBCPBEFORE', which means that the new record is inserted
before the current record,
'BDBCPAFTER', which means that the new record is inserted
after the current record.
If successful, the return value is true, else, it is false. False is
returned when the cursor is at an invalid position.
After insertion, the cursor is moved to the inserted record.
"""
bdb_curout = cfunc('tcbdbcurout', libtc, c_bool,
('cur', c_void_p, 1))
bdb_curout.__doc__ =\
"""Remove the record where a cursor object is.
cur -- specifies the cursor object of writer connection.
If successful, the return value is true, else, it is false. False is
returned when the cursor is at an invalid position.
After deletion, the cursor is moved to the next record if possible.
"""
bdb_curkey = cfunc('tcbdbcurkey', libtc, tc_void_p,
('cur', c_void_p, 1),
('sp', c_int_p, 2))
bdb_curkey.errcheck = lambda result, func, arguments : (result, arguments[1])
bdb_curkey.__doc__ =\
"""Get the key of the record where the cursor object is.
cur -- specifies the cursor object.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
If successful, the return value is the pointer to the region of the
key, else, it is 'NULL'. 'NULL' is returned when the cursor is at
an invalid position.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. Because the region of the return value is allocated with the
'malloc' call, it should be released with the 'free' call when it is
no longer in use.
"""
bdb_curkey2 = cfunc_fast('tcbdbcurkey2', libtc, tc_char_p,
('cur', c_void_p, 1))
bdb_curkey2.__doc__ =\
"""Get the key string of the record where the cursor object is.
cur -- specifies the cursor object.
If successful, the return value is the string of the key, else, it is
'NULL'. 'NULL' is returned when the cursor is at an invalid position.
Because the region of the return value is allocated with the 'malloc'
call, it should be released with the 'free' call when it is no longer
in use.
"""
bdb_curkey3 = cfunc('tcbdbcurkey3', libtc, c_void_p,
('cur', c_void_p, 1),
('sp', c_int_p, 2))
bdb_curkey3.errcheck = lambda result, func, arguments : (result, arguments[1])
bdb_curkey3.__doc__ =\
"""Get the key of the record where the cursor object is, as a volatile
buffer.
cur -- specifies the cursor object.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
If successful, the return value is the pointer to the region of the
key, else, it is 'NULL'. 'NULL' is returned when the cursor is at
an invalid position.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. Because the region of the return value is volatile and it may
be spoiled by another operation of the database, the data should be
copied into another non-volatile buffer immediately.
"""
bdb_curval = cfunc('tcbdbcurval', libtc, tc_void_p,
('cur', c_void_p, 1),
('sp', c_int_p, 2))
bdb_curval.errcheck = lambda result, func, arguments : (result, arguments[1])
bdb_curval.__doc__ =\
"""Get the value of the record where the cursor object is.
cur -- specifies the cursor object.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
If successful, the return value is the pointer to the region of the
value, else, it is 'NULL'. 'NULL' is returned when the cursor is at
an invalid position.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. Because the region of the return value is allocated with the
'malloc' call, it should be released with the 'free' call when it is
no longer in use.
"""
bdb_curval2 = cfunc_fast('tcbdbcurval2', libtc, tc_char_p,
('cur', c_void_p, 1))
bdb_curval2.__doc__ =\
"""Get the value string of the record where the cursor object is.
cur -- specifies the cursor object.
If successful, the return value is the string of the value, else, it
is 'NULL'. 'NULL' is returned when the cursor is at an invalid
position.
Because the region of the return value is allocated with the 'malloc'
call, it should be released with the 'free' call when it is no longer
in use.
"""
bdb_curval3 = cfunc('tcbdbcurval3', libtc, c_void_p,
('cur', c_void_p, 1),
('sp', c_int_p, 2))
bdb_curval3.errcheck = lambda result, func, arguments : (result, arguments[1])
bdb_curval3.__doc__ =\
"""Get the value of the record where the cursor object is, as a
volatile buffer.
cur -- specifies the cursor object.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
If successful, the return value is the pointer to the region of the
value, else, it is 'NULL'. 'NULL' is returned when the cursor is at
invalid position.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. Because the region of the return value is volatile and it may
be spoiled by another operation of the database, the data should be
copied into another involatile buffer immediately.
"""
bdb_currec = cfunc('tcbdbcurrec', libtc, c_bool,
('cur', c_void_p, 1),
('kxstr', TCXSTR_P, 1),
('vxstr', TCXSTR_P, 1))
bdb_currec.__doc__ =\
"""Get the key and the value of the record where the cursor object is.
cur -- specifies the cursor object.
kxstr -- specifies the object into which the key is written.
vxstr -- specifies the object into which the value is written.
If successful, the return value is true, else, it is false. False is
returned when the cursor is at invalid position.
"""
# features for experts
bdb_setecode = cfunc('tcbdbsetecode', libtc, None,
('bdb', c_void_p, 1),
('ecode', c_int, 1),
('filename', c_char_p, 1),
('line', c_int, 1),
('func', c_char_p, 1))
bdb_setecode.__doc__ =\
"""Set the error code of a B+ tree database object.
bdb -- specifies the B+ tree database object.
ecode -- specifies the error code.
file -- specifies the file name of the code.
line -- specifies the line number of the code.
func -- specifies the function name of the code.
"""
bdb_setdbgfd = cfunc('tcbdbsetdbgfd', libtc, None,
('bdb', c_void_p, 1),
('fd', c_int, 1))
bdb_setdbgfd.__doc__ =\
"""Set the file descriptor for debugging output.
bdb -- specifies the B+ tree database object.
fd -- specifies the file descriptor for debugging output.
"""
bdb_dbgfd = cfunc('tcbdbdbgfd', libtc, c_int,
('bdb', c_void_p, 1))
bdb_dbgfd.__doc__ =\
"""Get the file descriptor for debugging output.
bdb -- specifies the B+ tree database object.
The return value is the file descriptor for debugging output.
"""
bdb_hasmutex = cfunc('tcbdbhasmutex', libtc, c_bool,
('bdb', c_void_p, 1))
bdb_hasmutex.__doc__ =\
"""Check whether mutual exclusion control is set to a B+ tree database
object.
bdb -- specifies the B+ tree database object.
If mutual exclusion control is set, it is true, else it is false.
"""
bdb_memsync = cfunc('tcbdbmemsync', libtc, c_bool,
('bdb', c_void_p, 1),
('phys', c_bool, 1))
bdb_memsync.__doc__ =\
"""Synchronize updating contents on memory of a B+ tree database
object.
bdb -- specifies the B+ tree database object connected as a writer.
phys -- specifies whether to synchronize physically.
If successful, the return value is true, else, it is false.
"""
bdb_cmpfunc = cfunc('tcbdbcmpfunc', libtc, TCCMP,
('bdb', c_void_p, 1))
bdb_cmpfunc.__doc__ =\
"""Get the comparison function of a B+ tree database object.
bdb -- specifies the B+ tree database object.
The return value is the pointer to the comparison function.
"""
bdb_cmpop = cfunc('tcbdbcmpop', libtc, c_void_p,
('bdb', c_void_p, 1))
bdb_cmpop.__doc__ =\
"""Get the opaque object for the comparison function of a B+ tree
database object.
bdb -- specifies the B+ tree database object.
The return value is the opaque object for the comparison function.
"""
bdb_lmemb = cfunc('tcbdblmemb', libtc, c_uint32,
('bdb', c_void_p, 1))
bdb_lmemb.__doc__ =\
"""Get the maximum number of cached leaf nodes of a B+ tree database
object.
bdb -- specifies the B+ tree database object.
The return value is the maximum number of cached leaf nodes.
"""
bdb_lnum = cfunc('tcbdblnum', libtc, c_uint64,
('bdb', c_void_p, 1))
bdb_lnum.__doc__ =\
"""Get the number of the leaf nodes of B+ tree database object.
bdb -- specifies the B+ tree database object.
If successful, the return value is the number of the leaf nodes or 0
if the object does not connect to any database file.
"""
bdb_nnum = cfunc('tcbdbnnum', libtc, c_uint64,
('bdb', c_void_p, 1))
bdb_nnum.__doc__ =\
"""Get the number of the non-leaf nodes of B+ tree database object.
bdb -- specifies the B+ tree database object.
If successful, the return value is the number of the non-leaf nodes or
0 if the object does not connect to any database file.
"""
bdb_bnum = cfunc('tcbdbbnum', libtc, c_uint64,
('bdb', c_void_p, 1))
bdb_bnum.__doc__ =\
"""Get the number of elements of the bucket array of a B+ tree
database object.
bdb -- specifies the B+ tree database object.
The return value is the number of elements of the bucket array or 0 if
the object does not connect to any database file.
"""
bdb_align = cfunc('tcbdbalign', libtc, c_uint32,
('bdb', c_void_p, 1))
bdb_align.__doc__ =\
"""Get the record alignment of a B+ tree database object.
bdb -- specifies the B+ tree database object.
The return value is the record alignment or 0 if the object does not
connect to any database file.
"""
bdb_fbpmax = cfunc('tcbdbfbpmax', libtc, c_uint32,
('bdb', c_void_p, 1))
bdb_fbpmax.__doc__ =\
"""Get the maximum number of the free block pool of a B+ tree database
object.
bdb -- specifies the B+ tree database object.
The return value is the maximum number of the free block pool or 0 if
the object does not connect to any database file.
"""
bdb_inode = cfunc('tcbdbinode', libtc, c_uint64,
('bdb', c_void_p, 1))
bdb_inode.__doc__ =\
"""Get the inode number of the database file of a B+ tree database
object.
bdb -- specifies the B+ tree database object.
The return value is the inode number of the database file or 0 if the
object does not connect to any database file.
"""
bdb_mtime = cfunc('tcbdbmtime', libtc, c_time,
('bdb', c_void_p, 1))
bdb_mtime.__doc__ =\
"""Get the modification time of the database file of a B+ tree
database object.
bdb -- specifies the B+ tree database object.
The return value is the modification time of the database file or 0 if
the object does not connect to any database file.
"""
bdb_flags = cfunc('tcbdbflags', libtc, c_uint8,
('bdb', c_void_p, 1))
bdb_flags.__doc__ =\
"""Get the additional flags of a B+ tree database object.
bdb -- specifies the B+ tree database object.
The return value is the additional flags.
"""
bdb_opts = cfunc('tcbdbopts', libtc, c_uint8,
('bdb', c_void_p, 1))
bdb_opts.__doc__ =\
"""Get the options of a B+ tree database object.
bdb -- specifies the B+ tree database object.
The return value is the options.
"""
bdb_opaque = cfunc('tcbdbopaque', libtc, c_char_p,
('bdb', c_void_p, 1))
bdb_opaque.__doc__ =\
"""Get the pointer to the opaque field of a B+ tree database object.
bdb -- specifies the B+ tree database object.
The return value is the pointer to the opaque field whose size is 128
bytes.
"""
bdb_bnumused = cfunc('tcbdbbnumused', libtc, c_uint64,
('bdb', c_void_p, 1))
bdb_bnumused.__doc__ =\
"""Get the number of used elements of the bucket array of a B+ tree
database object.
bdb -- specifies the B+ tree database object.
The return value is the number of used elements of the bucket array or
0 if the object does not connect to any database file.
"""
bdb_setlsmax = cfunc('tcbdbsetlsmax', libtc, c_bool,
('bdb', c_void_p, 1),
('lsmax', c_uint32, 1, 0))
bdb_setlsmax.__doc__ =\
"""Set the maximum size of each leaf node.
bdb -- specifies the B+ tree database object which is not opened.
lsmax -- specifies the maximum size of each leaf node. If it is not
more than 0, the default value is specified. The default
value is 16386.
If successful, the return value is true, else, it is false.
Note that the tuning parameters of the database should be set before
the database is opened.
"""
bdb_setcapnum = cfunc('tcbdbsetcapnum', libtc, c_bool,
('bdb', c_void_p, 1),
('capnum', c_uint64, 1))
bdb_setcapnum.__doc__ =\
"""Set the capacity number of records.
bdb -- specifies the B+ tree database object which is not opened.
capnum -- specifies the capacity number of records. If it is not more
than 0, the capacity is unlimited.
If successful, the return value is true, else, it is false.
When the number of records exceeds the capacity, forehand records are
removed implicitly. Note that the tuning parameters of the database
should be set before the database is opened.
"""
bdb_setcodecfunc = cfunc('tcbdbsetcodecfunc', libtc, c_bool,
('bdb', c_void_p, 1),
('enc', TCCODEC, 1),
('encop', c_void_p, 1),
('dec', TCCODEC, 1),
('decop', c_void_p, 1))
bdb_setcodecfunc.__doc__ =\
"""Set the custom codec functions of a B+ tree database object.
bdb -- specifies the B+ tree database object.
enc -- specifies the pointer to the custom encoding function. It
receives four parameters. The first parameter is the pointer
to the region. The second parameter is the size of the
region. The third parameter is the pointer to the variable
into which the size of the region of the return value is
assigned. The fourth parameter is the pointer to the
optional opaque object. It returns the pointer to the result
object allocated with 'malloc' call if successful, else, it
returns 'NULL'.
encop -- specifies an arbitrary pointer to be given as a parameter of
the encoding function. If it is not needed, 'NULL' can be
specified.
dec -- specifies the pointer to the custom decoding function.
decop -- specifies an arbitrary pointer to be given as a parameter of
the decoding function. If it is not needed, 'NULL' can be
specified.
If successful, the return value is true, else, it is false.
Note that the custom codec functions should be set before the database
is opened and should be set every time the database is being opened.
"""
bdb_dfunit = cfunc('tcbdbdfunit', libtc, c_uint32,
('bdb', c_void_p, 1))
bdb_dfunit.__doc__ =\
"""Get the unit step number of auto defragmentation of a B+ tree
database object.
bdb -- specifies the B+ tree database object.
The return value is the unit step number of auto defragmentation.
"""
bdb_defrag = cfunc('tcbdbdefrag', libtc, c_bool,
('bdb', c_void_p, 1),
('step', c_int64, 1, 0))
bdb_defrag.__doc__ =\
"""Perform dynamic defragmentation of a B+ tree database object.
bdb -- specifies the B+ tree database object connected as a writer.
step -- specifies the number of steps. If it is not more than 0, the
whole file is defragmented gradually without keeping a
continuous lock.
If successful, the return value is true, else, it is false.
"""
bdb_cacheclear = cfunc('tcbdbcacheclear', libtc, c_bool,
('bdb', c_void_p, 1))
bdb_cacheclear.__doc__ =\
"""Clear the cache of a B+ tree database object.
bdb -- specifies the B+ tree database object.
If successful, the return value is true, else, it is false.
"""
bdb_putdupback = cfunc('tcbdbputdupback', libtc, c_bool,
('bdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
bdb_putdupback.__doc__ =\
"""Store a new record into a B+ tree database object with backward
duplication.
bdb -- specifies the B+ tree database object connected as a writer.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, the new record
is placed after the existing one.
"""
bdb_putdupback2 = cfunc_fast('tcbdbputdupback2', libtc, c_bool,
('bdb', c_void_p, 1),
('kstr', c_char_p, 1),
('vstr', c_char_p, 1))
bdb_putdupback2.__doc__ =\
"""Store a new string record into a B+ tree database object with
backward duplication.
bdb -- specifies the B+ tree database object connected as a writer.
kstr -- specifies the string of the key.
vstr -- specifies the string of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, the new record
is placed after the existing one.
"""
bdb_putproc = cfunc('tcbdbputproc', libtc, c_bool,
('bdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1),
('proc', TCPDPROC, 1),
('op', c_void_p, 1))
bdb_putproc.__doc__ =\
"""Store a record into a B+ tree database object with a duplication
handler.
bdb -- specifies the B+ tree database object connected as a writer.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value. 'NULL'
means that record addition is omitted if there is no
corresponding record.
vsiz -- specifies the size of the region of the value.
proc -- specifies the pointer to the callback function to process
duplication. It receives four parameters. The first
parameter is the pointer to the region of the value. The
second parameter is the size of the region of the value. The
third parameter is the pointer to the variable into which the
size of the region of the return value is assigned. The
fourth parameter is the pointer to the optional opaque object.
It returns the pointer to the result object allocated with
'malloc'. It is released by the caller. If it is 'NULL', the
record is not modified. If it is '(void *)-1', the record is
removed.
op -- specifies an arbitrary pointer to be given as a parameter of
the callback function. If it is not needed, 'NULL' can be
specified.
If successful, the return value is true, else, it is false.
Note that the callback function can not perform any database operation
because the function is called in the critical section guarded by the
same locks of database operations.
"""
bdb_curjumpback = cfunc('tcbdbcurjumpback', libtc, c_bool,
('cur', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1))
bdb_curjumpback.__doc__ =\
"""Move a cursor object to the rear of records corresponding a key.
cur -- specifies the cursor object.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
If successful, the return value is true, else, it is false. False is
returned if there is no record corresponding to the condition.
The cursor is set to the last record corresponding to the key, or to
the previous record as a substitute if no completely matching record
exists.
"""
bdb_curjumpback2 = cfunc_fast('tcbdbcurjumpback2', libtc, c_bool,
('cur', c_void_p, 1),
('kstr', c_char_p, 1))
bdb_curjumpback2.__doc__ =\
"""Move a cursor object to the rear of records corresponding a key
string.
cur -- specifies the cursor object.
kstr -- specifies the string of the key.
If successful, the return value is true, else, it is false. False is
returned if there is no record corresponding to the condition.
The cursor is set to the last record corresponding to the key, or to
the previous record as a substitute if no completely matching record
exists.
"""
bdb_foreach = cfunc('tcbdbforeach', libtc, c_bool,
('bdb', c_void_p, 1),
('iter', TCITER, 1),
('op', c_char_p, 1))
bdb_foreach.__doc__ =\
"""Process each record atomically of a B+ tree database object.
bdb -- specifies the B+ tree database object.
iter -- specifies the pointer to the iterator function called for each
record. It receives five parameters. The first parameter is
the pointer to the region of the key. The second parameter is
the size of the region of the key. The third parameter is the
pointer to the region of the value. The fourth parameter is
the size of the region of the value. The fifth parameter is
the pointer to the optional opaque object. It returns true to
continue iteration or false to stop iteration.
op -- specifies an arbitrary pointer to be given as a parameter of
the iterator function. If it is not needed, 'NULL' can be
specified.
If successful, the return value is true, else, it is false.
Note that the callback function can not perform any database operation
because the function is called in the critical section guarded by the
same locks of database operations.
"""
#
# Functions from tcfdb.h
#
fdb_errmsg = cfunc('tcfdberrmsg', libtc, c_char_p,
('ecode', c_int, 1))
fdb_errmsg.__doc__ =\
"""Get the message string corresponding to an error code.
ecode -- specifies the error code.
The return value is the message string of the error code.
"""
fdb_new = cfunc('tcfdbnew', libtc, c_void_p)
fdb_new.__doc__ =\
"""Create a fixed-length database object.
The return value is the new fixed-length database object.
"""
fdb_del = cfunc('tcfdbdel', libtc, None,
('fdb', c_void_p, 1))
fdb_del.__doc__ =\
"""Delete a fixed-length database object.
fdb -- specifies the fixed-length database object.
If the database is not closed, it is closed implicitly. Note that the
deleted object and its derivatives can not be used anymore.
"""
fdb_ecode = cfunc('tcfdbecode', libtc, c_int,
('fdb', c_void_p, 1))
fdb_ecode.__doc__ =\
""" Get the last happened error code of a fixed-length database
object.
fdb -- specifies the fixed-length database object.
The return value is the last happened error code.
The following error codes are defined: 'ESUCCESS' for success,
'ETHREAD' for threading error, 'EINVALID' for invalid operation,
'ENOFILE' for file not found, 'ENOPERM' for no permission, 'EMETA' for
invalid meta data, 'ERHEAD' for invalid record header, 'EOPEN' for
open error, 'ECLOSE' for close error, 'ETRUNC' for trunc error,
'ESYNC' for sync error, 'ESTAT' for stat error, 'ESEEK' for seek
error, 'EREAD' for read error, 'EWRITE' for write error, 'EMMAP' for
mmap error, 'ELOCK' for lock error, 'EUNLINK' for unlink error,
'ERENAME' for rename error, 'EMKDIR' for mkdir error, 'ERMDIR' for
rmdir error, 'EKEEP' for existing record, 'ENOREC' for no record
found, and 'EMISC' for miscellaneous error.
"""
fdb_setmutex = cfunc('tcfdbsetmutex', libtc, c_bool,
('fdb', c_void_p, 1))
fdb_setmutex.__doc__ =\
"""Set mutual exclusion control of a fixed-length database object for
threading.
fdb -- specifies the fixed-length database object which is not opened.
If successful, the return value is true, else, it is false.
Note that the mutual exclusion control is needed if the object is
shared by plural threads and this function should be called before the
database is opened.
"""
fdb_tune = cfunc('tcfdbtune', libtc, c_bool,
('fdb', c_void_p, 1),
('width', c_int32, 1, 0),
('limsiz', c_int64, 1, 0))
fdb_tune.__doc__ =\
"""Set the tuning parameters of a fixed-length database object.
fdb -- specifies the fixed-length database object which is not
opened.
width -- specifies the width of the value of each record. If it is
not more than 0, the default value is specified. The
default value is 255.
limsiz -- specifies the limit size of the database file. If it is not
more than 0, the default value is specified. The default
value is 268435456.
If successful, the return value is true, else, it is false.
Note that the tuning parameters should be set before the database is
opened.
"""
fdb_open = cfunc('tcfdbopen', libtc, c_bool,
('fdb', c_void_p, 1),
('path', c_char_p, 1),
('omode', c_int, 1))
fdb_open.__doc__ =\
"""Open a database file and connect a fixed-length database object.
fdb -- specifies the fixed-length database object which is not
opened.
path -- specifies the path of the database file.
omode -- specifies the connection mode: 'OWRITER' as a writer,
'OREADER' as a reader. If the mode is 'OWRITER', the
following may be added by bitwise-or: 'OCREAT', which means
it creates a new database if it does not exist, 'OTRUNC',
which means it creates a new database regardless of whether
one exists, and 'OTSYNC', which means every transaction
synchronizes updated contents with the device. Both 'OREADER'
and 'OWRITER' can be combined by bitwise-or with: 'ONOLCK',
which means it opens the database file without file locking,
or 'OLCKNB', which means locking is performed without
blocking.
If successful, the return value is true, else, it is false.
"""
fdb_close = cfunc('tcfdbclose', libtc, c_bool,
('fdb', c_void_p, 1))
fdb_close.__doc__ =\
"""Close a fixed-length database object.
fdb -- specifies the fixed-length database object.
If successful, the return value is true, else, it is false.
Update of a database is assured to be written when the database is
closed. If a writer opens a database but does not close it
appropriately, the database will be broken.
"""
fdb_put = cfunc('tcfdbput', libtc, c_bool,
('fdb', c_void_p, 1),
('id', c_int64, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
fdb_put.__doc__ =\
"""Store a record into a fixed-length database object.
fdb -- specifies the fixed-length database object connected as a
writer.
id -- specifies the ID number. It should be more than 0. If it is
'IDMIN', the minimum ID number of existing records is
specified. If it is 'IDPREV', the number less by one than the
minimum ID number of existing records is specified. If it is
'IDMAX', the maximum ID number of existing records is
specified. If it is 'IDNEXT', the number greater by one than
the maximum ID number of existing records is specified.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value. If the size of
the value is greater than the width tuning parameter of the
database, the size is cut down to the width.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, it is
overwritten.
"""
fdb_put2 = cfunc('tcfdbput2', libtc, c_bool,
('fdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
fdb_put2.__doc__ =\
"""Store a record with a decimal key into a fixed-length database
object.
fdb -- specifies the fixed-length database object connected as a
writer.
kbuf -- specifies the pointer to the region of the decimal key. It
should be more than 0. If it is "min", the minimum ID number
of existing records is specified. If it is "prev", the number
less by one than the minimum ID number of existing records is
specified. If it is "max", the maximum ID number of existing
records is specified. If it is "next", the number greater by
one than the maximum ID number of existing records is
specified.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value. If the size of
the value is greater than the width tuning parameter of the
database, the size is cut down to the width.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, it is
overwritten.
"""
fdb_put3 = cfunc_fast('tcfdbput3', libtc, c_bool,
('fdb', c_void_p, 1),
('kstr', c_char_p, 1),
('vstr', c_char_p, 1))
fdb_put3.__doc__ =\
"""Store a string record with a decimal key into a fixed-length
database object.
fdb -- specifies the fixed-length database object connected as a
writer.
kstr -- specifies the string of the decimal key. It should be more
than 0. If it is "min", the minimum ID number of existing
records is specified. If it is "prev", the number less by one
than the minimum ID number of existing records is specified.
If it is "max", the maximum ID number of existing records is
specified. If it is "next", the number greater by one than
the maximum ID number of existing records is specified.
vstr -- specifies the string of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, it is
overwritten.
"""
fdb_putkeep = cfunc('tcfdbputkeep', libtc, c_bool,
('fdb', c_void_p, 1),
('id', c_int64, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
fdb_putkeep.__doc__ =\
"""Store a new record into a fixed-length database object.
fdb -- specifies the fixed-length database object connected as a
writer.
id -- specifies the ID number. It should be more than 0. If it is
'IDMIN', the minimum ID number of existing records is
specified. If it is 'IDPREV', the number less by one than the
minimum ID number of existing records is specified. If it is
'IDMAX', the maximum ID number of existing records is
specified. If it is 'IDNEXT', the number greater by one than
the maximum ID number of existing records is specified.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value. If the size of
the value is greater than the width tuning parameter of the
database, the size is cut down to the width.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, this function
has no effect.
"""
fdb_putkeep2 = cfunc('tcfdbputkeep2', libtc, c_bool,
('fdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
fdb_putkeep2.__doc__ =\
"""Store a new record with a decimal key into a fixed-length database
object.
fdb -- specifies the fixed-length database object connected as a
writer.
kbuf -- specifies the pointer to the region of the decimal key. It
should be more than 0. If it is "min", the minimum ID number
of existing records is specified. If it is "prev", the number
less by one than the minimum ID number of existing records is
specified. If it is "max", the maximum ID number of existing
records is specified. If it is "next", the number greater by
one than the maximum ID number of existing records is
specified.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value. If the size of
the value is greater than the width tuning parameter of the
database, the size is cut down to the width.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, this function
has no effect.
"""
fdb_putkeep3 = cfunc_fast('tcfdbputkeep3', libtc, c_bool,
('fdb', c_void_p, 1),
('kstr', c_char_p, 1),
('vstr', c_char_p, 1))
fdb_putkeep3.__doc__ =\
"""Store a new string record with a decimal key into a fixed-length
database object.
fdb -- specifies the fixed-length database object connected as a
writer.
kstr -- specifies the string of the decimal key. It should be more
than 0. If it is "min", the minimum ID number of existing
records is specified. If it is "prev", the number less by one
than the minimum ID number of existing records is specified.
If it is "max", the maximum ID number of existing records is
specified. If it is "next", the number greater by one than
the maximum ID number of existing records is specified.
vstr -- specifies the string of the value.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, this function
has no effect.
"""
fdb_putcat = cfunc('tcfdbputcat', libtc, c_bool,
('fdb', c_void_p, 1),
('id', c_int64, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
fdb_putcat.__doc__ =\
"""Concatenate a value at the end of the existing record in a
fixed-length database object.
fdb -- specifies the fixed-length database object connected as a
writer.
id -- specifies the ID number. It should be more than 0. If it is
'IDMIN', the minimum ID number of existing records is
specified. If it is 'IDPREV', the number less by one than the
minimum ID number of existing records is specified. If it is
'IDMAX', the maximum ID number of existing records is
specified. If it is 'IDNEXT', the number greater by one than
the maximum ID number of existing records is specified.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value. If the size of
the value is greater than the width tuning parameter of the
database, the size is cut down to the width.
If successful, the return value is true, else, it is false.
If there is no corresponding record, a new record is created.
"""
fdb_putcat2 = cfunc('tcfdbputcat2', libtc, c_bool,
('fdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1))
fdb_putcat2.__doc__ =\
"""Concatenate a value with a decimal key in a fixed-length database
object.
fdb -- specifies the fixed-length database object connected as a
writer.
kbuf -- specifies the pointer to the region of the decimal key. It
should be more than 0. If it is "min", the minimum ID number
of existing records is specified. If it is "prev", the number
less by one than the minimum ID number of existing records is
specified. If it is "max", the maximum ID number of existing
records is specified. If it is "next", the number greater by
one than the maximum ID number of existing records is
specified.
ksiz -- specifies the size of the region of the key.
vbuf -- specifies the pointer to the region of the value.
vsiz -- specifies the size of the region of the value. If the size of
the value is greater than the width tuning parameter of the
database, the size is cut down to the width.
If successful, the return value is true, else, it is false.
If there is no corresponding record, a new record is created.
"""
fdb_putcat3 = cfunc_fast('tcfdbputcat3', libtc, c_bool,
('fdb', c_void_p, 1),
('kstr', c_char_p, 1),
('vstr', c_char_p, 1))
fdb_putcat3.__doc__ =\
"""Concatenate a string value with a decimal key in a fixed-length
database object.
fdb -- specifies the fixed-length database object connected as a
writer.
kstr -- specifies the string of the decimal key. It should be more
than 0. If it is "min", the minimum ID number of existing
records is specified. If it is "prev", the number less by one
than the minimum ID number of existing records is specified.
If it is "max", the maximum ID number of existing records is
specified. If it is "next", the number greater by one than
the maximum ID number of existing records is specified.
vstr -- specifies the string of the value.
If successful, the return value is true, else, it is false.
If there is no corresponding record, a new record is created.
"""
fdb_out = cfunc('tcfdbout', libtc, c_bool,
('fdb', c_void_p, 1),
('id', c_int64, 1))
fdb_out.__doc__ =\
"""Remove a record of a fixed-length database object.
fdb -- specifies the fixed-length database object connected as a
writer.
id -- specifies the ID number. It should be more than 0. If it is
'IDMIN', the minimum ID number of existing records is
specified. If it is 'IDMAX', the maximum ID number of existing
records is specified.
If successful, the return value is true, else, it is false.
"""
fdb_out2 = cfunc('tcfdbout2', libtc, c_bool,
('fdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1))
fdb_out2.__doc__ =\
"""Remove a record with a decimal key of a fixed-length database
object.
fdb -- specifies the fixed-length database object connected as a
writer.
kbuf -- specifies the pointer to the region of the decimal key. It
should be more than 0. If it is "min", the minimum ID number
of existing records is specified. If it is "max", the maximum
ID number of existing records is specified.
ksiz -- specifies the size of the region of the key.
If successful, the return value is true, else, it is false.
"""
fdb_out3 = cfunc_fast('tcfdbout3', libtc, c_bool,
('fdb', c_void_p, 1),
('kstr', c_char_p, 1))
fdb_out3.__doc__ =\
"""Remove a string record with a decimal key of a fixed-length
database object.
fdb -- specifies the fixed-length database object connected as a
writer.
kstr -- specifies the string of the decimal key. It should be more
than 0. If it is "min", the minimum ID number of existing
records is specified. If it is "max", the maximum ID number
of existing records is specified.
If successful, the return value is true, else, it is false.
"""
fdb_get = cfunc('tcfdbget', libtc, tc_void_p,
('fdb', c_void_p, 1),
('id', c_int64, 1),
('sp', c_int_p, 2))
fdb_get.errcheck = lambda result, func, arguments : (result, arguments[2])
fdb_get.__doc__ =\
"""Retrieve a record in a fixed-length database object.
fdb -- specifies the fixed-length database object.
id -- specifies the ID number. It should be more than 0. If it is
'IDMIN', the minimum ID number of existing records is
specified. If it is 'IDMAX', the maximum ID number of existing
records is specified.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
If successful, the return value is the pointer to the region of the
value of the corresponding record. 'NULL' is returned if no record
corresponds.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. Because the region of the return value is allocated with the
'malloc' call, it should be released with the 'free' call when it is
no longer in use.
"""
fdb_get2 = cfunc('tcfdbget2', libtc, tc_void_p,
('fdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1),
('sp', c_int_p, 2))
fdb_get2.errcheck = lambda result, func, arguments : (result, arguments[3])
fdb_get2.__doc__ =\
"""Retrieve a record with a decimal key in a fixed-length database
object.
fdb -- specifies the fixed-length database object.
kbuf -- specifies the pointer to the region of the decimal key. It
should be more than 0. If it is "min", the minimum ID number
of existing records is specified. If it is "max", the maximum
ID number of existing records is specified.
ksiz -- specifies the size of the region of the key.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
If successful, the return value is the pointer to the region of the
value of the corresponding record. 'NULL' is returned if no record
corresponds.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. Because the region of the return value is allocated with the
'malloc' call, it should be released with the 'free' call when it is
no longer in use.
"""
fdb_get3 = cfunc_fast('tcfdbget3', libtc, tc_char_p,
('fdb', c_void_p, 1),
('kstr', c_char_p, 1))
fdb_get3.__doc__ =\
"""Retrieve a string record with a decimal key in a fixed-length
database object.
fdb -- specifies the fixed-length database object.
kstr -- specifies the string of the decimal key. It should be more
than 0. If it is "min", the minimum ID number of existing
records is specified. If it is "max", the maximum ID number
of existing records is specified.
If successful, the return value is the string of the value of the
corresponding record. 'NULL' is returned if no record corresponds.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. Because the region of the return value is allocated with the
'malloc' call, it should be released with the 'free' call when it is
no longer in use.
"""
fdb_get4 = cfunc('tcfdbget4', libtc, c_int,
('fdb', c_void_p, 1),
('id', c_int64, 1),
('vbuf', c_void_p, 1),
('max', c_int, 1))
fdb_get4.__doc__ =\
"""Retrieve a record in a fixed-length database object and write the
value into a buffer.
fdb -- specifies the fixed-length database object.
id -- specifies the ID number. It should be more than 0. If it is
'IDMIN', the minimum ID number of existing records is
specified. If it is 'IDMAX', the maximum ID number of
existing records is specified.
vbuf -- specifies the pointer to the buffer into which the value of
the corresponding record is written.
max -- specifies the size of the buffer.
If successful, the return value is the size of the written data, else,
it is -1. -1 is returned if no record corresponds to the specified
key.
Note that an additional zero code is not appended at the end of the
region of the writing buffer.
"""
fdb_vsiz = cfunc('tcfdbvsiz', libtc, c_int,
('fdb', c_void_p, 1),
('id', c_int64, 1))
fdb_vsiz.__doc__ =\
"""Get the size of the value of a record in a fixed-length database
object.
fdb -- specifies the fixed-length database object.
id -- specifies the ID number. It should be more than 0. If it is
'IDMIN', the minimum ID number of existing records is specified.
If it is 'IDMAX', the maximum ID number of existing records is
specified.
If successful, the return value is the size of the value of the
corresponding record, else, it is -1.
"""
fdb_vsiz2 = cfunc('tcfdbvsiz2', libtc, c_int,
('fdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1))
fdb_vsiz2.__doc__ =\
"""Get the size of the value with a decimal key in a fixed-length
database object.
fdb -- specifies the fixed-length database object.
kbuf -- specifies the pointer to the region of the decimal key. It
should be more than 0. If it is "min", the minimum ID number
of existing records is specified. If it is "max", the maximum
ID number of existing records is specified.
ksiz -- specifies the size of the region of the key.
If successful, the return value is the size of the value of the
corresponding record, else, it is -1.
"""
fdb_vsiz3 = cfunc_fast('tcfdbvsiz3', libtc, c_int,
('fdb', c_void_p, 1),
('kstr', c_char_p, 1))
fdb_vsiz3.__doc__ =\
"""Get the size of the string value with a decimal key in a
fixed-length database object.
fdb -- specifies the fixed-length database object.
kstr -- specifies the string of the decimal key. It should be more
than 0. If it is "min", the minimum ID number of existing
records is specified. If it is "max", the maximum ID number
of existing records is specified.
If successful, the return value is the size of the value of the
corresponding record, else, it is -1.
"""
fdb_iterinit = cfunc('tcfdbiterinit', libtc, c_bool,
('fdb', c_void_p, 1))
fdb_iterinit.__doc__ =\
"""Initialize the iterator of a fixed-length database object.
fdb -- specifies the fixed-length database object.
If successful, the return value is true, else, it is false.
The iterator is used in order to access the key of every record stored
in a database.
"""
fdb_iternext = cfunc('tcfdbiternext', libtc, c_uint64,
('fdb', c_void_p, 1))
fdb_iternext.__doc__ =\
"""Get the next ID number of the iterator of a fixed-length database
object.
fdb -- specifies the fixed-length database object.
If successful, the return value is the next ID number of the iterator,
else, it is 0. 0 is returned when no more records can be fetched from
the iterator.
Every record can be accessed by calling this function repeatedly. It
is allowed to update or remove records whose keys have already been
fetched during the iteration. Records are traversed in ascending
order of the ID number.
"""
fdb_iternext2 = cfunc('tcfdbiternext2', libtc, tc_void_p,
('fdb', c_void_p, 1),
('sp', c_int_p, 2))
fdb_iternext2.errcheck = lambda result, func, arguments : (result, arguments[1])
fdb_iternext2.__doc__ =\
"""Get the next decimay key of the iterator of a fixed-length database
object.
fdb -- specifies the fixed-length database object.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
If successful, the return value is the pointer to the region of the
next decimal key, else, it is 'NULL'. 'NULL' is returned when no
more records can be fetched from the iterator.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. Because the region of the return value is allocated with the
'malloc' call, it should be released with the 'free' call when it is
no longer in use. Every record can be accessed by calling this
function repeatedly. It is allowed to update or remove records whose
keys have already been fetched during the iteration. Records are
traversed in ascending order of the ID number.
"""
fdb_iternext3 = cfunc_fast('tcfdbiternext3', libtc, tc_char_p,
('fdb', c_void_p, 1))
fdb_iternext3.__doc__ =\
"""Get the next decimal key string of the iterator of a fixed-length
database object.
fdb -- specifies the fixed-length database object.
If successful, the return value is the string of the next decimal key,
else, it is 'NULL'. 'NULL' is returned when no more records can be
fetched from the iterator.
Because the region of the return value is allocated with the 'malloc'
call, it should be released with the 'free' call when it is no longer
in use. Every record can be accessed by calling this function
repeatedly. It is allowed to update or remove records whose keys have
already been fetched during the iteration. Records are traversed in
ascending order of the ID number.
"""
fdb_range = cfunc('tcfdbrange', libtc, tc_void_p,
('fdb', c_void_p, 1),
('lower', c_int64, 1),
('upper', c_int64, 1),
('max', c_int, 1, -1),
('np', c_int_p, 2))
fdb_range.errcheck = lambda result, func, arguments :\
py_list(result, arguments[4], c_uint64)
fdb_range.__doc__ =\
"""Get range matching ID numbers in a fixed-length database object.
fdb -- specifies the fixed-length database object.
lower -- specifies the lower limit of the range. If it is 'IDMIN',
the minimum ID is specified.
upper -- specifies the upper limit of the range. If it is 'IDMAX',
the maximum ID is specified.
max -- specifies the maximum number of keys to be fetched. If it is
negative, no limit is specified.
np -- specifies the pointer to the variable into which the number
of elements of the return value is assigned.
If successful, the return value is the pointer to an array of ID
numbers of the corresponding records. 'NULL' is returned on failure.
This function never fails; it returns an empty array even if no
key corresponds.
Because the region of the return value is allocated with the 'malloc'
call, it should be released with the 'free' call when it is no longer
in use.
"""
fdb_range2 = cfunc('tcfdbrange2', libtc, TCLIST_P,
('fdb', c_void_p, 1),
('lbuf', c_void_p, 1),
('lsiz', c_int, 1),
('ubuf', c_void_p, 1),
('usiz', c_int, 1),
('max', c_int, 1, -1))
fdb_range2.__doc__ =\
"""Get range matching decimal keys in a fixed-length database object.
fdb -- specifies the fixed-length database object.
lbuf -- specifies the pointer to the region of the lower key. If it
is "min", the minimum ID number of existing records is
specified.
lsiz -- specifies the size of the region of the lower key.
ubuf -- specifies the pointer to the region of the upper key. If it
is "max", the maximum ID number of existing records is
specified.
usiz -- specifies the size of the region of the upper key.
max -- specifies the maximum number of keys to be fetched. If it is
negative, no limit is specified.
The return value is a list object of the corresponding decimal keys.
This function never fails; it returns an empty list even if no
key corresponds.
Because the object of the return value is created with the function
'tclistnew', it should be deleted with the function 'tclistdel' when
it is no longer in use. Note that this function may be very slow
because every key in the database is scanned.
"""
fdb_range3 = cfunc_fast('tcfdbrange3', libtc, TCLIST_P,
('fdb', c_void_p, 1),
('lstr', c_char_p, 1),
('ustr', c_char_p, 1),
('max', c_int, 1, -1))
fdb_range3.__doc__ =\
"""Get range matching decimal keys with strings in a fixed-length
database object.
fdb -- specifies the fixed-length database object.
lstr -- specifies the string of the lower key. If it is "min", the
minimum ID number of existing records is specified.
ustr -- specifies the string of the upper key. If it is "max", the
maximum ID number of existing records is specified.
max -- specifies the maximum number of keys to be fetched. If it is
negative, no limit is specified.
The return value is a list object of the corresponding decimal keys.
This function never fails; it returns an empty list even if no
key corresponds.
Because the object of the return value is created with the function
'tclistnew', it should be deleted with the function 'tclistdel' when
it is no longer in use. Note that this function may be very slow
because every key in the database is scanned.
"""
fdb_range4 = cfunc('tcfdbrange4', libtc, TCLIST_P,
('fdb', c_void_p, 1),
('ibuf', c_void_p, 1),
('isiz', c_int, 1),
('max', c_int, 1, -1))
fdb_range4.__doc__ =\
"""Get keys with an interval notation in a fixed-length database
object.
fdb -- specifies the fixed-length database object.
ibuf -- specifies the pointer to the region of the interval notation.
isiz -- specifies the size of the region of the interval notation.
max -- specifies the maximum number of keys to be fetched. If it is
negative, no limit is specified.
The return value is a list object of the corresponding decimal keys.
This function never fails; it returns an empty list even if no
key corresponds.
Because the object of the return value is created with the function
'tclistnew', it should be deleted with the function 'tclistdel' when
it is no longer in use. Note that this function may be very slow
because every key in the database is scanned.
"""
fdb_range5 = cfunc('tcfdbrange5', libtc, TCLIST_P,
('fdb', c_void_p, 1),
('istr', c_char_p, 1),
('max', c_int, 1, -1))
fdb_range5.__doc__ =\
"""Get keys with an interval notation string in a fixed-length
database object.
fdb -- specifies the fixed-length database object.
istr -- specifies the pointer to the region of the interval notation
string.
max -- specifies the maximum number of keys to be fetched. If it is
negative, no limit is specified.
The return value is a list object of the corresponding decimal keys.
This function never fails; it returns an empty list even if no
key corresponds.
Because the object of the return value is created with the function
'tclistnew', it should be deleted with the function 'tclistdel' when
it is no longer in use. Note that this function may be very slow
because every key in the database is scanned.
"""
fdb_addint = cfunc('tcfdbaddint', libtc, c_int,
('fdb', c_void_p, 1),
('id', c_int64, 1),
('num', c_int, 1))
fdb_addint.__doc__ =\
"""Add an integer to a record in a fixed-length database object.
fdb -- specifies the fixed-length database object connected as a
writer.
id -- specifies the ID number. It should be more than 0. If it is
'IDMIN', the minimum ID number of existing records is
specified. If it is 'IDPREV', the number less by one than the
minimum ID number of existing records is specified. If it is
'IDMAX', the maximum ID number of existing records is
specified. If it is 'IDNEXT', the number greater by one than
the maximum ID number of existing records is specified.
num -- specifies the additional value.
If successful, the return value is the summation value, else, it is
'INT_MIN'.
If the corresponding record exists, the value is treated as an integer
and is added to. If no record corresponds, a new record of the
additional value is stored.
"""
fdb_adddouble = cfunc('tcfdbadddouble', libtc, c_double,
('fdb', c_void_p, 1),
('id', c_int64, 1),
('num', c_double, 1))
fdb_adddouble.__doc__ =\
"""Add a real number to a record in a fixed-length database object.
fdb -- specifies the fixed-length database object connected as a
writer.
id -- specifies the ID number. It should be more than 0. If it is
'IDMIN', the minimum ID number of existing records is
specified. If it is 'IDPREV', the number less by one than the
minimum ID number of existing records is specified. If it is
'IDMAX', the maximum ID number of existing records is
specified. If it is 'IDNEXT', the number greater by one than
the maximum ID number of existing records is specified.
num -- specifies the additional value.
If successful, the return value is the summation value, else, it is
Not-a-Number.
If the corresponding record exists, the value is treated as a real
number and is added to. If no record corresponds, a new record of the
additional value is stored.
"""
fdb_sync = cfunc('tcfdbsync', libtc, c_bool,
('fdb', c_void_p, 1))
fdb_sync.__doc__ =\
"""Synchronize updated contents of a fixed-length database object with
the file and the device.
fdb -- specifies the fixed-length database object connected as a
writer.
If successful, the return value is true, else, it is false.
This function is useful when another process connects to the same
database file.
"""
fdb_optimize = cfunc('tcfdboptimize', libtc, c_bool,
('fdb', c_void_p, 1),
('width', c_int32, 1, 0),
('limsiz', c_int64, 1, 0))
fdb_optimize.__doc__ =\
"""Optimize the file of a fixed-length database object.
fdb -- specifies the fixed-length database object connected as a
writer.
width -- specifies the width of the value of each record. If it is
not more than 0, the current setting is not changed.
limsiz -- specifies the limit size of the database file. If it is not
more than 0, the current setting is not changed.
If successful, the return value is true, else, it is false.
"""
fdb_vanish = cfunc('tcfdbvanish', libtc, c_bool,
('fdb', c_void_p, 1))
fdb_vanish.__doc__ =\
"""Remove all records of a fixed-length database object.
fdb -- specifies the fixed-length database object connected as a
writer.
If successful, the return value is true, else, it is false.
"""
fdb_copy = cfunc('tcfdbcopy', libtc, c_bool,
('fdb', c_void_p, 1),
('path', c_char_p, 1))
fdb_copy.__doc__ =\
"""Copy the database file of a fixed-length database object.
fdb -- specifies the fixed-length database object.
path -- specifies the path of the destination file. If it begins with
'@', the trailing substring is executed as a command line.
If successful, the return value is true, else, it is false. False is
returned if the executed command returns non-zero code.
The database file is assured to be kept synchronized and not modified
while the copying or executing operation is in progress. So, this
function is useful to create a backup file of the database file.
"""
fdb_tranbegin = cfunc('tcfdbtranbegin', libtc, c_bool,
('fdb', c_void_p, 1))
fdb_tranbegin.__doc__ =\
"""Begin the transaction of a fixed-length database object.
fdb -- specifies the fixed-length database object connected as a
writer.
If successful, the return value is true, else, it is false.
The database is locked by the thread while the transaction so that
only one transaction can be activated with a database object at the
same time. Thus, the serializable isolation level is assumed if every
database operation is performed in the transaction. All updated
regions are kept track of by write ahead logging while the
transaction. If the database is closed during transaction, the
transaction is aborted implicitly.
"""
fdb_trancommit = cfunc('tcfdbtrancommit', libtc, c_bool,
('fdb', c_void_p, 1))
fdb_trancommit.__doc__ =\
"""Commit the transaction of a fixed-length database object.
fdb -- specifies the fixed-length database object connected as a
writer.
If successful, the return value is true, else, it is false.
Update in the transaction is fixed when it is committed successfully.
"""
fdb_tranabort = cfunc('tcfdbtranabort', libtc, c_bool,
('fdb', c_void_p, 1))
fdb_tranabort.__doc__ =\
"""Abort the transaction of a fixed-length database object.
fdb -- specifies the fixed-length database object connected as a
writer.
If successful, the return value is true, else, it is false.
Update in the transaction is discarded when it is aborted. The state
of the database is rolled back to the point before the transaction.
"""
fdb_path = cfunc('tcfdbpath', libtc, c_char_p,
('fdb', c_void_p, 1))
fdb_path.__doc__ =\
"""Get the file path of a fixed-length database object.
fdb -- specifies the fixed-length database object.
The return value is the path of the database file or 'NULL' if the
object does not connect to any database file.
"""
fdb_rnum = cfunc('tcfdbrnum', libtc, c_uint64,
('fdb', c_void_p, 1))
fdb_rnum.__doc__ =\
"""Get the number of records of a fixed-length database object.
fdb -- specifies the fixed-length database object.
The return value is the number of records or 0 if the object does not
connect to any database file.
"""
fdb_fsiz = cfunc('tcfdbfsiz', libtc, c_uint64,
('fdb', c_void_p, 1))
fdb_fsiz.__doc__ =\
"""Get the size of the database file of a fixed-length database
object.
fdb -- specifies the fixed-length database object.
The return value is the size of the database file or 0 if the object
does not connect to any database file.
"""
# features for experts
fdb_setecode = cfunc('tcfdbsetecode', libtc, None,
('fdb', c_void_p, 1),
('ecode', c_int, 1),
('filename', c_char_p, 1),
('line', c_int, 1),
('func', c_char_p, 1))
fdb_setecode.__doc__ =\
"""Set the error code of a fixed-length database object.
fdb -- specifies the fixed-length database object.
ecode -- specifies the error code.
file -- specifies the file name of the code.
line -- specifies the line number of the code.
func -- specifies the function name of the code.
"""
fdb_setdbgfd = cfunc('tcfdbsetdbgfd', libtc, None,
('fdb', c_void_p, 1),
('fd', c_int, 1))
fdb_setdbgfd.__doc__ =\
"""Set the file descriptor for debugging output.
fdb -- specifies the fixed-length database object.
fd -- specifies the file descriptor for debugging output.
"""
fdb_dbgfd = cfunc('tcfdbdbgfd', libtc, c_int,
('fdb', c_void_p, 1))
fdb_dbgfd.__doc__ =\
"""Get the file descriptor for debugging output.
fdb -- specifies the fixed-length database object.
The return value is the file descriptor for debugging output.
"""
fdb_hasmutex = cfunc('tcfdbhasmutex', libtc, c_bool,
('fdb', c_void_p, 1))
fdb_hasmutex.__doc__ =\
"""Check whether mutual exclusion control is set to a fixed-length
database object.
fdb -- specifies the fixed-length database object.
If mutual exclusion control is set, it is true, else it is false.
"""
fdb_memsync = cfunc('tcfdbmemsync', libtc, c_bool,
('fdb', c_void_p, 1),
('phys', c_bool, 1))
fdb_memsync.__doc__ =\
"""Synchronize updating contents on memory of a fixed-length database
object.
fdb -- specifies the fixed-length database object connected as a
writer.
phys -- specifies whether to synchronize physically.
If successful, the return value is true, else, it is false.
"""
fdb_min = cfunc('tcfdbmin', libtc, c_uint64,
('fdb', c_void_p, 1))
fdb_min.__doc__ =\
"""Get the minimum ID number of records of a fixed-length database
object.
fdb -- specifies the fixed-length database object.
The return value is the minimum ID number of records or 0 if the
object does not connect to any database file.
"""
fdb_max = cfunc('tcfdbmax', libtc, c_uint64,
('fdb', c_void_p, 1))
fdb_max.__doc__ =\
"""Get the maximum ID number of records of a fixed-length database
object.
fdb -- specifies the fixed-length database object.
The return value is the maximum ID number of records or 0 if the
object does not connect to any database file.
"""
fdb_width = cfunc('tcfdbwidth', libtc, c_uint32,
('fdb', c_void_p, 1))
fdb_width.__doc__ =\
"""Get the width of the value of each record of a fixed-length
database object.
fdb -- specifies the fixed-length database object.
The return value is the width of the value of each record or 0 if the
object does not connect to any database file.
"""
fdb_limsiz = cfunc('tcfdblimsiz', libtc, c_uint64,
('fdb', c_void_p, 1))
fdb_limsiz.__doc__ =\
"""Get the limit file size of a fixed-length database object.
fdb -- specifies the fixed-length database object.
The return value is the limit file size or 0 if the object does not
connect to any database file.
"""
fdb_limid = cfunc('tcfdblimid', libtc, c_uint64,
('fdb', c_void_p, 1))
fdb_limid.__doc__ =\
"""Get the limit ID number of a fixed-length database object.
fdb -- specifies the fixed-length database object.
The return value is the limit ID number or 0 if the object does not
connect to any database file.
"""
fdb_inode = cfunc('tcfdbinode', libtc, c_uint64,
('fdb', c_void_p, 1))
fdb_inode.__doc__ =\
"""Get the inode number of the database file of a fixed-length
database object.
fdb -- specifies the fixed-length database object.
The return value is the inode number of the database file or 0 if the
object does not connect to any database file.
"""
fdb_mtime = cfunc('tcfdbmtime', libtc, c_time,
('fdb', c_void_p, 1))
fdb_mtime.__doc__ =\
"""Get the modification time of the database file of a fixed-length
database object.
fdb -- specifies the fixed-length database object.
The return value is the modification time of the database file or 0
if the object does not connect to any database file.
"""
fdb_omode = cfunc('tcfdbomode', libtc, c_int,
('fdb', c_void_p, 1))
fdb_omode.__doc__ =\
"""Get the connection mode of a fixed-length database object.
fdb -- specifies the fixed-length database object.
The return value is the connection mode.
"""
fdb_type = cfunc('tcfdbtype', libtc, c_uint8,
('fdb', c_void_p, 1))
fdb_type.__doc__ =\
"""Get the database type of a fixed-length database object.
fdb -- specifies the fixed-length database object.
The return value is the database type.
"""
fdb_flags = cfunc('tcfdbflags', libtc, c_uint8,
('fdb', c_void_p, 1))
fdb_flags.__doc__ =\
"""Get the additional flags of a fixed-length database object.
fdb -- specifies the fixed-length database object.
The return value is the additional flags.
"""
fdb_opaque = cfunc('tcfdbopaque', libtc, c_char_p,
('fdb', c_void_p, 1))
fdb_opaque.__doc__ =\
"""Get the pointer to the opaque field of a fixed-length database
object.
fdb -- specifies the fixed-length database object.
The return value is the pointer to the opaque field whose size is 128
bytes.
"""
fdb_putproc = cfunc('tcfdbputproc', libtc, c_bool,
('fdb', c_void_p, 1),
('id', c_int64, 1),
('vbuf', c_void_p, 1),
('vsiz', c_int, 1),
('proc', TCPDPROC, 1),
('op', c_void_p, 1))
fdb_putproc.__doc__ =\
"""Store a record into a fixed-length database object with a
duplication handler.
fdb -- specifies the fixed-length database object connected as a
writer.
id -- specifies the ID number. It should be more than 0. If it is
'IDMIN', the minimum ID number of existing records is
specified. If it is 'IDPREV', the number less by one than the
minimum ID number of existing records is specified. If it is
'IDMAX', the maximum ID number of existing records is
specified. If it is 'IDNEXT', the number greater by one than
the maximum ID number of existing records is specified.
vbuf -- specifies the pointer to the region of the value. 'NULL'
            means that record addition is omitted if there is no
corresponding record.
vsiz -- specifies the size of the region of the value. If the size of
the value is greater than the width tuning parameter of the
database, the size is cut down to the width.
proc -- specifies the pointer to the callback function to process
duplication. It receives four parameters. The first
parameter is the pointer to the region of the value. The
second parameter is the size of the region of the value. The
third parameter is the pointer to the variable into which the
size of the region of the return value is assigned. The
fourth parameter is the pointer to the optional opaque object.
It returns the pointer to the result object allocated with
'malloc'. It is released by the caller. If it is 'NULL', the
record is not modified. If it is '(void *)-1', the record is
removed.
op -- specifies an arbitrary pointer to be given as a parameter of
the callback function. If it is not needed, 'NULL' can be
specified.
If successful, the return value is true, else, it is false.
Note that the callback function can not perform any database operation
because the function is called in the critical section guarded by the
same locks of database operations.
"""
fdb_iterinit2 = cfunc('tcfdbiterinit2', libtc, c_bool,
('fdb', c_void_p, 1),
('id', c_int64, 1))
fdb_iterinit2.__doc__ =\
"""Move the iterator to the record corresponding a key of a
fixed-length database object.
fdb -- specifies the fixed-length database object.
id -- specifies the ID number. It should be more than 0. If it is
'IDMIN', the minimum ID number of existing records is
specified. If it is 'IDMAX', the maximum ID number of existing
records is specified.
If successful, the return value is true, else, it is false. False is
    returned if there is no record corresponding to the condition.
"""
fdb_iterinit3 = cfunc('tcfdbiterinit3', libtc, c_bool,
('fdb', c_void_p, 1),
('kbuf', c_void_p, 1),
('ksiz', c_int, 1))
fdb_iterinit3.__doc__ =\
"""Move the iterator to the decimal record of a fixed-length database
object.
fdb -- specifies the fixed-length database object.
kbuf -- specifies the pointer to the region of the decimal key. It
should be more than 0. If it is "min", the minimum ID number
of existing records is specified. If it is "max", the maximum
ID number of existing records is specified.
ksiz -- specifies the size of the region of the key.
If successful, the return value is true, else, it is false. False is
    returned if there is no record corresponding to the condition.
"""
fdb_iterinit4 = cfunc_fast('tcfdbiterinit4', libtc, c_bool,
('fdb', c_void_p, 1),
('kstr', c_char_p, 1))
fdb_iterinit4.__doc__ =\
"""Move the iterator to the decimal string record of a fixed-length
database object.
fdb -- specifies the fixed-length database object.
kstr -- specifies the string of the decimal key. It should be more
than 0. If it is "min", the minimum ID number of existing
records is specified. If it is "max", the maximum ID number
of existing records is specified.
If successful, the return value is true, else, it is false. False is
    returned if there is no record corresponding to the condition.
"""
fdb_foreach = cfunc('tcfdbforeach', libtc, c_bool,
('fdb', c_void_p, 1),
('iter', TCITER, 1),
('op', c_char_p, 1))
fdb_foreach.__doc__ =\
"""Process each record atomically of a fixed-length database object.
fdb -- specifies the fixed-length database object.
iter -- specifies the pointer to the iterator function called for each
record. It receives five parameters. The first parameter is
the pointer to the region of the key. The second parameter is
the size of the region of the key. The third parameter is the
pointer to the region of the value. The fourth parameter is
the size of the region of the value. The fifth parameter is
the pointer to the optional opaque object. It returns true to
continue iteration or false to stop iteration.
op -- specifies an arbitrary pointer to be given as a parameter of
the iterator function. If it is not needed, 'NULL' can be
specified.
If successful, the return value is true, else, it is false.
Note that the callback function can not perform any database operation
because the function is called in the critical section guarded by the
same locks of database operations.
"""
fdb_keytoid = cfunc('tcfdbkeytoid', libtc, c_int64,
('fdb', c_void_p, 1),
('kbuf', c_char_p, 1),
('ksiz', c_int, 1))
fdb_keytoid.__doc__ =\
"""Generate the ID number from arbitrary binary data.
kbuf -- specifies the pointer to the region of the key.
ksiz -- specifies the size of the region of the key.
The return value is the ID number.
"""
#
# Functions from tctdb.h
#
TDBQRYPROC = CFUNCTYPE(c_int, c_void_p, c_int, TCMAP_P, c_void_p)
TDBQRYPROC.__doc__ =\
"""Type of the pointer to a iterator function for each table record.
pkbuf -- specifies the pointer to the region of the primary key.
pksiz -- specifies the size of the region of the primary key.
cols -- specifies a map object containing columns.
op -- specifies the pointer to the optional opaque object.
The return value is flags of the post treatment by bitwise-or: `QPPUT'
to modify the record, `QPOUT' to remove the record, `QPSTOP' to stop
the iteration.
"""
tdb_errmsg = cfunc('tctdberrmsg', libtc, c_char_p,
('ecode', c_int, 1))
tdb_errmsg.__doc__ =\
"""Get the message string corresponding to an error code.
ecode -- specifies the error code.
The return value is the message string of the error code.
"""
tdb_new = cfunc('tctdbnew', libtc, c_void_p)
tdb_new.__doc__ =\
"""Create a table database object.
The return value is the new table database object.
"""
tdb_del = cfunc('tctdbdel', libtc, None,
('tdb', c_void_p, 1))
tdb_del.__doc__ =\
"""Delete a table database object.
tdb -- specifies the table database object.
If the database is not closed, it is closed implicitly. Note that the
deleted object and its derivatives can not be used anymore.
"""
tdb_ecode = cfunc('tctdbecode', libtc, c_int,
('tdb', c_void_p, 1))
tdb_ecode.__doc__ =\
"""Get the last happened error code of a table database object.
tdb -- specifies the table database object.
The return value is the last happened error code.
The following error code is defined: 'ESUCCESS' for success, 'ETHREAD'
for threading error, 'EINVALID' for invalid operation, 'ENOFILE' for
file not found, 'ENOPERM' for no permission, 'EMETA' for invalid meta
data, 'ERHEAD' for invalid record header, 'EOPEN' for open error,
'ECLOSE' for close error, 'ETRUNC' for trunc error, 'ESYNC' for sync
error, 'ESTAT' for stat error, 'ESEEK' for seek error, 'EREAD' for
read error, 'EWRITE' for write error, 'EMMAP' for mmap error, 'ELOCK'
for lock error, 'EUNLINK' for unlink error, 'ERENAME' for rename
error, 'EMKDIR' for mkdir error, 'ERMDIR' for rmdir error, 'EKEEP' for
existing record, 'ENOREC' for no record found, and 'EMISC' for
miscellaneous error.
"""
tdb_setmutex = cfunc('tctdbsetmutex', libtc, c_bool,
('tdb', c_void_p, 1))
tdb_setmutex.__doc__ =\
"""Set mutual exclusion control of a table database object for
threading.
tdb -- specifies the table database object which is not opened.
If successful, the return value is true, else, it is false.
Note that the mutual exclusion control is needed if the object is
shared by plural threads and this function should be called before the
database is opened.
"""
tdb_tune = cfunc('tctdbtune', libtc, c_bool,
('tdb', c_void_p, 1),
('bnum', c_int64, 1, 0),
('apow', c_int8, 1, -1),
('fpow', c_int8, 1, -1),
('opts', c_uint8, 1, 0))
tdb_tune.__doc__ =\
"""Set the tuning parameters of a table database object.
tdb -- specifies the table database object which is not opened.
bnum -- specifies the number of elements of the bucket array. If it
is not more than 0, the default value is specified. The
            default value is 131071. The suggested size of the bucket
            array is from 0.5 to 4 times the number of all records to
            be stored.
apow -- specifies the size of record alignment by power of 2. If it
is negative, the default value is specified. The default
value is 4 standing for 2^4=16.
fpow -- specifies the maximum number of elements of the free block
pool by power of 2. If it is negative, the default value is
specified. The default value is 10 standing for 2^10=1024.
opts -- specifies options by bitwise-or: 'TLARGE' specifies that the
size of the database can be larger than 2GB by using 64-bit
bucket array, 'TDEFLATE' specifies that each record is
compressed with Deflate encoding, 'TBZIP' specifies that each
record is compressed with BZIP2 encoding, 'TTCBS' specifies
that each record is compressed with TCBS encoding.
If successful, the return value is true, else, it is false.
Note that the tuning parameters should be set before the database is
opened.
"""
tdb_setcache = cfunc('tctdbsetcache', libtc, c_bool,
('tdb', c_void_p, 1),
('rcnum', c_int32, 1, 0),
('lcnum', c_int32, 1, 0),
('ncnum', c_int32, 1, 0))
tdb_setcache.__doc__ =\
"""Set the caching parameters of a table database object.
tdb -- specifies the table database object which is not opened.
rcnum -- specifies the maximum number of records to be cached. If it
is not more than 0, the record cache is disabled. It is
disabled by default.
lcnum -- specifies the maximum number of leaf nodes to be cached. If
it is not more than 0, the default value is specified. The
default value is 4096.
ncnum -- specifies the maximum number of non-leaf nodes to be cached.
If it is not more than 0, the default value is specified.
The default value is 512.
If successful, the return value is true, else, it is false.
Note that the caching parameters should be set before the database is
opened. Leaf nodes and non-leaf nodes are used in column indices.
"""
tdb_setxmsiz = cfunc('tctdbsetxmsiz', libtc, c_bool,
('tdb', c_void_p, 1),
('xmsiz', c_int64, 1, 0))
tdb_setxmsiz.__doc__ =\
"""Set the size of the extra mapped memory of a table database object.
tdb -- specifies the table database object which is not opened.
xmsiz -- specifies the size of the extra mapped memory. If it is not
more than 0, the extra mapped memory is disabled. The
default size is 67108864.
If successful, the return value is true, else, it is false.
    Note that the mapping parameters should be set before the database
    is opened.
"""
tdb_setdfunit = cfunc('tctdbsetdfunit', libtc, c_bool,
('tdb', c_void_p, 1),
('dfunit', c_int32, 1, 0))
tdb_setdfunit.__doc__ =\
"""Set the unit step number of auto defragmentation of a table
database object.
tdb -- specifies the table database object which is not opened.
    dfunit -- specifies the unit step number. If it is not more than 0,
the auto defragmentation is disabled. It is disabled by
default.
If successful, the return value is true, else, it is false.
Note that the defragmentation parameters should be set before the
database is opened.
"""
tdb_open = cfunc('tctdbopen', libtc, c_bool,
('tdb', c_void_p, 1),
('path', c_char_p, 1),
('omode', c_int, 1))
tdb_open.__doc__ =\
"""Open a database file and connect a table database object.
tdb -- specifies the table database object which is not opened.
path -- specifies the path of the database file.
omode -- specifies the connection mode: 'OWRITER' as a writer,
'OREADER' as a reader. If the mode is 'OWRITER', the
following may be added by bitwise-or: 'OCREAT', which means
             it creates a new database if it does not exist, 'OTRUNC', which means
it creates a new database regardless if one exists, 'OTSYNC',
which means every transaction synchronizes updated contents
with the device. Both of 'OREADER' and 'OWRITER' can be
added to by bitwise-or: 'ONOLCK', which means it opens the
database file without file locking, or 'OLCKNB', which means
locking is performed without blocking.
If successful, the return value is true, else, it is false.
"""
tdb_close = cfunc('tctdbclose', libtc, c_bool,
('tdb', c_void_p, 1))
tdb_close.__doc__ =\
"""Close a table database object.
tdb -- specifies the table database object.
If successful, the return value is true, else, it is false.
Update of a database is assured to be written when the database is
closed. If a writer opens a database but does not close it
appropriately, the database will be broken.
"""
tdb_put = cfunc('tctdbput', libtc, c_bool,
('tdb', c_void_p, 1),
('pkbuf', c_void_p, 1),
('pksiz', c_int, 1),
('cols', TCMAP_P, 1))
tdb_put.__doc__ =\
"""Store a record into a table database object.
tdb -- specifies the table database object connected as a writer.
pkbuf -- specifies the pointer to the region of the primary key.
pksiz -- specifies the size of the region of the primary key.
cols -- specifies a map object containing columns.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, it is overwritten.
"""
tdb_put2 = cfunc('tctdbput2', libtc, c_bool,
('tdb', c_void_p, 1),
('pkbuf', c_void_p, 1),
('pksiz', c_int, 1),
('cbuf', c_void_p, 1),
('csiz', c_int, 1))
tdb_put2.__doc__ =\
"""Store a string record into a table database object with a zero
separated column string.
tdb -- specifies the table database object connected as a writer.
pkbuf -- specifies the pointer to the region of the primary key.
pksiz -- specifies the size of the region of the primary key.
cbuf -- specifies the pointer to the region of the zero separated
column string where the name and the value of each column are
situated one after the other.
csiz -- specifies the size of the region of the column string.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, it is
overwritten.
"""
tdb_put3 = cfunc('tctdbput3', libtc, c_bool,
('tdb', c_void_p, 1),
('pkstr', c_char_p, 1),
('cstr', c_char_p, 1))
tdb_put3.__doc__ =\
"""Store a string record into a table database object with a tab
separated column string.
tdb -- specifies the table database object connected as a writer.
pkstr -- specifies the string of the primary key.
    cstr -- specifies the tab separated column string where the name and
            the value of each column are situated one after the other.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, it is overwritten.
"""
tdb_putkeep = cfunc('tctdbputkeep', libtc, c_bool,
('tdb', c_void_p, 1),
('pkbuf', c_void_p, 1),
('pksiz', c_int, 1),
('cols', TCMAP_P, 1))
tdb_putkeep.__doc__ =\
"""Store a new record into a table database object.
tdb -- specifies the table database object connected as a writer.
pkbuf -- specifies the pointer to the region of the primary key.
pksiz -- specifies the size of the region of the primary key.
cols -- specifies a map object containing columns.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, this function
has no effect.
"""
tdb_putkeep2 = cfunc('tctdbputkeep2', libtc, c_bool,
('tdb', c_void_p, 1),
('pkbuf', c_void_p, 1),
('pksiz', c_int, 1),
('cbuf', c_void_p, 1),
('csiz', c_int, 1))
tdb_putkeep2.__doc__ =\
"""Store a new string record into a table database object with a zero
separated column string.
tdb -- specifies the table database object connected as a writer.
pkbuf -- specifies the pointer to the region of the primary key.
pksiz -- specifies the size of the region of the primary key.
cbuf -- specifies the pointer to the region of the zero separated
column string where the name and the value of each column are
situated one after the other.
csiz -- specifies the size of the region of the column string.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, this function
has no effect.
"""
tdb_putkeep3 = cfunc('tctdbputkeep3', libtc, c_bool,
('tdb', c_void_p, 1),
('pkstr', c_char_p, 1),
('cstr', c_char_p, 1))
tdb_putkeep3.__doc__ =\
"""Store a new string record into a table database object with a tab
separated column string.
tdb -- specifies the table database object connected as a writer.
pkstr -- specifies the string of the primary key.
    cstr -- specifies the tab separated column string where the name and
            the value of each column are situated one after the other.
If successful, the return value is true, else, it is false.
If a record with the same key exists in the database, this function
has no effect.
"""
tdb_putcat = cfunc('tctdbputcat', libtc, c_bool,
('tdb', c_void_p, 1),
('pkbuf', c_void_p, 1),
('pksiz', c_int, 1),
('cols', TCMAP_P, 1))
tdb_putcat.__doc__ =\
"""Concatenate columns of the existing record in a table database
object.
tdb -- specifies the table database object connected as a writer.
pkbuf -- specifies the pointer to the region of the primary key.
pksiz -- specifies the size of the region of the primary key.
cols -- specifies a map object containing columns.
If successful, the return value is true, else, it is false.
If there is no corresponding record, a new record is created.
"""
tdb_putcat2 = cfunc('tctdbputcat2', libtc, c_bool,
('tdb', c_void_p, 1),
('pkbuf', c_void_p, 1),
('pksiz', c_int, 1),
('cbuf', c_void_p, 1),
('csiz', c_int, 1))
tdb_putcat2.__doc__ =\
"""Concatenate columns in a table database object with a zero
separated column string.
tdb -- specifies the table database object connected as a writer.
pkbuf -- specifies the pointer to the region of the primary key.
pksiz -- specifies the size of the region of the primary key.
cbuf -- specifies the pointer to the region of the zero separated
column string where the name and the value of each column are
situated one after the other.
csiz -- specifies the size of the region of the column string.
If successful, the return value is true, else, it is false.
If there is no corresponding record, a new record is created.
"""
tdb_putcat3 = cfunc('tctdbputcat3', libtc, c_bool,
('tdb', c_void_p, 1),
('pkstr', c_char_p, 1),
('cstr', c_char_p, 1))
tdb_putcat3.__doc__ =\
"""Concatenate columns in a table database object with with a tab
separated column string.
tdb -- specifies the table database object connected as a writer.
pkstr -- specifies the string of the primary key.
    cstr -- specifies the tab separated column string where the name and
            the value of each column are situated one after the other.
If successful, the return value is true, else, it is false.
If there is no corresponding record, a new record is created.
"""
tdb_out = cfunc('tctdbout', libtc, c_bool,
('tdb', c_void_p, 1),
('pkbuf', c_void_p, 1),
('pksiz', c_int, 1))
tdb_out.__doc__ =\
"""Remove a record of a table database object.
tdb -- specifies the table database object connected as a writer.
pkbuf -- specifies the pointer to the region of the primary key.
pksiz -- specifies the size of the region of the primary key.
If successful, the return value is true, else, it is false.
"""
tdb_out2 = cfunc('tctdbout2', libtc, c_bool,
('tdb', c_void_p, 1),
('pkstr', c_char_p, 1))
tdb_out2.__doc__ =\
"""Remove a string record of a table database object.
tdb -- specifies the table database object connected as a writer.
pkstr -- specifies the string of the primary key.
If successful, the return value is true, else, it is false.
"""
tdb_get = cfunc('tctdbget', libtc, TCMAP_P,
('tdb', c_void_p, 1),
('pkbuf', c_void_p, 1),
('pksiz', c_int, 1))
tdb_get.__doc__ =\
"""Retrieve a record in a table database object.
tdb -- specifies the table database object.
pkbuf -- specifies the pointer to the region of the primary key.
    pksiz -- specifies the size of the region of the primary key.
If successful, the return value is a map object of the columns of the
corresponding record. 'NULL' is returned if no record corresponds.
Because the object of the return value is created with the function
'tcmapnew', it should be deleted with the function 'tcmapdel' when it
is no longer in use.
"""
tdb_get2 = cfunc('tctdbget2', libtc, tc_char_p,
('tdb', c_void_p, 1),
('pkbuf', c_void_p, 1),
('pksiz', c_int, 1),
('sp', c_int_p, 2))
tdb_get2.errcheck = lambda result, func, arguments : (result, arguments[3])
tdb_get2.__doc__ =\
"""Retrieve a record in a table database object as a zero separated
column string.
tdb -- specifies the table database object.
pkbuf -- specifies the pointer to the region of the primary key.
pksiz -- specifies the size of the region of the primary key.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
If successful, the return value is the pointer to the region of the
column string of the corresponding record. 'NULL' is returned if no
record corresponds.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. Because the region of the return value is allocated with the
'malloc' call, it should be released with the 'free' call when it is
no longer in use.
"""
tdb_get3 = cfunc('tctdbget3', libtc, tc_char_p,
('tdb', c_void_p, 1),
('pkstr', c_char_p, 1))
tdb_get3.__doc__ =\
"""Retrieve a string record in a table database object as a tab
separated column string.
tdb -- specifies the table database object.
pkstr -- specifies the string of the primary key.
If successful, the return value is the tab separated column string of
the corresponding record. 'NULL' is returned if no record
corresponds.
Because the region of the return value is allocated with the 'malloc'
call, it should be released with the 'free' call when it is no longer
in use.
"""
tdb_vsiz = cfunc('tctdbvsiz', libtc, c_int,
('tdb', c_void_p, 1),
('pkbuf', c_void_p, 1),
('pksiz', c_int, 1))
tdb_vsiz.__doc__ =\
"""Get the size of the value of a record in a table database object.
tdb -- specifies the table database object.
    pkbuf -- specifies the pointer to the region of the primary key.
    pksiz -- specifies the size of the region of the primary key.
If successful, the return value is the size of the value of the
corresponding record, else, it is -1.
"""
tdb_vsiz2 = cfunc('tctdbvsiz2', libtc, c_int,
('tdb', c_void_p, 1),
('pkstr', c_char_p, 1))
tdb_vsiz2.__doc__ =\
"""Get the size of the value of a string record in a table database
object.
tdb -- specifies the table database object.
    pkstr -- specifies the string of the primary key.
If successful, the return value is the size of the value of the
corresponding record, else, it is -1.
"""
tdb_iterinit = cfunc('tctdbiterinit', libtc, c_bool,
('tdb', c_void_p, 1))
tdb_iterinit.__doc__ =\
"""Initialize the iterator of a table database object.
tdb -- specifies the table database object.
If successful, the return value is true, else, it is false.
The iterator is used in order to access the primary key of every
record stored in a database.
"""
tdb_iternext = cfunc('tctdbiternext', libtc, tc_void_p,
('tdb', c_void_p, 1),
('sp', c_int_p, 2))
tdb_iternext.errcheck = lambda result, func, arguments : (result, arguments[1])
tdb_iternext.__doc__ =\
"""Get the next primary key of the iterator of a table database
object.
tdb -- specifies the table database object.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
If successful, the return value is the pointer to the region of the
next primary key, else, it is 'NULL'. 'NULL' is returned when no
    more records can be fetched from the iterator.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. Because the region of the return value is allocated with the
'malloc' call, it should be released with the 'free' call when it is
no longer in use. It is possible to access every record by iteration
    of calling this function. It is allowed to update or remove records
    whose keys have already been fetched during the iteration. However,
    it is not assured if the database is updated during the iteration.
    Besides, the order of this traversal access method is arbitrary, so
    it is not assured that the order of storing matches the order of
    traversal.
"""
tdb_iternext2 = cfunc('tctdbiternext2', libtc, tc_char_p,
('tdb', c_void_p, 1))
tdb_iternext2.__doc__ =\
"""Get the next primary key string of the iterator of a table database
object.
tdb -- specifies the table database object.
If successful, the return value is the string of the next primary key,
    else, it is 'NULL'. 'NULL' is returned when no more records can be
    fetched from the iterator.
Because the region of the return value is allocated with the 'malloc'
call, it should be released with the 'free' call when it is no longer
in use. It is possible to access every record by iteration of calling
    this function. However, it is not assured if the database is updated
    during the iteration. Besides, the order of this traversal access
    method is arbitrary, so it is not assured that the order of storing
    matches the order of traversal.
"""
tdb_iternext3 = cfunc('tctdbiternext3', libtc, TCMAP_P,
('tdb', c_void_p, 1))
tdb_iternext3.__doc__ =\
"""Get the columns of the next record of the iterator of a table
database object.
tdb -- specifies the table database object.
If successful, the return value is a map object of the columns of the
    next record, else, it is 'NULL'. 'NULL' is returned when no more
    records can be fetched from the iterator. The primary key is added
    into the map as a column of an empty string key.
Because the object of the return value is created with the function
'tcmapnew', it should be deleted with the function 'tcmapdel' when it
is no longer in use. It is possible to access every record by
    iteration of calling this function. However, it is not assured if
    the database is updated during the iteration. Besides, the order of
    this traversal access method is arbitrary, so it is not assured that
    the order of storing matches the order of traversal.
"""
tdb_fwmkeys = cfunc('tctdbfwmkeys', libtc, TCLIST_P,
('tdb', c_void_p, 1),
('pbuf', c_void_p, 1),
('psiz', c_int, 1),
('max', c_int, 1, -1))
tdb_fwmkeys.__doc__ =\
"""Get forward matching primary keys in a table database object.
tdb -- specifies the table database object.
pbuf -- specifies the pointer to the region of the prefix.
psiz -- specifies the size of the region of the prefix.
max -- specifies the maximum number of keys to be fetched. If it is
negative, no limit is specified.
The return value is a list object of the corresponding keys. This
    function never fails. It returns an empty list even if no key
corresponds.
Because the object of the return value is created with the function
'tclistnew', it should be deleted with the function 'tclistdel' when
it is no longer in use. Note that this function may be very slow
because every key in the database is scanned.
"""
tdb_fwmkeys2 = cfunc('tctdbfwmkeys2', libtc, TCLIST_P,
('tdb', c_void_p, 1),
('pstr', c_char_p, 1),
('max', c_int, 1, -1))
tdb_fwmkeys2.__doc__ =\
"""Get forward matching string primary keys in a table database
object.
tdb -- specifies the table database object.
pstr -- specifies the string of the prefix.
max -- specifies the maximum number of keys to be fetched. If it is
negative, no limit is specified.
The return value is a list object of the corresponding keys. This
    function never fails. It returns an empty list even if no key
corresponds.
Because the object of the return value is created with the function
'tclistnew', it should be deleted with the function 'tclistdel' when
it is no longer in use. Note that this function may be very slow
because every key in the database is scanned.
"""
tdb_addint = cfunc('tctdbaddint', libtc, c_int,
('tdb', c_void_p, 1),
('pkbuf', c_void_p, 1),
('pksiz', c_int, 1),
('num', c_int, 1))
tdb_addint.__doc__ =\
"""Add an integer to a column of a record in a table database object.
tdb -- specifies the table database object connected as a writer.
pkbuf -- specifies the pointer to the region of the primary key.
pksiz -- specifies the size of the region of the primary key.
num -- specifies the additional value.
If successful, the return value is the summation value, else, it is
'INT_MIN'.
The additional value is stored as a decimal string value of a column
whose name is "_num". If no record corresponds, a new record with the
additional value is stored.
"""
tdb_adddouble = cfunc('tctdbadddouble', libtc, c_double,
('tdb', c_void_p, 1),
('pkbuf', c_void_p, 1),
('pksiz', c_int, 1),
('num', c_double, 1))
tdb_adddouble.__doc__ =\
"""Add a real number to a column of a record in a table database
object.
tdb -- specifies the table database object connected as a writer.
pkbuf -- specifies the pointer to the region of the primary key.
pksiz -- specifies the size of the region of the primary key.
num -- specifies the additional value.
If successful, the return value is the summation value, else, it is
Not-a-Number.
The additional value is stored as a decimal string value of a column
whose name is "_num". If no record corresponds, a new record with the
additional value is stored.
"""
tdb_sync = cfunc('tctdbsync', libtc, c_bool,
('tdb', c_void_p, 1))
tdb_sync.__doc__ =\
"""Synchronize updated contents of a table database object with the
file and the device.
tdb -- specifies the table database object connected as a writer.
If successful, the return value is true, else, it is false.
This function is useful when another process connects to the same
database file.
"""
tdb_optimize = cfunc('tctdboptimize', libtc, c_bool,
('tdb', c_void_p, 1),
('bnum', c_int64, 1, 0),
('apow', c_int8, 1, -1),
('fpow', c_int8, 1, -1),
('opts', c_uint8, 1, 0))
tdb_optimize.__doc__ =\
"""Optimize the file of a table database object.
tdb -- specifies the table database object connected as a writer.
bnum -- specifies the number of elements of the bucket array. If it
is not more than 0, the default value is specified. The
            default value is twice the number of records.
apow -- specifies the size of record alignment by power of 2. If it
is negative, the current setting is not changed.
fpow -- specifies the maximum number of elements of the free block
pool by power of 2. If it is negative, the current setting is
not changed.
opts -- specifies options by bitwise-or: 'TLARGE' specifies that the
size of the database can be larger than 2GB by using 64-bit
bucket array, 'TDEFLATE' specifies that each record is
compressed with Deflate encoding, 'TBZIP' specifies that each
record is compressed with BZIP2 encoding, 'TTCBS' specifies
that each record is compressed with TCBS encoding. If it is
'UINT8_MAX', the current setting is not changed.
If successful, the return value is true, else, it is false.
This function is useful to reduce the size of the database file with
data fragmentation by successive updating.
"""
tdb_vanish = cfunc('tctdbvanish', libtc, c_bool,
('tdb', c_void_p, 1))
tdb_vanish.__doc__ =\
"""Remove all records of a table database object.
tdb -- specifies the table database object connected as a writer.
If successful, the return value is true, else, it is false.
"""
tdb_copy = cfunc('tctdbcopy', libtc, c_bool,
('tdb', c_void_p, 1),
('path', c_char_p, 1))
tdb_copy.__doc__ =\
"""Copy the database file of a table database object.
tdb -- specifies the table database object.
path -- specifies the path of the destination file. If it begins with
'@', the trailing substring is executed as a command line.
If successful, the return value is true, else, it is false. False is
returned if the executed command returns non-zero code.
The database file is assured to be kept synchronized and not modified
while the copying or executing operation is in progress. So, this
function is useful to create a backup file of the database file.
"""
tdb_tranbegin = cfunc('tctdbtranbegin', libtc, c_bool,
('tdb', c_void_p, 1))
tdb_tranbegin.__doc__ =\
"""Begin the transaction of a table database object.
tdb -- specifies the table database object connected as a writer.
If successful, the return value is true, else, it is false.
The database is locked by the thread while the transaction so that
only one transaction can be activated with a database object at the
same time. Thus, the serializable isolation level is assumed if every
database operation is performed in the transaction. Because all pages
are cached on memory while the transaction, the amount of referred
records is limited by the memory capacity. If the database is closed
    during a transaction, the transaction is aborted implicitly.
"""
tdb_trancommit = cfunc('tctdbtrancommit', libtc, c_bool,
('tdb', c_void_p, 1))
tdb_trancommit.__doc__ =\
"""Commit the transaction of a table database object.
tdb -- specifies the table database object connected as a writer.
If successful, the return value is true, else, it is false.
Update in the transaction is fixed when it is committed successfully.
"""
tdb_tranabort = cfunc('tctdbtranabort', libtc, c_bool,
('tdb', c_void_p, 1))
tdb_tranabort.__doc__ =\
"""Abort the transaction of a table database object.
tdb -- specifies the table database object connected as a writer.
If successful, the return value is true, else, it is false.
Update in the transaction is discarded when it is aborted. The state
    of the database is rolled back to its state before the transaction.
"""
tdb_path = cfunc('tctdbpath', libtc, c_char_p,
('tdb', c_void_p, 1))
tdb_path.__doc__ =\
"""Get the file path of a table database object.
tdb -- specifies the table database object.
The return value is the path of the database file or 'NULL' if the
object does not connect to any database file.
"""
tdb_rnum = cfunc('tctdbrnum', libtc, c_uint64,
('tdb', c_void_p, 1))
tdb_rnum.__doc__ =\
"""Get the number of records ccccof a table database object.
tdb -- specifies the table database object.
The return value is the number of records or 0 if the object does not
connect to any database file.
"""
tdb_fsiz = cfunc('tctdbfsiz', libtc, c_uint64,
('tdb', c_void_p, 1))
tdb_fsiz.__doc__ =\
"""Get the size of the database file of a table database object.
tdb -- specifies the table database object.
The return value is the size of the database file or 0 if the object
does not connect to any database file.
"""
tdb_setindex = cfunc('tctdbsetindex', libtc, c_bool,
('tdb', c_void_p, 1),
('name', c_char_p, 1),
('type', c_int, 1))
tdb_setindex.__doc__ =\
"""Set a column index to a table database object.
tdb -- specifies the table database object connected as a writer.
name -- specifies the name of a column. If the name of an existing
index is specified, the index is rebuilt. An empty string
means the primary key.
type -- specifies the index type: 'ITLEXICAL' for lexical string,
'ITDECIMAL' for decimal string, 'ITTOKEN' for token inverted
index, 'ITQGRAM' for q-gram inverted index. If it is 'ITOPT',
the index is optimized. If it is 'ITVOID', the index is
removed. If 'ITKEEP' is added by bitwise-or and the index
exists, this function merely returns failure.
If successful, the return value is true, else, it is false.
    Note that column indices should be set after the database is
    opened.
"""
tdb_genuid = cfunc('tctdbgenuid', libtc, c_int64,
('tdb', c_void_p, 1))
tdb_genuid.__doc__ =\
"""Generate a unique ID number of a table database object.
tdb -- specifies the table database object connected as a writer.
The return value is the new unique ID number or -1 on failure.
"""
tdb_qrynew = cfunc('tctdbqrynew', libtc, c_void_p,
('tdb', c_void_p, 1))
tdb_qrynew.__doc__ =\
"""Create a query object.
tdb -- specifies the table database object.
The return value is the new query object.
"""
tdb_qrydel = cfunc('tctdbqrydel', libtc, None,
('qry', c_void_p, 1))
tdb_qrydel.__doc__ =\
"""Delete a query object.
qry -- specifies the query object.
"""
tdb_qryaddcond = cfunc('tctdbqryaddcond', libtc, None,
('qry', c_void_p, 1),
('name', c_char_p, 1),
('op', c_int, 1),
('expr', c_char_p, 1))
tdb_qryaddcond.__doc__ =\
"""Add a narrowing condition to a query object.
qry -- specifies the query object.
name -- specifies the name of a column. An empty string means the
primary key.
op -- specifies an operation type: 'QCSTREQ' for string which is
equal to the expression, 'QCSTRINC' for string which is
included in the expression, 'QCSTRBW' for string which begins
with the expression, 'QCSTREW' for string which ends with the
expression, 'QCSTRAND' for string which includes all tokens in
the expression, 'QCSTROR' for string which includes at least
one token in the expression, 'QCSTROREQ' for string which is
equal to at least one token in the expression, 'QCSTRRX' for
string which matches regular expressions of the expression,
'QCNUMEQ' for number which is equal to the expression,
'QCNUMGT' for number which is greater than the expression,
'QCNUMGE' for number which is greater than or equal to the
expression, 'QCNUMLT' for number which is less than the
expression, 'QCNUMLE' for number which is less than or equal
to the expression, 'QCNUMBT' for number which is between two
tokens of the expression, 'QCNUMOREQ' for number which is
equal to at least one token in the expression, 'QCFTSPH' for
full-text search with the phrase of the expression, 'QCFTSAND'
for full-text search with all tokens in the expression,
'QCFTSOR' for full-text search with at least one token in the
expression, 'QCFTSEX' for full-text search with the compound
expression. All operations can be flagged by bitwise-or:
'QCNEGATE' for negation, 'QCNOIDX' for using no index.
    expr -- specifies an operand expression.
"""
tdb_qrysetorder = cfunc('tctdbqrysetorder', libtc, None,
('qry', c_void_p, 1),
('name', c_char_p, 1),
('type', c_int, 1))
tdb_qrysetorder.__doc__ =\
"""Set the order of a query object.
qry -- specifies the query object.
name -- specifies the name of a column. An empty string means the
primary key.
type -- specifies the order type: 'QOSTRASC' for string ascending,
'QOSTRDESC' for string descending, 'QONUMASC' for number
ascending, 'QONUMDESC' for number descending.
"""
tdb_qrysetlimit = cfunc('tctdbqrysetlimit', libtc, None,
('qry', c_void_p, 1),
('max', c_int, 1, -1),
('skip', c_int, 1, 0))
tdb_qrysetlimit.__doc__ =\
"""Set the limit number of records of the result of a query object.
qry -- specifies the query object.
max -- specifies the maximum number of records of the result. If it
is negative, no limit is specified.
skip -- specifies the number of skipped records of the result. If it
is not more than 0, no record is skipped.
"""
tdb_qrysearch = cfunc('tctdbqrysearch', libtc, TCLIST_P,
('qry', c_void_p, 1))
tdb_qrysearch.__doc__ =\
"""Execute the search of a query object.
qry -- specifies the query object.
The return value is a list object of the primary keys of the
    corresponding records. This function never fails. It returns an
empty list even if no record corresponds.
Because the object of the return value is created with the function
'tclistnew', it should be deleted with the function 'tclistdel' when
it is no longer in use.
"""
tdb_qrysearchout = cfunc('tctdbqrysearchout', libtc, c_bool,
('qry', c_void_p, 1))
tdb_qrysearchout.__doc__ =\
"""Remove each record corresponding to a query object.
qry -- specifies the query object of the database connected as a
writer.
If successful, the return value is true, else, it is false.
"""
tdb_qryproc = cfunc('tctdbqryproc', libtc, c_bool,
('qry', c_void_p, 1),
('proc', TDBQRYPROC, 1),
('op', c_void_p, 1))
tdb_qryproc.__doc__ =\
"""Process each record corresponding to a query object.
qry -- specifies the query object of the database connected as a
writer.
proc -- specifies the pointer to the iterator function called for each
record. It receives four parameters. The first parameter is
the pointer to the region of the primary key. The second
parameter is the size of the region of the primary key. The
third parameter is a map object containing columns. The
fourth parameter is the pointer to the optional opaque object.
It returns flags of the post treatment by bitwise-or: 'QPPUT'
to modify the record, 'QPOUT' to remove the record, 'QPSTOP'
to stop the iteration.
op -- specifies an arbitrary pointer to be given as a parameter of
the iterator function. If it is not needed, 'NULL' can be
specified.
If successful, the return value is true, else, it is false.
"""
tdb_qryhint = cfunc('tctdbqryhint', libtc, c_char_p,
('qry', c_void_p, 1))
tdb_qryhint.__doc__ =\
"""Get the hint string of a query object.
qry -- specifies the query object.
The return value is the hint string.
This function should be called after the query execution by
'tctdbqrysearch' and so on. The region of the return value is
overwritten when this function is called again.
"""
tdb_metasearch = cfunc('tctdbmetasearch', libtc, TCLIST_P,
('qrys', ListPOINTER(c_void_p), 1),
('num', c_int, 1),
('type', c_int, 1))
tdb_metasearch.__doc__ =\
"""Retrieve records with multiple query objects and get the set of the
result.
qrys -- specifies an array of the query objects.
num -- specifies the number of elements of the array.
type -- specifies a set operation type: 'MSUNION' for the union set,
'MSISECT' for the intersection set, 'MSDIFF' for the
difference set.
The return value is a list object of the primary keys of the
    corresponding records. This function never fails. It returns an
empty list even if no record corresponds.
If the first query object has the order setting, the result array is
sorted by the order. Because the object of the return value is
created with the function 'tclistnew', it should be deleted with the
function 'tclistdel' when it is no longer in use.
"""
# features for experts
tdb_setecode = cfunc('tctdbsetecode', libtc, None,
('tdb', c_void_p, 1),
('ecode', c_int, 1),
('filename', c_char_p, 1),
('line', c_int, 1),
('func', c_char_p, 1))
tdb_setecode.__doc__ =\
"""Set the error code of a table database object.
tdb -- specifies the table database object.
ecode -- specifies the error code.
    filename -- specifies the file name of the code.
line -- specifies the line number of the code.
func -- specifies the function name of the code.
"""
tdb_setdbgfd = cfunc('tctdbsetdbgfd', libtc, None,
('tdb', c_void_p, 1),
('fd', c_int, 1))
tdb_setdbgfd.__doc__ =\
"""Set the file descriptor for debugging output.
tdb -- specifies the table database object.
fd -- specifies the file descriptor for debugging output.
"""
tdb_dbgfd = cfunc('tctdbdbgfd', libtc, c_int,
('tdb', c_void_p, 1))
tdb_dbgfd.__doc__ =\
"""Get the file descriptor for debugging output.
tdb -- specifies the table database object.
The return value is the file descriptor for debugging output.
"""
tdb_hasmutex = cfunc('tctdbhasmutex', libtc, c_bool,
('tdb', c_void_p, 1))
tdb_hasmutex.__doc__ =\
"""Check whether mutual exclusion control is set to a table database
object.
tdb -- specifies the table database object.
If mutual exclusion control is set, it is true, else it is false.
"""
tdb_memsync = cfunc('tctdbmemsync', libtc, c_bool,
('tdb', c_void_p, 1),
('phys', c_bool, 1))
tdb_memsync.__doc__ =\
"""Synchronize updating contents on memory of a table database object.
tdb -- specifies the table database object connected as a writer.
phys -- specifies whether to synchronize physically.
If successful, the return value is true, else, it is false.
"""
tdb_bnum = cfunc('tctdbbnum', libtc, c_uint64,
('tdb', c_void_p, 1))
tdb_bnum.__doc__ =\
"""Get the number of elements of the bucket array of a table database
object.
tdb -- specifies the table database object.
The return value is the number of elements of the bucket array or 0 if
the object does not connect to any database file.
"""
tdb_align = cfunc('tctdbalign', libtc, c_uint32,
('tdb', c_void_p, 1))
tdb_align.__doc__ =\
"""Get the record alignment of a table database object.
tdb -- specifies the table database object.
The return value is the record alignment or 0 if the object does not
connect to any database file.
"""
tdb_fbpmax = cfunc('tctdbfbpmax', libtc, c_uint32,
('tdb', c_void_p, 1))
tdb_fbpmax.__doc__ =\
"""Get the maximum number of the free block pool of a table database
object.
tdb -- specifies the table database object.
The return value is the maximum number of the free block pool or 0 if
the object does not connect to any database file.
"""
tdb_inode = cfunc('tctdbinode', libtc, c_uint64,
('tdb', c_void_p, 1))
tdb_inode.__doc__ =\
"""Get the inode number of the database file of a table database
object.
tdb -- specifies the table database object.
The return value is the inode number of the database file or 0 if the
object does not connect to any database file.
"""
tdb_mtime = cfunc('tctdbmtime', libtc, c_time,
('tdb', c_void_p, 1))
tdb_mtime.__doc__ =\
"""Get the modification time of the database file of a table database
object.
tdb -- specifies the table database object.
    The return value is the modification time of the database file or 0
    if the object does not connect to any database file.
"""
tdb_flags = cfunc('tctdbflags', libtc, c_uint8,
('tdb', c_void_p, 1))
tdb_flags.__doc__ =\
"""Get the additional flags of a table database object.
tdb -- specifies the table database object.
The return value is the additional flags.
"""
tdb_opts = cfunc('tctdbopts', libtc, c_uint8,
('tdb', c_void_p, 1))
tdb_opts.__doc__ =\
"""Get the options of a table database object.
tdb -- specifies the table database object.
The return value is the options.
"""
tdb_opaque = cfunc('tctdbopaque', libtc, c_char_p,
('tdb', c_void_p, 1))
tdb_opaque.__doc__ =\
"""Get the pointer to the opaque field of a table database object.
tdb -- specifies the table database object.
The return value is the pointer to the opaque field whose size is 128
bytes.
"""
tdb_bnumused = cfunc('tctdbbnumused', libtc, c_uint64,
('tdb', c_void_p, 1))
tdb_bnumused.__doc__ =\
"""Get the number of used elements of the bucket array of a table
database object.
tdb -- specifies the table database object.
The return value is the number of used elements of the bucket array or
0 if the object does not connect to any database file.
"""
tdb_inum = cfunc('tctdbinum', libtc, c_int,
('tdb', c_void_p, 1))
tdb_inum.__doc__ =\
"""Get the number of column indices of a table database object.
tdb -- specifies the table database object.
The return value is the number of column indices or 0 if the object
does not connect to any database file.
"""
tdb_uidseed = cfunc('tctdbuidseed', libtc, c_int64,
('tdb', c_void_p, 1))
tdb_uidseed.__doc__ =\
"""Get the seed of unique ID unumbers of a table database object.
tdb -- specifies the table database object.
The return value is the seed of unique ID numbers or -1 on failure.
"""
tdb_setuidseed = cfunc('tctdbsetuidseed', libtc, c_bool,
('tdb', c_void_p, 1),
('seed', c_int64, 1))
tdb_setuidseed.__doc__ =\
"""Set the seed of unique ID unumbers of a table database object.
tdb -- specifies the table database object connected as a writer.
If successful, the return value is true, else, it is false.
"""
tdb_setinvcache = cfunc('tctdbsetinvcache', libtc, c_bool,
('tdb', c_void_p, 1),
('iccmax', c_int64, 1, 0),
('iccsync', c_double, 1, 0))
tdb_setinvcache.__doc__ =\
"""Set the parameters of the inverted cache of a table database object.
tdb -- specifies the table database object.
iccmax -- specifies the maximum size. If it is not more than 0, the
default value is specified. The default value is 67108864.
iccsync -- specifies synchronization ratio. If it is not more than 0,
the default value is specified. The default value is 0.01.
If successful, the return value is true, else, it is false.
Note that the caching parameters should be set before the database is
opened.
"""
tdb_setcodecfunc = cfunc('tctdbsetcodecfunc', libtc, c_bool,
('tdb', c_void_p, 1),
('enc', TCCODEC, 1),
('encop', c_void_p, 1),
('dec', TCCODEC, 1),
('decop', c_void_p, 1))
tdb_setcodecfunc.__doc__ =\
"""Set the custom codec functions of a table database object.
tdb -- specifies the table database object.
enc -- specifies the pointer to the custom encoding function. It
receives four parameters. The first parameter is the pointer
to the region. The second parameter is the size of the
region. The third parameter is the pointer to the variable
into which the size of the region of the return value is
assigned. The fourth parameter is the pointer to the
optional opaque object. It returns the pointer to the result
object allocated with 'malloc' call if successful, else, it
returns 'NULL'.
encop -- specifies an arbitrary pointer to be given as a parameter of
the encoding function. If it is not needed, 'NULL' can be
specified.
dec -- specifies the pointer to the custom decoding function.
decop -- specifies an arbitrary pointer to be given as a parameter of
the decoding function. If it is not needed, 'NULL' can be
specified.
If successful, the return value is true, else, it is false.
Note that the custom codec functions should be set before the database
is opened and should be set every time the database is being opened.
"""
tdb_dfunit = cfunc('tctdbdfunit', libtc, c_uint32,
('tdb', c_void_p, 1))
tdb_dfunit.__doc__ =\
"""Get the unit step number of auto defragmentation of a table
database object.
tdb -- specifies the table database object.
The return value is the unit step number of auto defragmentation.
"""
tdb_defrag = cfunc('tctdbdefrag', libtc, c_bool,
('tdb', c_void_p, 1),
('step', c_int64, 1, 0))
tdb_defrag.__doc__ =\
"""Perform dynamic defragmentation of a table database object.
tdb -- specifies the table database object connected as a writer.
    step -- specifies the number of steps. If it is not more than 0, the
whole file is defragmented gradually without keeping a
continuous lock.
If successful, the return value is true, else, it is false.
"""
# tdb_cacheclear = cfunc('tctdbcacheclear', libtc, c_bool,
# ('tdb', c_void_p, 1))
# tdb_cacheclear.__doc__ =\
# """Clear the cache of a table tree database object.
# tdb -- specifies the table tree database object.
# If successful, the return value is true, else, it is false.
# """
tdb_putproc = cfunc('tctdbputproc', libtc, c_bool,
('tdb', c_void_p, 1),
('pkbuf', c_void_p, 1),
('pksiz', c_int, 1),
('cbuf', c_void_p, 1),
('csiz', c_int, 1),
('proc', TCPDPROC, 1),
('op', c_void_p, 1))
tdb_putproc.__doc__ =\
"""Store a record into a table database object with a duplication
handler.
tdb -- specifies the table database object connected as a writer.
pkbuf -- specifies the pointer to the region of the primary key.
pksiz -- specifies the size of the region of the primary key.
cbuf -- specifies the pointer to the region of the zero separated
column string where the name and the value of each column are
situated one after the other. 'NULL' means that record
            addition is omitted if there is no corresponding record.
csiz -- specifies the size of the region of the column string.
proc -- specifies the pointer to the callback function to process
duplication. It receives four parameters. The first
parameter is the pointer to the region of the value. The
second parameter is the size of the region of the value. The
third parameter is the pointer to the variable into which the
size of the region of the return value is assigned. The
fourth parameter is the pointer to the optional opaque
object. It returns the pointer to the result object
allocated with 'malloc'. It is released by the caller. If
it is 'NULL', the record is not modified. If it is '(void
*)-1', the record is removed.
op -- specifies an arbitrary pointer to be given as a parameter of
the callback function. If it is not needed, 'NULL' can be
specified.
If successful, the return value is true, else, it is false.
Note that the callback function can not perform any database operation
because the function is called in the critical section guarded by the
same locks of database operations.
"""
tdb_get4 = cfunc('tctdbget4', libtc, tc_char_p,
('tdb', c_void_p, 1),
('pkbuf', c_void_p, 1),
('pksiz', c_int, 1),
('nbuf', c_void_p, 1),
('nsiz', c_int, 1),
('sp', c_int_p, 2))
tdb_get4.errcheck = lambda result, func, arguments : (result, arguments[5])
tdb_get4.__doc__ =\
"""Retrieve the value of a column of a record in a table database
object.
tdb -- specifies the table database object.
pkbuf -- specifies the pointer to the region of the primary key.
pksiz -- specifies the size of the region of the primary key.
nbuf -- specifies the pointer to the region of the column name.
nsiz -- specifies the size of the region of the column name.
sp -- specifies the pointer to the variable into which the size of
the region of the return value is assigned.
If successful, the return value is the pointer to the region of the
value of the column of the corresponding record. 'NULL' is returned
if no record corresponds or there is no column.
Because an additional zero code is appended at the end of the region
of the return value, the return value can be treated as a character
string. Because the region of the return value is allocated with the
'malloc' call, it should be released with the 'free' call when it is
no longer in use.
"""
tdb_iterinit2 = cfunc('tctdbiterinit2', libtc, c_bool,
('tdb', c_void_p, 1),
('pkbuf', c_void_p, 1),
('pksiz', c_int, 1))
tdb_iterinit2.__doc__ =\
"""Move the iterator to the record corresponding a key of a table
database object.
tdb -- specifies the table database object.
pkbuf -- specifies the pointer to the region of the primary key.
pksiz -- specifies the size of the region of the primary key.
If successful, the return value is true, else, it is false. False is
    returned if there is no record corresponding to the condition.
"""
tdb_iterinit3 = cfunc('tctdbiterinit3', libtc, c_bool,
('tdb', c_void_p, 1),
('pkstr', c_char_p, 1))
tdb_iterinit3.__doc__ =\
"""Move the iterator to the record corresponding a key string of a
table database object.
tdb -- specifies the table database object.
    pkstr -- specifies the string of the primary key.
If successful, the return value is true, else, it is false. False is
    returned if there is no record corresponding to the condition.
"""
tdb_foreach = cfunc('tctdbforeach', libtc, c_bool,
('tdb', c_void_p, 1),
('iter', TCITER, 1),
('op', c_void_p, 1))
tdb_foreach.__doc__ =\
"""Process each record atomically of a table database object.
tdb -- specifies the table database object.
iter -- specifies the pointer to the iterator function called for each
record. It receives five parameters. The first parameter is
the pointer to the region of the key. The second parameter is
the size of the region of the key. The third parameter is the
pointer to the region of the value. The fourth parameter is
the size of the region of the value. The fifth parameter is
the pointer to the optional opaque object. It returns true to
continue iteration or false to stop iteration.
op -- specifies an arbitrary pointer to be given as a parameter of
the iterator function. If it is not needed, 'NULL' can be
specified.
If successful, the return value is true, else, it is false.
Note that the callback function can not perform any database operation
because the function is called in the critical section guarded by the
same locks of database operations.
"""
tdb_qryproc2 = cfunc('tctdbqryproc2', libtc, c_bool,
('qry', c_void_p, 1),
('proc', TDBQRYPROC, 1),
('op', c_void_p, 1))
tdb_qryproc2.__doc__ =\
"""Process each record corresponding to a query object with non-atomic
fashion.
qry -- specifies the query object of the database connected as a
writer.
proc -- specifies the pointer to the iterator function called for each
record. It receives four parameters. The first parameter is
the pointer to the region of the primary key. The second
parameter is the size of the region of the primary key. The
third parameter is a map object containing columns. The
fourth parameter is the pointer to the optional opaque object.
It returns flags of the post treatment by bitwise-or: 'QPPUT'
to modify the record, 'QPOUT' to remove the record, 'QPSTOP'
to stop the iteration.
op -- specifies an arbitrary pointer to be given as a parameter of
the iterator function. If it is not needed, 'NULL' can be
specified.
If successful, the return value is true, else, it is false.
"""
tdb_qrysearchout2 = cfunc('tctdbqrysearchout2', libtc, c_bool,
('qry', c_void_p, 1))
tdb_qrysearchout2.__doc__ =\
"""Remove each record corresponding to a query object with non-atomic
fashion.
    qry -- specifies the query object of the database connected as a
           writer.
If successful, the return value is true, else, it is false.
"""
tdb_strtoindextype = cfunc('tctdbstrtoindextype', libtc, c_int,
('str', c_char_p, 1))
tdb_strtoindextype.__doc__ =\
"""Convert a string into the index type number.
str -- specifies a string.
The return value is the index type number or -1 on failure.
"""
tdb_strtometasearchtype = cfunc('tctdbstrtometasearcytype', libtc, c_int,
('str', c_char_p, 1))
tdb_strtometasearchtype.__doc__ =\
"""Convert a string into the meta search type number.
str -- specifies a string.
The return value is the meta search type number or -1 on failure.
"""
tdb_qrycount = cfunc('tctdbqrycount', libtc, c_int,
('qry', c_void_p, 1))
tdb_qrycount.__doc__ =\
"""Get the count of corresponding records of a query object.
qry -- specifies the query object.
The return value is the count of corresponding records.
"""
tdb_qrykwic = cfunc('tctdbqrykwic', libtc, TCLIST_P,
('qry', c_void_p, 1),
('cols', TCMAP_P, 1),
('name', c_char_p, 1),
('width', c_int, 1),
('opts', c_int, 1))
tdb_qrykwic.__doc__ =\
"""Generate keyword-in-context strings from a query object.
qry -- specifies the query object.
cols -- specifies a map object containing columns.
name -- specifies the name of a column. If it is 'NULL', the first
column of the query is specified.
width -- specifies the width of strings picked up around each keyword.
opts -- specifies options by bitwise-or: 'KWMUTAB' specifies that
each keyword is marked up between two tab characters,
'KWMUCTRL' specifies that each keyword is marked up by the
STX (0x02) code and the ETX (0x03) code, 'KWMUBRCT' specifies
that each keyword is marked up by the two square brackets,
'KWNOOVER' specifies that each context does not overlap,
'KWPULEAD' specifies that the lead string is picked up
forcibly.
The return value is the list object whose elements are strings around
keywords.
Because the object of the return value is created with the function
'tclistnew', it should be deleted with the function 'tclistdel' when
it is no longer in use.
"""
tdb_qrystrtocondop = cfunc('tctdbqrystrtocondop', libtc, c_int,
('str', c_char_p, 1))
tdb_qrystrtocondop.__doc__ =\
"""Convert a string into the query operation number.
str -- specifies a string.
The return value is the query operation number or -1 on failure.
"""
tdb_qrystrtoordertype = cfunc('tctdbqrystrtoordertype', libtc, c_int,
('str', c_char_p, 1))
tdb_qrystrtoordertype.__doc__ =\
"""Convert a string into the query order type number.
str -- specifies a string.
The return value is the query order type or -1 on failure.
"""
tdb_metastrtosettype = cfunc('tctdbmetastrtosettype', libtc, c_int,
('str', c_char_p, 1))
tdb_metastrtosettype.__doc__ =\
"""Convert a string into the set operation type number.
str -- specifies a string.
The return value is the set operation type or -1 on failure.
"""
| aplanas/py-tcdb | tcdb/tc.py | Python | lgpl-3.0 | 308,185 | ["BLAST"] | 685e047142d93420c27c11fcc222ca2789171b7b5dbcf24e8ba0ef4f3e81b19e |
# coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various networks for Jax Dopamine agents."""
import time
from typing import Tuple, Union
from dopamine.discrete_domains import atari_lib
from flax import linen as nn
import gin
import jax
import jax.numpy as jnp
import numpy as onp
gin.constant('jax_networks.CARTPOLE_OBSERVATION_DTYPE', jnp.float64)
gin.constant('jax_networks.CARTPOLE_MIN_VALS',
(-2.4, -5., -onp.pi/12., -onp.pi*2.))
gin.constant('jax_networks.CARTPOLE_MAX_VALS',
(2.4, 5., onp.pi/12., onp.pi*2.))
gin.constant('jax_networks.ACROBOT_OBSERVATION_DTYPE', jnp.float64)
gin.constant('jax_networks.ACROBOT_MIN_VALS',
(-1., -1., -1., -1., -5., -5.))
gin.constant('jax_networks.ACROBOT_MAX_VALS',
(1., 1., 1., 1., 5., 5.))
gin.constant('jax_networks.LUNAR_OBSERVATION_DTYPE', jnp.float64)
gin.constant('jax_networks.MOUNTAINCAR_OBSERVATION_DTYPE', jnp.float64)
gin.constant('jax_networks.MOUNTAINCAR_MIN_VALS', (-1.2, -0.07))
gin.constant('jax_networks.MOUNTAINCAR_MAX_VALS', (0.6, 0.07))
def preprocess_atari_inputs(x):
"""Input normalization for Atari 2600 input frames."""
return x.astype(jnp.float32) / 255.
identity_preprocess_fn = lambda x: x
### DQN Networks ###
@gin.configurable
class NatureDQNNetwork(nn.Module):
"""The convolutional network used to compute the agent's Q-values."""
num_actions: int
inputs_preprocessed: bool = False
@nn.compact
def __call__(self, x):
initializer = nn.initializers.xavier_uniform()
if not self.inputs_preprocessed:
x = preprocess_atari_inputs(x)
x = nn.Conv(features=32, kernel_size=(8, 8), strides=(4, 4),
kernel_init=initializer)(x)
x = nn.relu(x)
x = nn.Conv(features=64, kernel_size=(4, 4), strides=(2, 2),
kernel_init=initializer)(x)
x = nn.relu(x)
x = nn.Conv(features=64, kernel_size=(3, 3), strides=(1, 1),
kernel_init=initializer)(x)
x = nn.relu(x)
x = x.reshape((-1)) # flatten
x = nn.Dense(features=512, kernel_init=initializer)(x)
x = nn.relu(x)
q_values = nn.Dense(features=self.num_actions,
kernel_init=initializer)(x)
return atari_lib.DQNNetworkType(q_values)
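# --- Editor-added usage sketch (not part of the original Dopamine file). ---
# Shows how the flax modules above are typically initialised and applied.
# The frame-stack shape (84, 84, 4) and num_actions=4 are illustrative
# assumptions, not values taken from this file.
def _example_nature_dqn_forward():
  """Hedged sketch: one unbatched forward pass through NatureDQNNetwork."""
  network = NatureDQNNetwork(num_actions=4)
  dummy_frames = jnp.zeros((84, 84, 4), jnp.uint8)  # stacked Atari frames
  params = network.init(jax.random.PRNGKey(0), dummy_frames)
  # apply() re-runs __call__ with the given parameters.
  return network.apply(params, dummy_frames).q_values  # shape (4,)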
@gin.configurable
class ClassicControlDQNNetwork(nn.Module):
"""Jax DQN network for classic control environments."""
num_actions: int
num_layers: int = 2
hidden_units: int = 512
min_vals: Union[None, Tuple[float, ...]] = None
max_vals: Union[None, Tuple[float, ...]] = None
inputs_preprocessed: bool = False
def setup(self):
if self.min_vals is not None:
assert self.max_vals is not None
self._min_vals = jnp.array(self.min_vals)
self._max_vals = jnp.array(self.max_vals)
initializer = nn.initializers.xavier_uniform()
self.layers = [
nn.Dense(features=self.hidden_units, kernel_init=initializer)
for _ in range(self.num_layers)]
self.final_layer = nn.Dense(features=self.num_actions,
kernel_init=initializer)
def __call__(self, x):
if not self.inputs_preprocessed:
x = x.astype(jnp.float32)
x = x.reshape((-1)) # flatten
if self.min_vals is not None:
x -= self._min_vals
x /= self._max_vals - self._min_vals
x = 2.0 * x - 1.0 # Rescale in range [-1, 1].
for layer in self.layers:
x = layer(x)
x = nn.relu(x)
q_values = self.final_layer(x)
return atari_lib.DQNNetworkType(q_values)
### Rainbow Networks ###
@gin.configurable
class RainbowNetwork(nn.Module):
"""Convolutional network used to compute the agent's return distributions."""
num_actions: int
num_atoms: int
inputs_preprocessed: bool = False
@nn.compact
def __call__(self, x, support):
initializer = nn.initializers.variance_scaling(
scale=1.0 / jnp.sqrt(3.0),
mode='fan_in',
distribution='uniform')
if not self.inputs_preprocessed:
x = preprocess_atari_inputs(x)
x = nn.Conv(features=32, kernel_size=(8, 8), strides=(4, 4),
kernel_init=initializer)(x)
x = nn.relu(x)
x = nn.Conv(features=64, kernel_size=(4, 4), strides=(2, 2),
kernel_init=initializer)(x)
x = nn.relu(x)
x = nn.Conv(features=64, kernel_size=(3, 3), strides=(1, 1),
kernel_init=initializer)(x)
x = nn.relu(x)
x = x.reshape((-1)) # flatten
x = nn.Dense(features=512, kernel_init=initializer)(x)
x = nn.relu(x)
x = nn.Dense(features=self.num_actions * self.num_atoms,
kernel_init=initializer)(x)
logits = x.reshape((self.num_actions, self.num_atoms))
probabilities = nn.softmax(logits)
q_values = jnp.sum(support * probabilities, axis=1)
return atari_lib.RainbowNetworkType(q_values, logits, probabilities)
@gin.configurable
class ClassicControlRainbowNetwork(nn.Module):
"""Jax Rainbow network for classic control environments."""
num_actions: int
num_atoms: int
num_layers: int = 2
hidden_units: int = 512
min_vals: Union[None, Tuple[float, ...]] = None
max_vals: Union[None, Tuple[float, ...]] = None
inputs_preprocessed: bool = False
def setup(self):
if self.min_vals is not None:
self._min_vals = jnp.array(self.min_vals)
self._max_vals = jnp.array(self.max_vals)
initializer = nn.initializers.xavier_uniform()
self.layers = [
nn.Dense(features=self.hidden_units, kernel_init=initializer)
for _ in range(self.num_layers)]
self.final_layer = nn.Dense(features=self.num_actions * self.num_atoms,
kernel_init=initializer)
def __call__(self, x, support):
if not self.inputs_preprocessed:
x = x.astype(jnp.float32)
x = x.reshape((-1)) # flatten
if self.min_vals is not None:
x -= self._min_vals
x /= self._max_vals - self._min_vals
x = 2.0 * x - 1.0 # Rescale in range [-1, 1].
for layer in self.layers:
x = layer(x)
x = nn.relu(x)
x = self.final_layer(x)
logits = x.reshape((self.num_actions, self.num_atoms))
probabilities = nn.softmax(logits)
q_values = jnp.sum(support * probabilities, axis=1)
return atari_lib.RainbowNetworkType(q_values, logits, probabilities)
### Implicit Quantile Networks ###
class ImplicitQuantileNetwork(nn.Module):
"""The Implicit Quantile Network (Dabney et al., 2018).."""
num_actions: int
quantile_embedding_dim: int
inputs_preprocessed: bool = False
@nn.compact
def __call__(self, x, num_quantiles, rng):
initializer = nn.initializers.variance_scaling(
scale=1.0 / jnp.sqrt(3.0),
mode='fan_in',
distribution='uniform')
if not self.inputs_preprocessed:
x = preprocess_atari_inputs(x)
x = nn.Conv(features=32, kernel_size=(8, 8), strides=(4, 4),
kernel_init=initializer)(x)
x = nn.relu(x)
x = nn.Conv(features=64, kernel_size=(4, 4), strides=(2, 2),
kernel_init=initializer)(x)
x = nn.relu(x)
x = nn.Conv(features=64, kernel_size=(3, 3), strides=(1, 1),
kernel_init=initializer)(x)
x = nn.relu(x)
x = x.reshape((-1)) # flatten
state_vector_length = x.shape[-1]
state_net_tiled = jnp.tile(x, [num_quantiles, 1])
quantiles_shape = [num_quantiles, 1]
quantiles = jax.random.uniform(rng, shape=quantiles_shape)
quantile_net = jnp.tile(quantiles, [1, self.quantile_embedding_dim])
quantile_net = (
jnp.arange(1, self.quantile_embedding_dim + 1, 1).astype(jnp.float32)
* onp.pi
* quantile_net)
quantile_net = jnp.cos(quantile_net)
quantile_net = nn.Dense(features=state_vector_length,
kernel_init=initializer)(quantile_net)
quantile_net = nn.relu(quantile_net)
x = state_net_tiled * quantile_net
x = nn.Dense(features=512, kernel_init=initializer)(x)
x = nn.relu(x)
quantile_values = nn.Dense(features=self.num_actions,
kernel_init=initializer)(x)
return atari_lib.ImplicitQuantileNetworkType(quantile_values, quantiles)
### Quantile Networks ###
@gin.configurable
class QuantileNetwork(nn.Module):
"""Convolutional network used to compute the agent's return quantiles."""
num_actions: int
num_atoms: int
inputs_preprocessed: bool = False
@nn.compact
def __call__(self, x):
initializer = nn.initializers.variance_scaling(
scale=1.0 / jnp.sqrt(3.0),
mode='fan_in',
distribution='uniform')
if not self.inputs_preprocessed:
x = preprocess_atari_inputs(x)
x = nn.Conv(features=32, kernel_size=(8, 8), strides=(4, 4),
kernel_init=initializer)(x)
x = nn.relu(x)
x = nn.Conv(features=64, kernel_size=(4, 4), strides=(2, 2),
kernel_init=initializer)(x)
x = nn.relu(x)
x = nn.Conv(features=64, kernel_size=(3, 3), strides=(1, 1),
kernel_init=initializer)(x)
x = nn.relu(x)
x = x.reshape((-1)) # flatten
x = nn.Dense(features=512, kernel_init=initializer)(x)
x = nn.relu(x)
x = nn.Dense(features=self.num_actions * self.num_atoms,
kernel_init=initializer)(x)
logits = x.reshape((self.num_actions, self.num_atoms))
probabilities = nn.softmax(logits)
q_values = jnp.mean(logits, axis=1)
return atari_lib.RainbowNetworkType(q_values, logits, probabilities)
### Noisy Nets for FullRainbowNetwork ###
@gin.configurable
class NoisyNetwork(nn.Module):
"""Noisy Network from Fortunato et al. (2018).
Attributes:
rng_key: jax.interpreters.xla.DeviceArray, key for JAX RNG.
eval_mode: bool, whether to turn off noise during evaluation.
"""
rng_key: jax.interpreters.xla.DeviceArray
eval_mode: bool = False
@staticmethod
def sample_noise(key, shape):
return jax.random.normal(key, shape)
@staticmethod
def f(x):
# See (10) and (11) in Fortunato et al. (2018).
return jnp.multiply(jnp.sign(x), jnp.power(jnp.abs(x), 0.5))
@nn.compact
def __call__(self, x, features, bias=True, kernel_init=None):
def mu_init(key, shape):
# Initialization of mean noise parameters (Section 3.2)
low = -1 / jnp.power(x.shape[0], 0.5)
high = 1 / jnp.power(x.shape[0], 0.5)
return jax.random.uniform(key, minval=low, maxval=high, shape=shape)
def sigma_init(key, shape, dtype=jnp.float32): # pylint: disable=unused-argument
# Initialization of sigma noise parameters (Section 3.2)
return jnp.ones(shape, dtype) * (0.1 / onp.sqrt(x.shape[0]))
if self.eval_mode:
# Turn off noise during evaluation
w_epsilon = onp.zeros(shape=(x.shape[0], features), dtype=onp.float32)
b_epsilon = onp.zeros(shape=(features,), dtype=onp.float32)
else:
# Factored gaussian noise in (10) and (11) in Fortunato et al. (2018).
p = NoisyNetwork.sample_noise(self.rng_key, [x.shape[0], 1])
q = NoisyNetwork.sample_noise(self.rng_key, [1, features])
f_p = NoisyNetwork.f(p)
f_q = NoisyNetwork.f(q)
w_epsilon = f_p * f_q
b_epsilon = jnp.squeeze(f_q)
# See (8) and (9) in Fortunato et al. (2018) for output computation.
w_mu = self.param('kernel_mu', mu_init, (x.shape[0], features))
w_sigma = self.param('kernel_sigma', sigma_init, (x.shape[0], features))
w = w_mu + jnp.multiply(w_sigma, w_epsilon)
ret = jnp.matmul(x, w)
b_mu = self.param('bias_mu', mu_init, (features,))
b_sigma = self.param('bias_sigma', sigma_init, (features,))
b = b_mu + jnp.multiply(b_sigma, b_epsilon)
return jnp.where(bias, ret + b, ret)
### FullRainbowNetwork ###
def feature_layer(key, noisy, eval_mode=False):
"""Network feature layer depending on whether noisy_nets are used on or not."""
def noisy_net(x, features):
return NoisyNetwork(rng_key=key, eval_mode=eval_mode)(x, features)
def dense_net(x, features):
return nn.Dense(features, kernel_init=nn.initializers.xavier_uniform())(x)
return noisy_net if noisy else dense_net
@gin.configurable
class FullRainbowNetwork(nn.Module):
"""Jax Rainbow network for Full Rainbow.
Attributes:
num_actions: int, number of actions the agent can take at any state.
num_atoms: int, the number of buckets of the value function distribution.
noisy: bool, Whether to use noisy networks.
dueling: bool, Whether to use dueling network architecture.
distributional: bool, whether to use distributional RL.
"""
num_actions: int
num_atoms: int
noisy: bool = True
dueling: bool = True
distributional: bool = True
inputs_preprocessed: bool = False
@nn.compact
def __call__(self, x, support, eval_mode=False, key=None):
# Generate a random number generation key if not provided
if key is None:
key = jax.random.PRNGKey(int(time.time() * 1e6))
if not self.inputs_preprocessed:
x = preprocess_atari_inputs(x)
hidden_sizes = [32, 64, 64]
kernel_sizes = [8, 4, 3]
stride_sizes = [4, 2, 1]
for hidden_size, kernel_size, stride_size in zip(hidden_sizes, kernel_sizes,
stride_sizes):
x = nn.Conv(
features=hidden_size,
kernel_size=(kernel_size, kernel_size),
strides=(stride_size, stride_size),
kernel_init=nn.initializers.xavier_uniform())(x)
x = nn.relu(x)
x = x.reshape((-1)) # flatten
net = feature_layer(key, self.noisy, eval_mode=eval_mode)
x = net(x, features=512) # Single hidden layer of size 512
x = nn.relu(x)
if self.dueling:
adv = net(x, features=self.num_actions * self.num_atoms)
value = net(x, features=self.num_atoms)
adv = adv.reshape((self.num_actions, self.num_atoms))
value = value.reshape((1, self.num_atoms))
logits = value + (adv - (jnp.mean(adv, axis=0, keepdims=True)))
else:
x = net(x, features=self.num_actions * self.num_atoms)
logits = x.reshape((self.num_actions, self.num_atoms))
if self.distributional:
probabilities = nn.softmax(logits)
q_values = jnp.sum(support * probabilities, axis=1)
return atari_lib.RainbowNetworkType(q_values, logits, probabilities)
q_values = jnp.sum(logits, axis=1) # Sum over all the num_atoms
return atari_lib.DQNNetworkType(q_values)
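# --- Editor-added usage sketch (not part of the original Dopamine file). ---
# Illustrates a single forward pass through FullRainbowNetwork.  num_actions,
# num_atoms, the support range and the input shape are illustrative
# assumptions; explicit `key` arguments avoid the time-based default above.
def _example_full_rainbow_forward():
  """Hedged sketch: distributional forward pass with noisy layers enabled."""
  num_actions, num_atoms = 4, 51
  support = jnp.linspace(-10., 10., num_atoms)
  network = FullRainbowNetwork(num_actions=num_actions, num_atoms=num_atoms)
  frames = jnp.zeros((84, 84, 4))
  params = network.init(jax.random.PRNGKey(0), frames, support,
                        key=jax.random.PRNGKey(1))
  out = network.apply(params, frames, support, key=jax.random.PRNGKey(2))
  return out.q_values  # shape (num_actions,)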
| google/dopamine | dopamine/jax/networks.py | Python | apache-2.0 | 14,867 | ["Gaussian"] | 307f91a26b91a383741adaeb69cfda2423cf43f6232b9ac409bb9b672442e5d7 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
"""
Define a common GULP Sile
"""
from sisl._internal import set_module
from ..sile import Sile, SileCDF
__all__ = ['SileGULP', 'SileCDFGULP']
@set_module("sisl.io.gulp")
class SileGULP(Sile):
pass
@set_module("sisl.io.gulp")
class SileCDFGULP(SileCDF):
pass
| zerothi/sisl | sisl/io/gulp/sile.py | Python | mpl-2.0 | 472 | ["GULP"] | 650374e65ecc58155b8453405eabf1c0df0851229c8b6440f6b2a1b0657e84e2 |
REF= "/home/ubuntu/refs/bwa_index/GCF_000001405.30_GRCh38.p4_genomic.fna"
BWA="/opt/biobuilds/bin/bwa"
SAMTOOLS="/opt/biobuilds/bin/samtools"
FREEBAYES="/home/ubuntu/freebayes/bin/freebayes"
VCFILTER="/home/ubuntu/vcflib/bin/vcffilter"
| DCGenomics/vcf_parsing_hackathon_v002 | scripts/snake_env.py | Python | cc0-1.0 | 236 | ["BWA"] | a26a56b70c6d7a3e03ae4be601dc341f7e5dd55da8bf666877bedcf0e83f2d40 |
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAX BatchEnsemble training related functions."""
import dataclasses
import functools
import logging
from typing import Any, Callable, Mapping, Optional, Tuple
from clu import metric_writers
import flax.optim
import flax.struct
import flax.traverse_util
import jax
import jax.numpy as jnp
import numpy as np
import train_utils # local file import from baselines.jft
EvaluationOutput = Tuple[jnp.ndarray, ...]
Module = type(functools) # Python module.
Params = Mapping[str, Any]
MetricWriter = metric_writers.MetricWriter
PmapEvaluationFn = Callable[
[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray],
EvaluationOutput]
# TODO(dusenberrymw,zmariet): Clean up and generalize these log marginal probs.
def log_average_softmax_probs(logits: jnp.ndarray) -> jnp.ndarray:
# TODO(zmariet): dedicated eval loss function.
ens_size, _, _ = logits.shape
log_p = jax.nn.log_softmax(logits) # (ensemble_size, batch_size, num_classes)
log_p = jax.nn.logsumexp(log_p, axis=0) - jnp.log(ens_size)
return log_p
def log_average_sigmoid_probs(logits: jnp.ndarray) -> jnp.ndarray:
ens_size, _, _ = logits.shape
log_p = jax.nn.log_sigmoid(logits) # (ensemble_size, batch_size, num_classes)
log_p = jax.nn.logsumexp(log_p, axis=0) - jnp.log(ens_size)
log_not_p = jax.nn.log_sigmoid(-logits)
log_not_p = jax.nn.logsumexp(log_not_p, axis=0) - jnp.log(ens_size)
log_p = log_p - log_not_p
return log_p
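# --- Editor-added sketch (not in the original file): a tiny worked example of
# the ensemble log-marginal above.  The shapes (ens_size=2, batch=1,
# num_classes=3) are illustrative assumptions.
def _example_log_average_softmax():
  """With two identical members, every class gets log-probability log(1/3)."""
  logits = jnp.zeros((2, 1, 3))  # (ensemble_size, batch_size, num_classes)
  return log_average_softmax_probs(logits)  # == jnp.log(1/3) everywhere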
def tree_clip_norm_global_pmax(tree, max_norm, axis_name):
"""Global norm clipping, with pmax of global norm before clipping."""
global_norm = jnp.sqrt(sum(jnp.vdot(x, x) for x in jax.tree_leaves(tree)))
global_norm = jax.lax.pmax(global_norm, axis_name=axis_name)
factor = jnp.minimum(1.0, max_norm / global_norm)
return jax.tree_map(lambda x: factor * x, tree), global_norm
def _traverse_with_names(tree):
"""Traverses nested dicts/dataclasses and emits (leaf_name, leaf_val)."""
if dataclasses.is_dataclass(tree):
tree = flax.serialization.to_state_dict(tree)
if isinstance(tree, (dict, flax.core.FrozenDict)):
keys = sorted(tree.keys())
for key in keys:
for path, v in _traverse_with_names(tree[key]):
yield (key + '/' + path).rstrip('/'), v
else:
yield '', tree
def tree_flatten_with_names(tree):
"""Populates tree_flatten with leaf names.
  This function populates the output of tree_flatten with leaf names, using a
  custom traversal that produces names if provided. The custom traversal does
  NOT have to traverse the tree in the same order as jax, as we take care of
  automatically aligning jax's and the custom traversals.
Args:
tree: python tree.
Returns:
A list of values with names: [(name, value), ...].
A PyTreeDef tree definition object.
"""
vals, tree_def = jax.tree_flatten(tree)
# "Fake" token tree that is use to track jax internal tree traversal and
# adjust our custom tree traversal to be compatible with it.
tokens = range(len(vals))
token_tree = tree_def.unflatten(tokens)
val_names, perm = zip(*_traverse_with_names(token_tree))
inv_perm = np.argsort(perm)
  # The custom traversal should visit the same number of leaves.
if len(val_names) != len(vals):
raise ValueError(f'Pytree traversal detected {len(val_names)} names, '
                     f'but {len(vals)} leaves.\nTreeDef is:\n{tree_def}')
return [(val_names[i], v) for i, v in zip(inv_perm, vals)], tree_def
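# --- Editor-added sketch (not in the original file).  Demonstrates the
# path-like names produced for a small nested dict; the parameter names are
# made up for illustration.
def _example_tree_flatten_with_names():
  tree = {'dense': {'bias': jnp.zeros(2), 'kernel': jnp.ones((2, 2))}}
  names_and_vals, _ = tree_flatten_with_names(tree)
  return [name for name, _ in names_and_vals]  # ['dense/bias', 'dense/kernel']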
def tree_map_with_names(f, param_tree, match_name_fn=lambda name: True):
"""Like jax.tree_map but with a filter on the leaf path name.
Args:
f: The function to be applied to each parameter in `param_tree`.
param_tree: The tree of parameters `f` should be applied to.
    match_name_fn: This function is called with each tree leaf's path name,
which has a path-like format ("a/b/c"), and decides whether `f` should
be applied to that leaf or the leaf should be kept as-is.
Returns:
    A tree identical in structure to `param_tree`, in which each leaf whose
    path name makes `match_name_fn` return True is replaced by the result of
    calling `f` on it; all other leaves are kept unchanged.
"""
names_and_vals, tree_def = tree_flatten_with_names(param_tree)
vals = [f(v) if match_name_fn(name) else v for name, v in names_and_vals]
return tree_def.unflatten(vals)
def tree_rngs_split(rngs, num_splits=2):
"""Splits a PyTree of PRNGKeys into num_splits PyTrees."""
rngs = jax.tree_map(lambda rng: jax.random.split(rng, num_splits), rngs)
slice_rngs = lambda rngs, i: jax.tree_map(lambda rng: rng[i], rngs)
return tuple(slice_rngs(rngs, i) for i in range(num_splits))
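# --- Editor-added sketch (not in the original file): splitting a PyTree of
# PRNG keys.  The rng names ('params', 'dropout') are illustrative.
def _example_tree_rngs_split():
  rngs = {'dropout': jax.random.PRNGKey(0), 'params': jax.random.PRNGKey(1)}
  step_rngs, next_rngs = tree_rngs_split(rngs)
  # Both outputs have the same dict structure as `rngs`, with fresh keys.
  return step_rngs, next_rngs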
def update_fn_be(
opt: flax.optim.Optimizer,
rngs: Mapping[str, jnp.ndarray],
lr: jnp.ndarray,
images: jnp.ndarray,
labels: jnp.ndarray,
batch_loss_fn: Callable[..., jnp.ndarray],
weight_decay_fn: Optional[Callable[[Any, float], Any]],
max_grad_norm_global: Optional[float],
fast_weight_lr_multiplier: float):
"""Updates a model on the given inputs for one step.
Args:
opt: Flax optimizer used during training.
    rngs: A mapping of random number generators used by stochastic operations.
lr: The learning rate to use in each device.
images: Array containing the images in a batch.
labels: Array containing the labels in a batch.
batch_loss_fn: Loss function that takes (params, images, labels, rng) as
inputs and produces the loss value for an entire batch.
    weight_decay_fn: Function that takes the model parameters and the learning
      rate, and returns new parameters with weight decay applied. Use None to
      avoid any weight decay.
max_grad_norm_global: Float (or None) denoting the maximum norm of the
      gradients allowed before clipping. If the norm is larger than this,
the gradients are scaled to have this norm. Use None to avoid any norm
clipping.
    fast_weight_lr_multiplier: The ratio of the fast-weight learning rate to
      the slow-weight learning rate.
Returns:
    The optimizer with the updated parameters and state.
    The split rngs to use in the following step.
    A dictionary containing auxiliary information such as the training loss.
"""
  # Split the rngs; next_rngs is returned for use in the following step.
rngs, next_rngs = tree_rngs_split(rngs, num_splits=2)
(loss, aux), grads = jax.value_and_grad(
batch_loss_fn, has_aux=True)(opt.target, images, labels, rngs=rngs)
# Average gradients.
grads = jax.lax.pmean(grads, axis_name='batch')
loss = jax.lax.pmean(loss, axis_name='batch')
aux['training_loss'] = loss
if max_grad_norm_global and max_grad_norm_global > 0.0:
# Normalize by 'global' norm (i.e. flatten all parameters).
grads, global_norm = tree_clip_norm_global_pmax(
grads, max_grad_norm_global, axis_name='batch')
aux['grad_norm_global'] = global_norm
if fast_weight_lr_multiplier and fast_weight_lr_multiplier != 1.0:
fast_weights_lr_fn = lambda x: x * fast_weight_lr_multiplier
match_fn = lambda name: ('fast_weight_alpha' in name or 'fast_weight_gamma' # pylint: disable=g-long-lambda
in name)
grads = tree_map_with_names(fast_weights_lr_fn, grads, match_fn)
opt = opt.apply_gradient(grads, learning_rate=lr)
if weight_decay_fn:
params = weight_decay_fn(opt.target, lr)
opt = opt.replace(target=params)
aux['learning_rate'] = lr
return opt, next_rngs, aux
def broadcast_batchensemble_biases(params, be_layers, ensemble_size):
"""Tiles BE biases when seeding downstream weights from a deterministic model."""
for layer in be_layers:
for block in [0, 1]:
be_block = params['Transformer'][f'encoderblock_{layer}']['MlpBlock_3']
# The biases already have the right shape if we are restarting from a
# checkpoint (e.g., after a job got preempted).
if be_block[f'Dense_{block}']['bias'].ndim != 2:
be_block[f'Dense_{block}']['bias'] = jnp.tile(
be_block[f'Dense_{block}']['bias'], (ensemble_size, 1))
return params
def create_init(model, config, train_ds):
"""Create the initialization function for model parameters.
Args:
model: The model to be used in updates.
config: The config of the experiment.
train_ds: tf.data.Dataset.
Returns:
Function that returns initialized model parameters.
"""
local_batch_size = config.batch_size // jax.process_count()
  # We want all parameters to be created in host RAM, not on any device; they
  # will be sent to the devices later as needed. Otherwise we have run into
  # situations where the parameters end up being allocated twice.
@functools.partial(jax.jit, backend='cpu')
def init(rng):
image_size = tuple(train_ds.element_spec['image'].shape[2:])
logging.info('image_size = %s', image_size)
dummy_input = jnp.zeros((local_batch_size,) + image_size, jnp.float32)
params = flax.core.unfreeze(model.init(rng, dummy_input,
train=False))['params']
# Set bias in the head to a low value, such that loss is small initially.
params['batchensemble_head']['bias'] = jnp.full_like(
params['batchensemble_head']['bias'], config.get('init_head_bias', 0))
    # Initialize the head kernel to all zeros for fine-tuning.
if config.get('model_init'):
params['batchensemble_head']['kernel'] = jnp.full_like(
params['batchensemble_head']['kernel'], 0)
return params
return init
def create_batch_loss_fn(model, config):
"""Create the update function from model and config.
Args:
model: The model to be used in updates.
config: The config of the experiment.
Returns:
    The function that computes the loss for one batch.
"""
def batch_loss_fn(params, images, labels, rngs):
logits, _ = model.apply({'params': flax.core.freeze(params)},
images,
train=True,
rngs=rngs)
labels = jnp.tile(labels, (config.model.transformer.ens_size, 1))
loss_fn = getattr(train_utils, config.get('loss', 'sigmoid_xent'))
loss = jnp.mean(loss_fn(logits=logits, labels=labels))
return loss, dict()
return batch_loss_fn
def create_update_fn(model, config):
"""Create the update function from model and config.
Args:
model: The model to be used in updates.
config: The config of the experiment.
Returns:
The function that updates the model for one step.
"""
batch_loss_fn = create_batch_loss_fn(model, config)
@functools.partial(jax.pmap, axis_name='batch', donate_argnums=(0, 1))
def update_fn(opt, lr, images, labels, rngs):
return update_fn_be(
opt=opt,
rngs=rngs,
lr=lr,
images=images,
labels=labels,
batch_loss_fn=batch_loss_fn,
weight_decay_fn=train_utils.get_weight_decay_fn(
weight_decay_rules=config.get('weight_decay', []) or [],
rescale_value=config.lr.base
if config.get('weight_decay_decouple') else 1.),
max_grad_norm_global=config.get('grad_clip_norm', None),
fast_weight_lr_multiplier=config.get('fast_weight_lr_multiplier', None))
return update_fn
# TODO(trandustin, zmariet): Unify all evaluation functions and other utility
# functions used in different models.
def create_evaluation_fn(model, config):
"""Create the evaluation function from model and config.
Args:
model: The model to be used in updates.
config: The config of the experiment.
Returns:
The function that evaluates the model for one step.
"""
@functools.partial(jax.pmap, axis_name='batch')
def evaluation_fn(params, images, labels, mask):
# Ignore the entries with all zero labels for evaluation.
mask *= labels.max(axis=1)
tiled_logits, out = model.apply({'params': flax.core.freeze(params)},
images,
train=False)
loss_name = config.get('loss', 'sigmoid_xent')
# TODO(dusenberrymw,zmariet): Clean up and generalize this.
ens_size = config.model.transformer.ens_size
if loss_name == 'sigmoid_xent':
ens_logits = log_average_sigmoid_probs(
jnp.asarray(jnp.split(tiled_logits, ens_size)))
else: # softmax
ens_logits = log_average_softmax_probs(
jnp.asarray(jnp.split(tiled_logits, ens_size)))
pre_logits = jnp.concatenate(
jnp.split(out['pre_logits'], ens_size), axis=-1)
losses = getattr(train_utils, loss_name)(
logits=ens_logits,
labels=labels[:, :config.num_classes],
reduction=False)
loss = jax.lax.psum(losses * mask, axis_name='batch')
top1_idx = jnp.argmax(ens_logits, axis=1)
top1_correct = jnp.take_along_axis(labels, top1_idx[:, None], axis=1)[:, 0]
ncorrect = jax.lax.psum(top1_correct * mask, axis_name='batch')
n = jax.lax.psum(mask, axis_name='batch')
metric_args = jax.lax.all_gather(
[ens_logits, labels, pre_logits, mask],
axis_name='batch')
return ncorrect, loss, n, metric_args
return evaluation_fn
| google/uncertainty-baselines | baselines/jft/batchensemble_utils.py | Python | apache-2.0 | 13,638 | ["VisIt"] | 2f74dfcd647a0e2354f5509c9dfe53d22f6e6f5721d60f18edfe7bc45c806cdd |
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import logging
import os
import sys
import pysam
from tqdm import tqdm
def file_len(fname):
    """Count the number of lines in a file (0 for an empty file)."""
    count = 0
    with open(fname) as f:
        for count, _ in enumerate(f, 1):
            pass
    return count
def mean(list_of_quals):
"""
Naive function for determining 'read quality'
:param list_of_quals: list of numeric quality values
:return: mean of the quality values
"""
return float(sum(list_of_quals)) / len(list_of_quals)
def load_snps(alignment_file, variant_file, file_delimiter, chromosome, base_q=None, read_q=None):
in_sam = pysam.AlignmentFile(alignment_file, 'rb')
total_read_number = in_sam.count()
logging.info('Number of fetched reads: {}'.format(total_read_number))
out_map = {}
total_variant_number = file_len(variant_file)
var_file = open(variant_file)
valid = ['A', 'T', 'C', 'G']
for line in tqdm(var_file, total=total_variant_number, desc="Variant counter", miniters=1):
splitted = line.split(file_delimiter)
snp_position = int(splitted[0])
snp_ref = splitted[1]
snp_alt = splitted[2]
for read in in_sam.fetch(chromosome, snp_position, snp_position + 1):
if read_q and mean(read.query_qualities) < read_q:
# read quality threshold
continue
if read.qname not in out_map:
out_map[read.qname] = {'snp_list': [], 'mq': 0}
try:
base_value = read.seq[snp_position - read.reference_start]
except IndexError:
continue
if base_value not in valid:
continue
if base_value == snp_ref:
allele = 0
elif base_value == snp_alt:
allele = 1
else:
continue
try:
base_quality = read.query_qualities[snp_position - read.reference_start]
if base_q and base_quality < base_q:
# base quality threshold
continue
except IndexError:
continue
logging.debug("Found Snp: {} {} {} {}".format(snp_position, base_value, allele, base_quality))
out_map[read.qname]['snp_list'].append((snp_position, base_value, allele, base_quality))
out_map[read.qname]['mq'] = read.mapping_quality
var_file.close()
in_sam.close()
return out_map
def write_wif(read_snp_map, out_dir):
output_file = open(out_dir + 'alignment.wif', 'w')
for read_name, snp_dict in read_snp_map.items():
snp_list = snp_dict['snp_list']
mq = snp_dict['mq']
if len(snp_list) == 0: # No SNP found for this read or read empty
continue
output_file.write("{}".format(read_name))
for snp in snp_list:
p = snp[0] # position
b = snp[1] # read base
a = snp[2] # allele
q = snp[3] # base quality
output_file.write(" : {} {} {} {}".format(p, b, a, q))
output_file.write(" # {} {} NA\n".format(len(snp_list), mq))
def calculate_coverage(read_snp_map):
read_number = len(read_snp_map)
cov = 0
for read_name, snp_dict in read_snp_map.items():
snp_list = snp_dict['snp_list']
cov += len(snp_list)
avg = (cov / read_number) * 100
logging.info("Average Coverage for {}: {}%".format(read_number, avg))
def main():
parser = argparse.ArgumentParser(
description='Convert an alignment file .bam in a .wif Hapcol compatible file')
parser.add_argument('-b', action='store', dest='fileBAM', help='Alignment file in BAM format.', required=True)
parser.add_argument('-vf', action='store', dest='variantFile',
help='SNPs variant file. Retrieved with get.variants.py script', required=True)
parser.add_argument('-o', action='store', dest='outputDir',
help='Output (root) directory. Default: current directory')
parser.add_argument('-file-delimiter', action='store', dest='fileDelim',
help='Set the file delimiter for the variant file. Default: \\t')
parser.add_argument('-c', action='store_true', dest='coverageFlag',
help="If set calculate reads coverage and return to stdout", default=False)
parser.add_argument('--chromosome', '-chr', action='store', dest='chromosome',
help='Chromosome to analyze', default='chr1')
parser.add_argument('--base-quality', '-bq', action='store', dest='baseQuality',
help='Set the minimum quality for base', type=int)
parser.add_argument('--read-quality', '-rq', action='store', dest='readQuality',
help='Set the minimum quality for read', type=int)
parser.add_argument('-v', help='increase output verbosity', action='count')
args = parser.parse_args()
if args.v == 0 or not args.v:
log_level = logging.INFO
else:
log_level = logging.DEBUG
logging.basicConfig(level=log_level,
format='%(levelname)-8s [%(asctime)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logging.info('BamToWif: Program Started')
if args.outputDir and not os.path.exists(args.outputDir):
logging.error('Output dir not found.')
sys.exit(1)
if args.outputDir:
out_dir = args.outputDir + '/'
else:
out_dir = os.getcwd() + '/'
if not os.path.exists(out_dir):
os.mkdir(out_dir)
ann_file_delimiter = '\t'
if args.fileDelim is not None:
ann_file_delimiter = args.fileDelim
logging.info('#### Calculating SNPs info ####')
chromosome = 'chr1'
if args.chromosome:
chromosome = args.chromosome
snps_map_info = load_snps(args.fileBAM, args.variantFile, ann_file_delimiter, chromosome, args.baseQuality,
args.readQuality)
if args.coverageFlag:
logging.info('#### Calculating coverage ####')
calculate_coverage(snps_map_info)
logging.info('#### Writing Wif ####')
write_wif(snps_map_info, out_dir)
logging.info('BamToWif: Program Completed')
if __name__ == '__main__':
main()
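# Editor-added example invocation (paths and thresholds are hypothetical):
#   python bam_to_wif.py -b sample.bam -vf variants.tsv -o out_dir \
#       --chromosome chr1 --base-quality 20 --read-quality 30 -c -v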
| AlgoLab/HapCol | utils/bam_to_wif.py | Python | gpl-2.0 | 6,312 | ["pysam"] | a46d9927731968f5faa83392f55fedbf52a588ddca3171f022bd9ed4c7909361 |
|
#lint:disable
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
import logging
from PyQt4 import QtCore, QtGui
from .editcustomslidedialog import Ui_CustomSlideEditDialog
log = logging.getLogger(__name__)
class EditCustomSlideForm(QtGui.QDialog, Ui_CustomSlideEditDialog):
"""
Class documentation goes here.
"""
log.info('Custom Verse Editor loaded')
def __init__(self, parent=None):
"""
Constructor
"""
super(EditCustomSlideForm, self).__init__(parent)
self.setupUi(self)
# Connecting signals and slots
self.insert_button.clicked.connect(self.on_insert_button_clicked)
self.split_button.clicked.connect(self.on_split_button_clicked)
def set_text(self, text):
"""
Set the text for slide_text_edit.
``text``
The text (unicode).
"""
self.slide_text_edit.clear()
if text:
self.slide_text_edit.setPlainText(text)
self.slide_text_edit.setFocus()
def get_text(self):
"""
Returns a list with all slides.
"""
return self.slide_text_edit.toPlainText().split('\n[===]\n')
def on_insert_button_clicked(self):
"""
Adds a slide split at the cursor.
"""
self.insert_single_line_text_at_cursor('[===]')
self.slide_text_edit.setFocus()
def on_split_button_clicked(self):
"""
Adds an optional split at cursor.
"""
self.insert_single_line_text_at_cursor('[---]')
self.slide_text_edit.setFocus()
def insert_single_line_text_at_cursor(self, text):
"""
Adds ``text`` in a single line at the cursor position.
"""
full_text = self.slide_text_edit.toPlainText()
position = self.slide_text_edit.textCursor().position()
if position and full_text[position - 1] != '\n':
text = '\n' + text
if position == len(full_text) or full_text[position] != '\n':
text += '\n'
self.slide_text_edit.insertPlainText(text)
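# Editor note (illustrative): with slide_text_edit containing
# "Verse 1\n[===]\nVerse 2", get_text() returns ['Verse 1', 'Verse 2'];
# the split button inserts the optional break marker '[---]' instead.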
| marmyshev/item_title | openlp/plugins/custom/forms/editcustomslideform.py | Python | gpl-2.0 | 4,133 | ["Brian"] | a83175d8f11d12bf6d8ff8c11ec73d91f5b0069085ce3d0fb4fd3d604699cc0d |
|
from __init__ import *
# writing data
def writ_maps_rec7_back():
gdat = tdpy.util.gdatstrt()
gdat.recotype = ['rec7']
gdat.enertype = ['back']
tdpy.util.writ_fdfm()
tdpy.util.writ_maps_main(gdat, os.environ["FERM_IGAL_DATA_PATH"])
tdpy.util.prep_maps('rec7', 'back', 'igal', os.environ["FERM_IGAL_DATA_PATH"], 256, 'tim0')
def writ_maps_rec8_back():
gdat = tdpy.util.gdatstrt()
gdat.recotype = ['rec8']
gdat.enertype = ['back']
tdpy.util.writ_maps_main(gdat, os.environ["FERM_IGAL_DATA_PATH"])
tdpy.util.prep_maps('rec8', 'back', 'igal', os.environ["FERM_IGAL_DATA_PATH"], 256, 'tim0')
def retr_plnkmapsorig(gdat, strgmapsplnk):
if strgmapsplnk == 'radi':
mapsplnkorig = pf.getdata(gdat.pathdatatdgu + 'plnk/HFI_CompMap_ThermalDustModel_2048_R1.20.fits', 1)['RADIANCE']
mapsplnkorig = hp.reorder(mapsplnkorig, n2r=True)
else:
mapsplnk = tdpy.util.retr_mapsplnkfreq(strgmapsplnk)
# plot the Planck map
tdpy.util.plot_maps(gdat.pathimag + 'mapsplnk%s.pdf' % strgmapsplnk, mapsplnk, satu=True)
if gdat.subspnts:
print 'Subtracting point sources...'
# subtract PSs from the Planck maps
## read PCCS
if strgmapsplnk[1] == '0':
strg = 'R2.04'
else:
strg = 'R2.01'
# temp
dataplnk = pf.getdata(gdat.pathdata + 'plnk/COM_PCCS_%s_%s.fits' % (strgmapsplnk[1:], strg), 1)
fluxpntsplnk = dataplnk['GAUFLUX'] * 1e-3 # [Jy]
lgalpntsplnk = dataplnk['GLON'] # [deg]
lgalpntsplnk = (lgalpntsplnk - 180.) % 360. - 180.
bgalpntsplnk = dataplnk['GLAT'] # [deg]
fwhmpntsplnk = dataplnk['GAU_FWHM_EFF'] / 60. # [deg]
stdvpntsplnk = fwhmpntsplnk / 2. / sqrt(2. * log(2.)) # [deg]
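        # Editor note: this is the standard Gaussian conversion
        # sigma = FWHM / (2 * sqrt(2 * ln 2)) ~ FWHM / 2.355.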
# filter out bad
indxpntsgood = where((stdvpntsplnk >= 0.) & (fluxpntsplnk > 0.))[0]
lgalpntsplnk = lgalpntsplnk[indxpntsgood]
bgalpntsplnk = bgalpntsplnk[indxpntsgood]
fluxpntsplnk = fluxpntsplnk[indxpntsgood]
stdvpntsplnk = stdvpntsplnk[indxpntsgood]
# sort PS with respect to flux
indxpntsplnk = argsort(fluxpntsplnk)
lgalpntsplnk = lgalpntsplnk[indxpntsplnk]
bgalpntsplnk = bgalpntsplnk[indxpntsplnk]
fluxpntsplnk = fluxpntsplnk[indxpntsplnk]
stdvpntsplnk = stdvpntsplnk[indxpntsplnk]
# temp
numbpntskeep = 3
lgalpntsplnk = lgalpntsplnk[0:numbpntskeep]
bgalpntsplnk = bgalpntsplnk[0:numbpntskeep]
fluxpntsplnk = fluxpntsplnk[0:numbpntskeep]
stdvpntsplnk = stdvpntsplnk[0:numbpntskeep]
numbpntsplnk = fluxpntsplnk.size
print 'Using %d PS from the PCCS...' % numbpntsplnk
## calculate PS map using PCCS
numbsidepnts = int(sqrt(mapsplnk.size / 12))
pathmapspntsplnk = gdat.pathdata + 'mapspntsplnk%s%04d.fits' % (strgmapsplnk, numbsidepnts)
if os.path.isfile(pathmapspntsplnk):
print 'Reading %s...' % pathmapspntsplnk
mapspntsplnk = pf.getdata(pathmapspntsplnk)
else:
mapspntsplnk = tdpy.util.retr_mapspnts(lgalpntsplnk, bgalpntsplnk, stdvpntsplnk, fluxpntsplnk, verbtype=2, numbside=numbsidepnts)
## plot the PCSC map
tdpy.util.plot_maps(gdat.pathimag + 'mapspntsplnk%s.pdf' % strgmapsplnk, mapspntsplnk, satu=True)
pf.writeto(pathmapspntsplnk, mapspntsplnk, clobber=True)
mapsorigplnk = mapsplnk - mapspntsplnk
else:
mapsorigplnk = mapsplnk
return mapsorigplnk
def defn_gtbn():
numbener = 30
minmener = 0.1
maxmener = 100.
binsener = logspace(log10(minmener), log10(maxmener), numbener + 1)
lowrener = binsener[:-1]
upprener = binsener[1:]
limtener = stack((lowrener, upprener), axis=1)
path = os.environ["TDPY_DATA_PATH"] + '/gtbndefn_back.dat'
savetxt(path, limtener, fmt='%10.5g')
def merg_maps_arry():
merg_maps(numbside=512)
merg_maps(mpolmerg=360.)
merg_maps(mpolmerg=90.)
def merg_maps(numbside=256, mpolmerg=180., mpolsmth=360., strgmaps='radi'):
# construct the global object
gdat = tdpy.util.gdatstrt()
# get the time stamp
strgtimestmp = tdpy.util.retr_strgtimestmp()
# run tag
rtag = strgtimestmp + '_%04d_%04d_%04d' % (numbside, mpolmerg, mpolsmth)
# paths
gdat.pathimag, gdat.pathdata = tdpy.util.retr_path('tdgu', 'ferm_igal/', 'ferm_igal', rtag)
# time stamp
strgtimestmp = tdpy.util.retr_strgtimestmp()
timeinit = time.time()
calcfactconv = False
gdat.subspnts = True
indxevttrofi = arange(4)
# analysis setup
## plots
alph = 0.5
plotfull = False
## Healpix grid
lgalheal, bgalheal, numbpixl, apix = tdpy.util.retr_healgrid(numbside)
## axes
### Fermi-LAT energy
binsener = array([0.1, 0.3, 1., 3., 10., 100.])
meanener = sqrt(binsener[1:] * binsener[:-1])
numbener = meanener.size
indxener = arange(numbener)
indxevttrofi = arange(3, 4)
numbevtt = indxevttrofi.size
indxevtt = arange(numbevtt)
## constants
consplnk = 6.63e-34 # [J s]
consbolt = 1.38e-23 # [J/K]
tempcmbr = 2.725 # [K]
velolght = 3e8 # [m/s]
## multipole
maxmmpol = 3. * numbside - 1.
numbalmc = int(maxmmpol * (maxmmpol + 1.) / 2. + maxmmpol + 1)
numbmpol = int(maxmmpol) + 1
mpol = arange(numbmpol)
mpolgrid, temp = hp.Alm.getlm(lmax=maxmmpol)
# read unit conversion data provided by Planck
factconvplnk = loadtxt(gdat.pathdata + 'plnkunitconv.dat')
## Fermi-LAT flux map
path = gdat.pathdata + '/fermflux_cmp0_igal.fits'
mapsfermorig = sum(pf.getdata(path), 2)
numbpixlferm = mapsfermorig.shape[1]
numbsideferm = int(sqrt(numbpixlferm / 12))
mapsfermorig -= mean(mapsfermorig, 1)[:, None]
mapsfermorig /= std(mapsfermorig, 1)[:, None]
mapsferm = empty_like(mapsfermorig)
for i in indxener:
tdpy.util.plot_maps(gdat.pathimag + 'mapsfermorig%04d.pdf' % i, mapsfermorig[i, :], satu=True)
mapsferm[i, :] = tdpy.util.smth(mapsfermorig[i, :], mpolsmth, mpol=True)
tdpy.util.plot_maps(gdat.pathimag + 'mapsferm%04d.pdf' % i, mapsferm[i, :], satu=True)
# 3FGL flux map
path = gdat.pathdata + 'gll_psc_v16.fit'
datafgl3 = pf.getdata(path)
lgalfgl3 = datafgl3['glon']
lgalfgl3 = ((lgalfgl3 - 180.) % 360.) - 180.
bgalfgl3 = datafgl3['glat']
stdvfgl3 = tdpy.util.retr_fwhmferm(meanener, indxevttrofi) / 2.
specfgl3 = stack((datafgl3['Flux100_300'], datafgl3['Flux300_1000'], datafgl3['Flux1000_3000'], \
datafgl3['Flux3000_10000'], datafgl3['Flux10000_100000'])) / gdat.diffenerfull[:, None]
# temp
numbpntsfgl3 = 2
indxfgl3brgt = argsort(specfgl3[2, :])
lgalfgl3 = lgalfgl3[indxfgl3brgt][:numbpntsfgl3]
bgalfgl3 = bgalfgl3[indxfgl3brgt][:numbpntsfgl3]
specfgl3 = specfgl3[:, indxfgl3brgt][:, :numbpntsfgl3]
mapspntsferm = empty((numbener, numbpixlferm, numbevtt))
for i in indxener:
for m in indxevtt:
mapspntsferm[i, :, m] = tdpy.util.retr_mapspnts(lgalfgl3, bgalfgl3, stdvfgl3[i, m], specfgl3[i, :], verbtype=2, numbside=numbsideferm)
if plotfull:
# plot tranmission spectra
figr, axis = plt.subplots()
axis.set_ylabel('$T$')
axis.set_xlabel('$f$ [GHz]')
axis.set_ylim([1e-4, 1.])
axis.set_xlim([10., 2e3])
for k in range(numbfreqplnk):
labl = '%d' % int(strgmapsplnk[k])
axis.loglog(1e-9 * freqband, tranband, label=labl)
axis.legend(loc=3, ncol=2)
plt.tight_layout()
path = gdat.pathimag + 'tran.pdf'
plt.savefig(path)
plt.close(figr)
strgmapsplnk = ['0030', '0044', '0070', '0100', '0143', '0217', '0353', '0545', '0857', 'radi']
for k in range(numbmapsplnk):
print 'Map number ', k
print 'Maps string: ', strgmapsplnk[k]
writ_plnk(strgmapsplnk[k])
# get Planck PS mask
if False:
print 'Reading the Planck mask...'
path = gdat.pathdatatdgu + 'plnk/HFI_Mask_PointSrc_2048_R2.00.fits'
mapsmask = pf.open(path)[1].data['F353']
mapsmask = hp.reorder(mapsmask, n2r=True)
tdpy.util.plot_maps(gdat.pathimag + 'mapsmask.pdf', mapsmask)
strgmapsplnk = 'radi'
#strgmapsplnk = '0857'
# get input maps
## Planck map
print 'Smoothing the Planck map...'
path = gdat.pathdata + 'mapsplnk.fits'
if os.path.isfile(path):
print 'Reading %s...' % path
mapsplnk = pf.getdata(path)
else:
mapsplnkorig = retr_plnkmapsorig(gdat, strgmapsplnk)
mapsplnkorig -= mean(mapsplnkorig)
mapsplnkorig /= std(mapsplnkorig)
tdpy.util.plot_maps(gdat.pathimag + 'mapsplnkorig.pdf', mapsplnkorig, satu=True)
# temp
indxpixlmask = []
mapsplnk, mapsalmc, mpolsmthplnk, wghtsmthplnk = tdpy.util.smth(mapsplnkorig, mpolsmth, mpol=True, retrfull=True, indxpixlmask=indxpixlmask, numbsideoutp=numbside)
tdpy.util.plot_maps(gdat.pathimag + 'mapsplnk.pdf', mapsplnk)
pf.writeto(path, mapsplnk, clobber=True)
almcplnktemp = hp.map2alm(mapsplnk)
numbmapsplnk = len(strgmapsplnk)
## PS
# temp
if False:
mapspnts = zeros((2, numbpixl))
numbpnts = array([100, 100000])
for k in arange(numbpnts.size):
stdv = zeros(numbpnts[k]) + 0.5
flux = zeros(numbpnts[k]) + 1.
lgal = (rand(numbpnts[k]) - 0.5) * 360.
bgal = (rand(numbpnts[k]) - 0.5) * 360.
mapspnts[k, :] = tdpy.util.retr_mapspnts(lgal, bgal, stdv, flux, numbside=numbside)
## Gaussian noise map
mapsgaus = sqrt(0.25 / 30.) * randn(numbpixl)
## Fermi Diffuse Model
# temp
print 'Reading the Fermi diffuse model...'
mapsfdfmorig = tdpy.util.retr_fdfm(binsener, numbside=numbside)
mapsfdfmorig -= mean(mapsfdfmorig, 1)[:, None]
mapsfdfmorig /= std(mapsfdfmorig, 1)[:, None]
mapsfdfm = empty_like(mapsfdfmorig)
almcfdfmorig = empty((numbener, numbalmc), dtype=complex)
almcfdfm = empty((numbener, numbalmc), dtype=complex)
for i in arange(numbener):
almcfdfmorig[i, :] = hp.map2alm(mapsfdfmorig[i, :])
tdpy.util.plot_maps(gdat.pathimag + 'mapsfdfmorig%04d.pdf' % i, mapsfdfmorig[i, :], satu=True)
mapsfdfm[i, :], almcfdfm[i, :], mpolsmthfdfm, wghtsmthfdfm = tdpy.util.smth(mapsfdfmorig[i,:], mpolsmth, mpol=True, retrfull=True)
# compute the weight
wghtsing = exp(-0.5 * mpol * (mpol + 1.) / mpolmerg**2)
wght = empty(numbalmc)
for l in mpol.astype(int):
wght[where(mpolgrid == l)] = wghtsing[l]
# plot the weight
figr, axis = plt.subplots()
axis.loglog(mpol, wghtsing, label='FDM')
axis.loglog(mpol, 1. - wghtsing, label='Planck')
axis.loglog(mpolsmthfdfm, wghtsmthfdfm, label='Smoothing Kernel')
#axis.loglog(mpolsmthplnk + 0.1, wghtsmthplnk, label='Smoothing Kernel')
axis.axvline(numbside, ls='-', color='black', alpha=alph, label='$N_{side}$')
axis.axvline(mpolsmth, ls='--', color='black', alpha=alph, label='$l_{smth}$')
axis.axvline(mpolmerg, ls='-.', color='black', alpha=alph, label='$l_{merg}$')
axis.set_ylabel('$w_l$')
axis.set_xlabel('$l$')
axis.set_ylim([1e-4, 1.])
axis.set_xlim([amin(mpol), amax(mpol)])
axis.legend(loc=2)
plt.tight_layout()
path = gdat.pathimag + 'wght.pdf'
plt.savefig(path)
plt.close(figr)
# compute power spectra
print 'Computing power spectra...'
## power spectrum prefactor
factmpol = mpol * (2. * mpol + 1.) / 4. / pi
## indices of multipoles over which the variance of Fermi diffuse model and Planck map is matched
indxmpoltemp = where(mpol < 10.)[0]
psecplnkuncr = factmpol * hp.anafast(mapsplnk)
psecgaus = factmpol * hp.anafast(mapsgaus)
psecfermorig = empty((numbener, numbmpol))
psecferm = empty((numbener, numbmpol))
psecfdfmorig = empty((numbener, numbmpol))
psecfdfm = empty((numbener, numbmpol))
psecplnk = empty((numbener, numbmpol))
almcplnk = empty((numbener, numbalmc), dtype=complex)
for i in arange(numbener):
psecfermorig[i, :] = factmpol * hp.anafast(mapsfermorig[i, :])
psecferm[i, :] = factmpol * hp.anafast(mapsferm[i, :])
psecfdfmorig[i, :] = factmpol * hp.anafast(mapsfdfmorig[i, :])
psecfdfm[i, :] = factmpol * hp.anafast(mapsfdfm[i, :])
## correct the Planck variance
factcorr = sum(psecfdfm[i, indxmpoltemp]) / sum(psecplnkuncr[indxmpoltemp])
psecplnk[i, :] = factcorr * psecplnkuncr
almcplnk[i, :] = sqrt(factcorr) * almcplnktemp
# merge the maps
mapsmerg = empty((numbener, numbpixl))
for i in arange(numbener):
almcmerg = almcfdfm[i, :] * wght + almcplnk[i, :] * (1. - wght)
mapsmerg[i, :] = hp.alm2map(almcmerg, numbside, verbose=False)
# calculate the power spectrum of the merged map
psecmerg = empty((numbener, numbmpol))
for i in arange(numbener):
psecmerg[i, :] = factmpol * hp.anafast(mapsmerg[i, :])
# plot the power spectra
for i in arange(numbener):
figr, axis = plt.subplots()
axis.loglog(mpol, psecfermorig[i, :], label='Fermi-LAT Native', color='b')
axis.loglog(mpol, psecferm[i, :], label='Fermi-LAT Smooth', color='b', alpha=0.5)
axis.loglog(mpol, psecfdfmorig[i, :], label='FDM Native', color='c')
axis.loglog(mpol, psecfdfm[i, :], label='FDM Smooth', color='c', alpha=0.5)
axis.loglog(mpol, psecplnkuncr, label='Planck Smooth', color='r', alpha=0.5)
axis.loglog(mpol, psecplnk[i, :], label='Planck Smooth Norm.', color='r')
axis.loglog(mpol, psecmerg[i, :], label='Merged', color='m')
axis.loglog(mpol, psecgaus, label='Uncorrelated Noise', alpha=0.1, ls='--', color='black')
# axis.loglog(mpol, 3e-3 * exp(mpol / 1800.), label='Planck beam deconvolved', alpha=0.1, ls='-.', color='black')
axis.axvline(numbside, ls='-', color='black', alpha=0.6, label='$N_{side}$')
axis.axvline(mpolsmth, ls='--', color='black', alpha=0.6, label='$l_{smth}$')
axis.axvline(mpolmerg, ls='-.', color='black', alpha=0.6, label='$l_{merg}$')
axis.set_ylabel('$l(2l+1)C_l/4\pi$')
axis.set_xlabel('$l$')
axis.set_ylim([1e-3, 1.])
axis.set_xlim([amin(mpol), amax(mpol)])
axis.legend(loc=3, ncol=2)
plt.tight_layout()
path = gdat.pathimag + 'psec%04d.pdf' % i
plt.savefig(path)
plt.close(figr)
for plotigal in [False, True]:
if plotigal:
minmlgal = -20.
maxmlgal = 20.
minmbgal = -20.
maxmbgal = 20.
else:
minmlgal = -180.
maxmlgal = 180.
minmbgal = -90.
maxmbgal = 90.
path = gdat.pathimag + 'mapsfdfm%04d.pdf' % i
tdpy.util.plot_maps(path, mapsfdfm[i, :], minmlgal=minmlgal, maxmlgal=maxmlgal, minmbgal=minmbgal, maxmbgal=maxmbgal, satu=True)
if i == 0:
path = gdat.pathimag + 'mapsplnk.pdf'
tdpy.util.plot_maps(path, mapsplnk, minmlgal=minmlgal, maxmlgal=maxmlgal, minmbgal=minmbgal, maxmbgal=maxmbgal, satu=True)
path = gdat.pathimag + 'mapsmerg%04d.pdf' % i
tdpy.util.plot_maps(path, mapsmerg[i, :], minmlgal=minmlgal, maxmlgal=maxmlgal, minmbgal=minmbgal, maxmbgal=maxmbgal, satu=True)
path = gdat.pathimag + 'mapsresifdfm%04d.pdf' % i
tdpy.util.plot_maps(path, mapsmerg[i, :] - mapsfdfm[i, :], minmlgal=minmlgal, maxmlgal=maxmlgal, minmbgal=minmbgal, maxmbgal=maxmbgal, resi=True, satu=True)
path = gdat.pathimag + 'mapsresiplnk%04d.pdf' % i
tdpy.util.plot_maps(path, mapsmerg[i, :] - mapsplnk, minmlgal=minmlgal, maxmlgal=maxmlgal, minmbgal=minmbgal, maxmbgal=maxmbgal, resi=True, satu=True)
def writ_data():
verbtype=1
strgexpr='fermflux_cmp0_igal.fits'
# construct the global object
gdat = tdpy.util.gdatstrt()
gdat.verbtype = verbtype
gdat.strgexpr = strgexpr
indxevttrofi=arange(3, 4)
gdat.indxevttrofi = indxevttrofi
maxmgangdata=20.
gdat.maxmgangdata = maxmgangdata
minmlgal = -maxmgangdata
maxmlgal = maxmgangdata
minmbgal = -maxmgangdata
maxmbgal = maxmgangdata
# axes
gdat.binsener = array([0.1, 0.3, 1., 3., 10., 100.])
gdat.binsener, gdat.meanener, gdat.diffener, gdat.numbener, gdat.indxener = tdpy.util.retr_axis(bins=gdat.binsener, scal='logt')
gdat.strgbinsener = ['%.3g GeV - %.3g GeV' % (gdat.binsener[i], gdat.binsener[i+1]) for i in gdat.indxener]
## event type
gdat.indxevttfull = arange(4)
gdat.numbevttfull = gdat.indxevttfull.size
gdat.indxevttrofi = gdat.indxevttfull[gdat.indxevttrofi]
gdat.numbevtt = gdat.indxevttrofi.size
gdat.indxevtt = arange(gdat.numbevtt)
## pixelization
gdat.numbside = 256
gdat.numbpixlfull = gdat.numbside**2 * 12
gdat.lgalheal, gdat.bgalheal, gdat.numbpixl, gdat.apix = tdpy.util.retr_healgrid(gdat.numbside)
gdat.indxpixlnorm = where((abs(gdat.lgalheal) < 10.) & (abs(gdat.bgalheal) < 10.))[0]
gdat.indxpixlrofi = where((abs(gdat.lgalheal) < gdat.maxmgangdata) & (abs(gdat.bgalheal) < gdat.maxmgangdata))[0]
gdat.numbpixl = gdat.indxpixlrofi.size
gdat.indxpixl = arange(gdat.numbpixl)
indxdatacubefilt = meshgrid(gdat.indxener, gdat.indxpixlrofi, gdat.indxevttrofi, indexing='ij')
# paths
gdat.pathimag, gdat.pathdata = tdpy.util.retr_path('tdgu', 'ferm_igal/', 'ferm_igal/', 'inpt')
## data
path = gdat.pathdata + gdat.strgexpr
gdat.exprflux = pf.getdata(path)
### filter
gdat.dataflux = gdat.exprflux[indxdatacubefilt]
## templates
#strgback=['', '', '', 'plnk/HFI_CompMap_ThermalDustModel_2048_R1.20.fits', 'wssa_sample_1024.fits', 'lambda_sfd_ebv.fits', '']
#listnameback = ['isotflux', 'fdfmflux', 'fdfmfluxnorm', 'plnkdust', 'wisestar', 'finkdust', 'darktemp']
strgback=['']
listnameback = ['fdfmflux']
#strgback=['', '', '']
#listnameback = ['isotflux', 'fdfmflux', 'darktemp']
for nameback in deepcopy(listnameback):
listnameback += [nameback + 'smth']
numbback = len(listnameback)
gdat.indxback = arange(numbback)
gdat.fluxbackfull = empty((numbback, gdat.numbener, gdat.numbpixlfull, gdat.numbevttfull))
# power spectrum calculation
gdat.numbmapsplot = numbback + 1
gdat.mapsplot = empty((gdat.numbmapsplot, gdat.numbpixlfull))
gdat.mapsplot[0, gdat.indxpixlrofi] = sum(gdat.exprflux[1, gdat.indxpixlrofi, :], 1)
for c, strg in enumerate(listnameback):
# temp -- ROI should be fixed at 40 X 40 degree^2
path = gdat.pathdata + strg + '.fits'
if False and os.path.isfile(path):
print 'Reading %s...' % path
gdat.fluxbackfull[c, :, :, :] = pf.getdata(path)
else:
print 'c'
print c
print 'strg'
print strg
if strg == 'isotflux':
gdat.fluxbackorig = tdpy.util.retr_isot(gdat.binsener)
if strg.startswith('fdfmflux'):
gdat.fluxbackorig = tdpy.util.retr_fdfm(gdat.binsener)
if strg == 'plnkdust':
pathtemp = gdat.pathdata + strgback[c]
gdat.fluxbackorig = pf.getdata(pathtemp, 1)['RADIANCE']
gdat.fluxbackorig = hp.ud_grade(gdat.fluxbackorig, gdat.numbside, order_in='NESTED', order_out='RING')
if strg == 'wisestar':
pathtemp = gdat.pathdata + strgback[c]
gdat.fluxbackorig = pf.getdata(pathtemp, 0)
gdat.fluxbackorig = hp.ud_grade(gdat.fluxbackorig, gdat.numbside, order_in='RING', order_out='RING')
if strg == 'finkdust':
pathtemp = gdat.pathdata + strgback[c]
gdat.fluxbackorig = pf.getdata(pathtemp)['TEMPERATURE']
gdat.fluxbackorig = hp.ud_grade(gdat.fluxbackorig, gdat.numbside, order_in='NESTED', order_out='RING')
if strg == 'darktemp':
gdat.fluxbackorig = tdpy.util.retr_nfwp(1., gdat.numbside)
if strg.endswith('smth'):
indxbackorig = listnameback.index(strg[:-4])
print 'c'
print 'indxbackorig'
print indxbackorig
print 'gdat.fluxbackfull[indxbackorig, :, :, :]'
summgene(gdat.fluxbackfull[indxbackorig, :, :, :])
# smooth
gdat.fluxbackfull[c, :, :, :] = tdpy.util.smth_ferm(gdat.fluxbackfull[indxbackorig, :, :, :], gdat.meanener, gdat.indxevttfull)
print 'gdat.fluxbackfull[c, :, :, :]'
summgene(gdat.fluxbackfull[c, :, :, :])
# normalize
for i in gdat.indxener:
for m in gdat.indxevttfull:
gdat.fluxbackfull[c, i, :, m] = gdat.fluxbackfull[c, i, :, m] / mean(gdat.fluxbackfull[c, i, gdat.indxpixlnorm, m])
else:
# make copies
for m in gdat.indxevttfull:
if strg == 'isotflux' or strg.startswith('fdfmflux'):
gdat.fluxbackfull[c, :, :, m] = gdat.fluxbackorig
else:
for i in gdat.indxener:
gdat.fluxbackfull[c, i, :, m] = gdat.fluxbackorig
# temp
#gdat.fluxback[where(gdat.fluxback < 0.)] = 0.
print 'Writing to %s...' % path
pf.writeto(path, gdat.fluxbackfull[c, :, :, :], clobber=True)
print
# take only the energy bins, spatial pixels and event types of interest
gdat.fluxback = empty((numbback, gdat.numbener, gdat.numbpixl, gdat.numbevtt))
for c in gdat.indxback:
for i in gdat.indxener:
for m in gdat.indxevtt:
gdat.fluxback[c, i, :, m] = gdat.fluxbackfull[c, gdat.indxener[i], gdat.indxpixlrofi, gdat.indxevttrofi[m]]
# load the map to the array whose power spectrum will be calculated
gdat.mapsplot[1:, gdat.indxpixlrofi] = gdat.fluxback[:, 0, :, 0]
# plot the power spectra
listlabl = ['Data', 'Isotropic']
listcolr = ['black', 'b']
listlabl.append('FDM, %s' % gdat.strgbinsener[1])
listcolr.append('g')
listlabl.extend(['Planck', r'WISE 12$\mu$m', 'NFW'])
listcolr.extend(['r', 'm', 'y'])
figr, axis = plt.subplots()
mpol = arange(3 * gdat.numbside, dtype=int)
for n in range(numbback):
psec = hp.anafast(gdat.mapsplot[n, :])
axis.loglog(mpol, mpol * (mpol + 1.) * psec)#, label=listlabl[n])
axis.set_ylabel('$l(l+1)C_l$')
axis.set_xlabel('$l$')
axis.legend(loc=4, ncol=2)
path = gdat.pathimag + 'psec.pdf'
plt.tight_layout()
plt.savefig(path)
# plot the input spatial templates
for c in gdat.indxback:
for i in gdat.indxener:
for m in gdat.indxevtt:
path = gdat.pathimag + 'fluxback_%d%d%d.pdf' % (c, i, m)
tdpy.util.plot_maps(path, gdat.fluxback[c, i, :, m], indxpixlrofi=gdat.indxpixlrofi, numbpixl=gdat.numbpixlfull, \
minmlgal=minmlgal, maxmlgal=maxmlgal, minmbgal=minmbgal, maxmbgal=maxmbgal)
# plot the spectra of spatially averaged background components
listlabl = ['Data', 'Isotropic', 'FDM', 'PlanckDust', r'WISE 12$\mu$m', 'SFD', 'NFW']
figr, axis = plt.subplots()
numbvarb = numbback + 1
listydat = empty((numbvarb, gdat.numbener))
listyerr = zeros((2, numbvarb, gdat.numbener))
xdat = gdat.meanener
for k in range(numbvarb):
ydat = gdat.meanener**2 * listydat[k, :]
yerr = gdat.meanener**2 * listyerr[:, k, :]
axis.errorbar(xdat, ydat, yerr=yerr, marker='o', markersize=5, label=listlabl[k])
# Fermi-LAT results
listname = ['data', 'pion', 'invc', 'brem', 'pnts', 'isot']
listmrkr = ['o', 's', 'p', '*', 'D', '^']
listcolr = ['g', 'g', 'g', 'g', 'g', 'g']
listlabl = ['Fermi-LAT Data', r'Fermi-LAT $\pi^0$', 'Fermi-LAT ICS', 'Fermi-LAT Brem', 'Fermi-LAT PS', 'Fermi-LAT Iso']
for k, name in enumerate(listname):
path = os.environ["TDGU_DATA_PATH"] + '/ferm_igal/data/fermspec' + name + '.csv'
data = loadtxt(path)
enertemp = data[:, 0] # [GeV]
fluxtemp = data[:, 1] * 1e-3 # [GeV/cm^2/s/sr]
fluxtemp = interp(gdat.meanener, enertemp, fluxtemp)
#fluxtemp = interpolate.interp1d(enertemp, fluxtemp)(gdat.meanener)
axis.plot(gdat.meanener, fluxtemp, marker=listmrkr[k], color=listcolr[k], label=listlabl[k])
axis.set_xlim([amin(gdat.binsener), amax(gdat.binsener)])
axis.set_yscale('log')
axis.set_xlabel('E [GeV]')
axis.set_xscale('log')
    axis.set_ylabel(r'$E^2dN/dAdtd\Omega dE$ [GeV/cm$^2$/s/sr]')
axis.legend(loc=4, ncol=2)
path = gdat.pathimag + 'backspec.pdf'
plt.tight_layout()
plt.savefig(path)
plt.close(figr)
# temp
#indxpixlmean = where(abs(gdat.bgalheal) < 2.)[0]
#plot_backspec(gdat, indxpixlmean)
#indxpixlmean = where((abs(gdat.lgalheal) < 5.) & (abs(gdat.bgalheal) < 5.))[0]
#plot_backspec(gdat, indxpixlmean)
globals().get(sys.argv[1])()
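# Dispatch sketch (function name below is hypothetical, not from this file):
# the line above looks up the function named by the first command-line
# argument in the module globals and calls it with no arguments, e.g.
#
#   python ferm_igal_oldd.py ferm_igal_inpt
#
# would invoke a top-level ferm_igal_inpt(); an unknown name makes
# globals().get() return None, so the call raises TypeError.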
|
tdaylan/tdgu
|
ferm_igal_oldd.py
|
Python
|
mit
| 26,094
|
[
"Gaussian"
] |
c1671ac77c4b98c8bd3688a9081caac1dfc17c7ea40b3e588ee60e3982bc8591
|
from ase.optimize import BFGS
from ase.structure import molecule
from ase.parallel import paropen
from gpaw import GPAW, PW
from gpaw.xc.exx import EXX
# N -------------------------------------------
N = molecule('N')
N.cell = (6, 6, 7)
calc = GPAW(mode=PW(600),
dtype=complex,
nbands=8,
maxiter=300,
xc='PBE',
hund=True,
txt='N_pbe.txt',
convergence={'density': 1.e-6})
N.calc = calc
E1_pbe = N.get_potential_energy()
exx = EXX(calc, txt='N_exx.txt')
exx.calculate()
E1_hf = exx.get_total_energy()
calc.diagonalize_full_hamiltonian(nbands=4800)
calc.write('N.gpw', mode='all')
# N2 ------------------------------------------
N2 = molecule('N2')
N2.cell = (6, 6, 7)
calc = GPAW(mode=PW(600),
dtype=complex,
maxiter=300,
xc='PBE',
txt='N2_pbe.txt',
convergence={'density': 1.e-6})
N2.calc = calc
dyn = BFGS(N2)
dyn.run(fmax=0.05)
E2_pbe = N2.get_potential_energy()
exx = EXX(calc, txt='N2_exx.txt')
exx.calculate()
E2_hf = exx.get_total_energy()
f = paropen('PBE_HF.dat', 'w')
print >> f, 'PBE: ', E2_pbe - 2 * E1_pbe
print >> f, 'HF: ', E2_hf - 2 * E1_hf
f.close()
calc.diagonalize_full_hamiltonian(nbands=4800)
calc.write('N2.gpw', mode='all')
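# Usage sketch (an assumption, not part of the original tutorial): plane-wave
# EXX calculations like the above are typically run in parallel, e.g.
#
#   mpiexec -n 8 gpaw-python gs_N2.py
#
# paropen() above ensures only the master rank writes PBE_HF.dat.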
|
robwarm/gpaw-symm
|
doc/tutorials/rpa/gs_N2.py
|
Python
|
gpl-3.0
| 1,300
|
[
"ASE",
"GPAW"
] |
d2d5993c516cb67be4a0d2b078e1ed4734d8cc1f884e8f4a56035429b71fc356
|
"""
========================
Random Number Generation
========================
==================== =========================================================
Utility functions
==============================================================================
random Uniformly distributed values of a given shape.
bytes Uniformly distributed random bytes.
random_integers Uniformly distributed integers in a given range.
random_sample Uniformly distributed floats in a given range.
random Alias for random_sample
ranf Alias for random_sample
sample Alias for random_sample
choice Generate a weighted random sample from a given array-like
permutation Randomly permute a sequence / generate a random sequence.
shuffle Randomly permute a sequence in place.
seed Seed the random number generator.
==================== =========================================================
==================== =========================================================
Compatibility functions
==============================================================================
rand Uniformly distributed values.
randn Normally distributed values.
ranf Uniformly distributed floating point numbers.
randint Uniformly distributed integers in a given range.
==================== =========================================================
==================== =========================================================
Univariate distributions
==============================================================================
beta Beta distribution over ``[0, 1]``.
binomial Binomial distribution.
chisquare :math:`\\chi^2` distribution.
exponential Exponential distribution.
f F (Fisher-Snedecor) distribution.
gamma Gamma distribution.
geometric Geometric distribution.
gumbel Gumbel distribution.
hypergeometric Hypergeometric distribution.
laplace Laplace distribution.
logistic Logistic distribution.
lognormal Log-normal distribution.
logseries Logarithmic series distribution.
negative_binomial Negative binomial distribution.
noncentral_chisquare Non-central chi-square distribution.
noncentral_f Non-central F distribution.
normal Normal / Gaussian distribution.
pareto Pareto distribution.
poisson Poisson distribution.
power Power distribution.
rayleigh Rayleigh distribution.
triangular Triangular distribution.
uniform Uniform distribution.
vonmises Von Mises circular distribution.
wald Wald (inverse Gaussian) distribution.
weibull Weibull distribution.
zipf Zipf's distribution over ranked data.
==================== =========================================================
==================== =========================================================
Multivariate distributions
==============================================================================
dirichlet Multivariate generalization of Beta distribution.
multinomial Multivariate generalization of the binomial distribution.
multivariate_normal Multivariate generalization of the normal distribution.
==================== =========================================================
==================== =========================================================
Standard distributions
==============================================================================
standard_cauchy Standard Cauchy-Lorentz distribution.
standard_exponential Standard exponential distribution.
standard_gamma Standard Gamma distribution.
standard_normal Standard normal distribution.
standard_t Standard Student's t-distribution.
==================== =========================================================
==================== =========================================================
Internal functions
==============================================================================
get_state Get tuple representing internal state of generator.
set_state Set state of generator.
==================== =========================================================
"""
from __future__ import division, absolute_import, print_function
import warnings
# To get sub-modules
from .info import __all__
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
from .mtrand import *
# Some aliases:
ranf = random = sample = random_sample
__all__.extend(['ranf', 'random', 'sample'])
def __RandomState_ctor():
"""Return a RandomState instance.
This function exists solely to assist (un)pickling.
    Note that the state of the RandomState returned here is irrelevant: this
    function's sole purpose is to return a newly allocated RandomState whose
    state pickle can then restore. Consequently the RandomState returned by
    this function is a freshly allocated instance seeded with 0.
See https://github.com/numpy/numpy/issues/4763 for a detailed discussion
"""
return RandomState(seed=0)
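# Illustrative pickle round-trip (assumes RandomState.__reduce__ points at
# __RandomState_ctor, which is why the ctor's own seed is irrelevant):
#
#   >>> import pickle
#   >>> rs = RandomState(seed=42)
#   >>> clone = pickle.loads(pickle.dumps(rs))
#   >>> # unpickling calls __RandomState_ctor() and then restores the saved
#   >>> # state, so `clone` continues the same stream as `rs`.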
from numpy.testing.nosetester import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
|
DailyActie/Surrogate-Model
|
01-codes/numpy-master/numpy/random/__init__.py
|
Python
|
mit
| 5,485
|
[
"Gaussian"
] |
f9ca8ea8e20608ebc864347e830e8a5b866887fa9e2b21ea5d72134e1f8092e5
|
########################################################################
# $HeadURL $
# File: FTSAgent.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/05/31 10:00:13
########################################################################
""" :mod: FTSAgent
==============
.. module: FTSAgent
:synopsis: agent propagating scheduled RMS request in FTS
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
DIRAC agent propagating scheduled RMS request in FTS
Request processing phases (each in a separate thread):
1. MONITOR
...active FTSJobs, prepare FTSFiles dictionary with files to submit, fail, register and reschedule
2. CHECK REPLICAS
...just in case if all transfers are done, if yes, end processing
3. FAILED FILES:
...if at least one Failed FTSFile is found, set Request.Operation.File to 'Failed', end processing
4. UPDATE Waiting#SourceSE FTSFiles
...if any found in FTSDB
5. REGISTER REPLICA
...insert RegisterReplica operation to request, if some FTSFiles failed to register, end processing
6. RESCHEDULE FILES
...for FTSFiles failed with missing sources error
7. SUBMIT
...but read 'Waiting' FTSFiles first from FTSDB and merge those with FTSFiles to retry
"""
__RCSID__ = "$Id: $"
# #
# @file FTSAgent.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/05/31 10:00:51
# @brief Definition of FTSAgent class.
# # imports
import time
import datetime
import re
# # from DIRAC
from DIRAC import S_OK, S_ERROR, gLogger, gMonitor
# # from CS
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getUsernameForDN
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getRegistrationProtocols
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
# # from Core
from DIRAC.Core.Utilities.LockRing import LockRing
from DIRAC.Core.Utilities.ThreadPool import ThreadPool
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities.Time import fromString
from DIRAC.Core.Utilities.List import breakListIntoChunks
# # from DMS
from DIRAC.DataManagementSystem.Client.FTSClient import FTSClient
from DIRAC.DataManagementSystem.Client.FTSJob import FTSJob
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.DataManagementSystem.private.FTSPlacement import FTSPlacement
from DIRAC.DataManagementSystem.private.FTSHistoryView import FTSHistoryView
from DIRAC.DataManagementSystem.Client.FTSFile import FTSFile
# # from RMS
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
# # from RSS
# #from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
# # from Resources
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
# # from Accounting
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.ConfigurationSystem.Client.PathFinder import getServiceSection
# # agent base name
AGENT_NAME = "DataManagement/FTSAgent"
class escapeTry( Exception ):
pass
########################################################################
class FTSAgent( AgentModule ):
"""
.. class:: FTSAgent
Agent propagating Scheduled request to Done or Failed state in the FTS system.
Requests and associated FTSJobs (and so FTSFiles) are kept in cache.
"""
# # fts placement refresh in seconds
FTSPLACEMENT_REFRESH = FTSHistoryView.INTERVAL / 2
# # placeholder for max job per channel
MAX_ACTIVE_JOBS = 50
# # min threads
MIN_THREADS = 1
# # max threads
MAX_THREADS = 10
# # files per job
MAX_FILES_PER_JOB = 100
# # MAX FTS transfer per FTSFile
MAX_ATTEMPT = 256
# # stage flag
PIN_TIME = 0
# # FTS submission command
SUBMIT_COMMAND = 'glite-transfer-submit'
# # FTS monitoring command
MONITOR_COMMAND = 'glite-transfer-status'
# # placeholder for FTS client
__ftsClient = None
# # placeholder for the FTS version
__ftsVersion = None
# # placeholder for request client
__requestClient = None
# # placeholder for resources helper
__resources = None
# # placeholder for RSS client
__rssClient = None
# # placeholder for FTSPlacement
__ftsPlacement = None
# # placement regeneration time delta
__ftsPlacementValidStamp = None
# # placeholder for threadPool
__threadPool = None
# # update lock
__updateLock = None
# # se cache
__seCache = dict()
# # request cache
__reqCache = dict()
def updateLock( self ):
""" update lock """
if not self.__updateLock:
self.__updateLock = LockRing().getLock( "FTSAgentLock" )
return self.__updateLock
@classmethod
def requestClient( cls ):
""" request client getter """
if not cls.__requestClient:
cls.__requestClient = ReqClient()
return cls.__requestClient
@classmethod
def ftsClient( cls ):
""" FTS client """
if not cls.__ftsClient:
cls.__ftsClient = FTSClient()
return cls.__ftsClient
@classmethod
def rssClient( cls ):
""" RSS client getter """
if not cls.__rssClient:
cls.__rssClient = ResourceStatus()
return cls.__rssClient
@classmethod
def getSE( cls, seName ):
""" keep SEs in cache """
if seName not in cls.__seCache:
cls.__seCache[seName] = StorageElement( seName )
return cls.__seCache[seName]
@classmethod
def getSECache( cls ):
return cls.__seCache
@classmethod
def getRequest( cls, reqID ):
""" get Requests systematically and refresh cache """
getRequest = cls.requestClient().getRequest( reqID )
if not getRequest["OK"]:
cls.__reqCache.pop( reqID, None )
return getRequest
getRequest = getRequest["Value"]
if not getRequest:
cls.__reqCache.pop( reqID, None )
return S_ERROR( "request of id '%s' not found in ReqDB" % reqID )
cls.__reqCache[reqID] = getRequest
return S_OK( cls.__reqCache[reqID] )
@classmethod
def putRequest( cls, request, clearCache = True ):
""" put request back to ReqDB
:param Request request: Request instance
:param bool clearCache: clear the cache?
also finalize request if status == Done
"""
# # put back request
if request.RequestID not in cls.__reqCache:
return S_OK()
put = cls.requestClient().putRequest( request )
if not put["OK"]:
return put
# # finalize first if possible
if request.Status == "Done" and request.JobID:
finalizeRequest = cls.requestClient().finalizeRequest( request.RequestID, request.JobID )
if not finalizeRequest["OK"]:
request.Status = "Scheduled"
# # del request from cache if needed
if clearCache:
cls.__reqCache.pop( request.RequestID, None )
return S_OK()
@classmethod
def putFTSJobs( cls, ftsJobsList ):
""" put back fts jobs to the FTSDB """
for ftsJob in ftsJobsList:
put = cls.ftsClient().putFTSJob( ftsJob )
if not put["OK"]:
return put
return S_OK()
@staticmethod
def updateFTSFileDict( ftsFilesDict, toUpdateDict ):
""" update :ftsFilesDict: with FTSFiles in :toUpdateDict: """
for category, ftsFileList in ftsFilesDict.items():
for ftsFile in toUpdateDict.get( category, [] ):
if ftsFile not in ftsFileList:
ftsFileList.append( ftsFile )
return ftsFilesDict
# def resources( self ):
# """ resource helper getter """
# if not self.__resources:
# self.__resources = Resources()
# return self.__resources
def threadPool( self ):
""" thread pool getter """
if not self.__threadPool:
self.__threadPool = ThreadPool( self.MIN_THREADS, self.MAX_THREADS )
self.__threadPool.daemonize()
return self.__threadPool
def resetFTSPlacement( self ):
""" create fts Placement """
ftsHistory = self.ftsClient().getFTSHistory()
if not ftsHistory["OK"]:
self.log.error( "unable to get FTS history:", ftsHistory["Message"] )
return ftsHistory
ftsHistory = ftsHistory["Value"]
try:
self.updateLock().acquire()
if not self.__ftsPlacement:
self.__ftsPlacement = FTSPlacement( csPath = None, ftsHistoryViews = ftsHistory )
else:
self.__ftsPlacement.refresh( ftsHistoryViews = ftsHistory )
finally:
self.updateLock().release()
# # save time stamp
self.__ftsPlacementValidStamp = datetime.datetime.now() + datetime.timedelta( seconds = self.FTSPLACEMENT_REFRESH )
return S_OK()
def initialize( self ):
""" agent's initialization """
# # data manager
self.dataManager = DataManager()
log = self.log.getSubLogger( "initialize" )
self.FTSPLACEMENT_REFRESH = self.am_getOption( "FTSPlacementValidityPeriod", self.FTSPLACEMENT_REFRESH )
log.info( "FTSPlacement validity period = %s s" % self.FTSPLACEMENT_REFRESH )
self.SUBMIT_COMMAND = self.am_getOption( "SubmitCommand", self.SUBMIT_COMMAND )
log.info( "FTS submit command = %s" % self.SUBMIT_COMMAND )
self.MONITOR_COMMAND = self.am_getOption( "MonitorCommand", self.MONITOR_COMMAND )
log.info( "FTS commands: submit = %s monitor %s" % ( self.SUBMIT_COMMAND, self.MONITOR_COMMAND ) )
self.PIN_TIME = self.am_getOption( "PinTime", self.PIN_TIME )
log.info( "Stage files before submission = ", {True: "yes", False: "no"}[bool( self.PIN_TIME )] )
self.MAX_ACTIVE_JOBS = self.am_getOption( "MaxActiveJobsPerRoute", self.MAX_ACTIVE_JOBS )
log.info( "Max active FTSJobs/route = ", str( self.MAX_ACTIVE_JOBS ) )
self.MAX_FILES_PER_JOB = self.am_getOption( "MaxFilesPerJob", self.MAX_FILES_PER_JOB )
log.info( "Max FTSFiles/FTSJob = ", str( self.MAX_FILES_PER_JOB ) )
self.MAX_ATTEMPT = self.am_getOption( "MaxTransferAttempts", self.MAX_ATTEMPT )
log.info( "Max transfer attempts = ", str( self.MAX_ATTEMPT ) )
# # thread pool
self.MIN_THREADS = self.am_getOption( "MinThreads", self.MIN_THREADS )
self.MAX_THREADS = self.am_getOption( "MaxThreads", self.MAX_THREADS )
minmax = ( abs( self.MIN_THREADS ), abs( self.MAX_THREADS ) )
self.MIN_THREADS, self.MAX_THREADS = min( minmax ), max( minmax )
log.info( "ThreadPool min threads = ", str( self.MIN_THREADS ) )
log.info( "ThreadPool max threads = ", str( self.MAX_THREADS ) )
self.__ftsVersion = Operations().getValue( 'DataManagement/FTSVersion', 'FTS2' )
log.info( "FTSVersion : %s" % self.__ftsVersion )
log.info( "initialize: creation of FTSPlacement..." )
createPlacement = self.resetFTSPlacement()
if not createPlacement["OK"]:
log.error( "initialize: %s" % createPlacement["Message"] )
return createPlacement
# This sets the Default Proxy to used as that defined under
# /Operations/Shifter/DataManager
# the shifterProxy option in the Configuration can be used to change this default.
self.am_setOption( 'shifterProxy', 'DataManager' )
log.info( "will use DataManager proxy" )
self.registrationProtocols = getRegistrationProtocols()
# # gMonitor stuff here
gMonitor.registerActivity( "RequestsAtt", "Attempted requests executions",
"FTSAgent", "Requests/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RequestsOK", "Successful requests executions",
"FTSAgent", "Requests/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RequestsFail", "Failed requests executions",
"FTSAgent", "Requests/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSJobsSubAtt", "FTSJobs creation attempts",
"FTSAgent", "Created FTSJobs/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSJobsSubOK", "FTSJobs submitted successfully",
"FTSAgent", "Successful FTSJobs submissions/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSJobsSubFail", "FTSJobs submissions failed",
"FTSAgent", "Failed FTSJobs submissions/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSJobsMonAtt", "FTSJobs monitored",
"FTSAgent", "FTSJobs/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSJobsMonOK", "FTSJobs monitored successfully",
"FTSAgent", "FTSJobs/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSJobsMonFail", "FTSJobs attempts failed",
"FTSAgent", "FTSJobs/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSMonitorFail", "Failed FTS monitor executions",
"FTSAgent", "Execution/mins", gMonitor.OP_SUM )
pollingTime = self.am_getOption( "PollingTime", 60 )
for status in list( FTSJob.INITSTATES + FTSJob.TRANSSTATES + FTSJob.FAILEDSTATES + FTSJob.FINALSTATES ):
gMonitor.registerActivity( "FTSJobs%s" % status, "FTSJobs %s" % status ,
"FTSAgent", "FTSJobs/cycle", gMonitor.OP_ACUM, pollingTime )
gMonitor.registerActivity( "FtSJobsPerRequest", "Average FTSJobs per request",
"FTSAgent", "FTSJobs/Request", gMonitor.OP_MEAN )
gMonitor.registerActivity( "FTSFilesPerJob", "FTSFiles per FTSJob",
"FTSAgent", "Number of FTSFiles per FTSJob", gMonitor.OP_MEAN )
gMonitor.registerActivity( "FTSSizePerJob", "Average FTSFiles size per FTSJob",
"FTSAgent", "Average submitted size per FTSJob", gMonitor.OP_MEAN )
return S_OK()
def finalize( self ):
""" finalize processing """
# log = self.log.getSubLogger( "finalize" )
# if self.__reqCache:
# log.info( 'putting back %d requests from cache' % len( self.__reqCache ) )
# else:
# log.info( 'no requests to put back' )
# for request in self.__reqCache.values():
# put = self.requestClient().putRequest( request )
# if not put["OK"]:
# log.error( "unable to put back request '%s': %s" % ( request.RequestName, put["Message"] ) )
return S_OK()
def execute( self ):
""" one cycle execution """
log = gLogger.getSubLogger( "execute" )
# # reset FTSPlacement if expired
now = datetime.datetime.now()
if now > self.__ftsPlacementValidStamp:
log.info( "resetting expired FTS placement..." )
resetFTSPlacement = self.resetFTSPlacement()
if not resetFTSPlacement["OK"]:
log.error( "FTSPlacement recreation error: %s" % resetFTSPlacement["Message"] )
return resetFTSPlacement
self.__ftsPlacementValidStamp = now + datetime.timedelta( seconds = self.FTSPLACEMENT_REFRESH )
requestIDs = self.requestClient().getRequestIDsList( [ "Scheduled" ] )
if not requestIDs["OK"]:
log.error( "unable to read scheduled request ids: %s" % requestIDs["Message"] )
return requestIDs
if not requestIDs["Value"]:
requestIDs = self.__reqCache.keys()
else:
requestIDs = [ req[0] for req in requestIDs["Value"] ]
requestIDs = list( set ( requestIDs + self.__reqCache.keys() ) )
if not requestIDs:
log.info( "no 'Scheduled' requests to process" )
return S_OK()
log.info( "found %s requests to process:" % len( requestIDs ) )
log.info( " => from internal cache: %s" % ( len( self.__reqCache ) ) )
log.info( " => new read from RMS: %s" % ( len( requestIDs ) - len( self.__reqCache ) ) )
for requestID in requestIDs:
request = self.getRequest( requestID )
if not request["OK"]:
log.error( "Error getting request", "%s: %s" % ( requestID, request["Message"] ) )
continue
request = request["Value"]
sTJId = request.RequestID
while True:
queue = self.threadPool().generateJobAndQueueIt( self.processRequest,
args = ( request, ),
sTJId = sTJId )
if queue["OK"]:
log.info( "Request enqueued for execution", sTJId )
gMonitor.addMark( "RequestsAtt", 1 )
break
time.sleep( 1 )
# # process all results
self.threadPool().processAllResults()
return S_OK()
def processRequest( self, request ):
""" process one request
:param Request request: ReqDB.Request
"""
log = self.log.getSubLogger( "req_%s/%s" % ( request.RequestID, request.RequestName ) )
operation = request.getWaiting()
if not operation["OK"]:
log.error( "Unable to find 'Scheduled' ReplicateAndRegister operation in request" )
return self.putRequest( request )
operation = operation["Value"]
if not isinstance( operation, Operation ):
log.error( "Waiting returned operation is not an operation:", type( operation ) )
return self.putRequest( request )
if operation.Type != "ReplicateAndRegister":
log.error( "operation to be executed is not a ReplicateAndRegister but", operation.Type )
return self.putRequest( request )
if operation.Status != "Scheduled":
log.error( "operation in a wrong state, expecting 'Scheduled', got", operation.Status )
return self.putRequest( request )
log.info( 'start processRequest' )
# # select FTSJobs, by default all in TRANS_STATES and INIT_STATES
ftsJobs = self.ftsClient().getFTSJobsForRequest( request.RequestID )
if not ftsJobs["OK"]:
log.error( ftsJobs["Message"] )
return ftsJobs
ftsJobs = [ftsJob for ftsJob in ftsJobs.get( "Value", [] ) if ftsJob.Status not in FTSJob.FINALSTATES]
    # # Use a try: finally: for making sure FTS jobs are put back before returning
try:
# # dict keeping info about files to reschedule, submit, fail and register
ftsFilesDict = dict( [ ( k, list() ) for k in ( "toRegister", "toSubmit", "toFail", "toReschedule", "toUpdate" ) ] )
if ftsJobs:
log.info( "==> found %s FTSJobs to monitor" % len( ftsJobs ) )
# # PHASE 0 = monitor active FTSJobs
for ftsJob in ftsJobs:
monitor = self.__monitorJob( request, ftsJob )
if not monitor["OK"]:
log.error( "unable to monitor FTSJob %s: %s" % ( ftsJob.FTSJobID, monitor["Message"] ) )
ftsJob.Status = "Submitted"
else:
ftsFilesDict = self.updateFTSFileDict( ftsFilesDict, monitor["Value"] )
log.info( "monitoring of FTSJobs completed" )
for key, ftsFiles in ftsFilesDict.items():
if ftsFiles:
log.debug( " => %s FTSFiles to %s" % ( len( ftsFiles ), key[2:].lower() ) )
# # PHASE ONE - check ready replicas
missingReplicas = self.__checkReadyReplicas( request, operation )
if not missingReplicas["OK"]:
log.error( missingReplicas["Message"] )
else:
missingReplicas = missingReplicas["Value"]
for opFile in operation:
# Actually the condition below should never happen... Change printout for checking
if opFile.LFN not in missingReplicas and opFile.Status not in ( 'Done', 'Failed' ):
log.warn( "File should be set Done! %s is replicated at all targets" % opFile.LFN )
opFile.Status = "Done"
if missingReplicas:
# Check if these files are in the FTSDB
ftsFiles = self.ftsClient().getAllFTSFilesForRequest( request.RequestID )
if not ftsFiles['OK']:
log.error( ftsFiles['Message'] )
else:
ftsFiles = ftsFiles['Value']
ftsLfns = set( [ftsFile.LFN for ftsFile in ftsFiles] )
# Recover files not in FTSDB
toSchedule = set( missingReplicas ) - ftsLfns
if toSchedule:
log.warn( '%d files in operation are not in FTSDB, reset them Waiting' % len( toSchedule ) )
for opFile in operation:
if opFile.LFN in toSchedule and opFile.Status == 'Scheduled':
opFile.Status = 'Waiting'
# Recover files with target not in FTSDB
toSchedule = set( [missing for missing, missingSEs in missingReplicas.items()
if not [ftsFile for ftsFile in ftsFiles
if ftsFile.LFN == missing and ftsFile.TargetSE in missingSEs]] )
if toSchedule:
log.warn( '%d targets in operation are not in FTSDB, reset files Waiting' % len( toSchedule ) )
for opFile in operation:
if opFile.LFN in toSchedule and opFile.Status == 'Scheduled':
opFile.Status = 'Waiting'
# identify missing LFNs that are waiting for a replication which is finished
for ftsFile in [f for f in ftsFiles if f.LFN in missingReplicas and f.Status.startswith( 'Waiting#' )]:
targetSE = ftsFile.Status.split( '#' )[1]
finishedFiles = [f for f in ftsFiles if
f.LFN == ftsFile.LFN and
f.Status == 'Finished' and
f.TargetSE == targetSE and
f not in ftsFilesDict['toUpdate']]
if finishedFiles:
log.warn( "%s is %s while replication was Finished to %s, update" % ( ftsFile.LFN, ftsFile.Status, targetSE ) )
ftsFilesDict['toUpdate'] += finishedFiles
# identify Finished transfer for which the replica is still missing
for ftsFile in [f for f in ftsFiles if f.Status == 'Finished' and f.TargetSE in missingReplicas.get( f.LFN, [] ) and f not in ftsFilesDict['toRegister'] ]:
# Check if there is a registration operation for that file and that target
regOp = [op for op in request if
op.Type == 'RegisterReplica' and
op.TargetSE == ftsFile.TargetSE and
[f for f in op if f.LFN == ftsFile.LFN]]
if not regOp:
ftsFilesDict['toReschedule'].append( ftsFile )
toFail = ftsFilesDict.get( "toFail", [] )
toReschedule = ftsFilesDict.get( "toReschedule", [] )
toSubmit = ftsFilesDict.get( "toSubmit", [] )
toRegister = ftsFilesDict.get( "toRegister", [] )
toUpdate = ftsFilesDict.get( "toUpdate", [] )
# # PHASE TWO = Failed files? -> make request Failed and return
if toFail:
log.error( "==> found %s 'Failed' FTSFiles, but maybe other files can be processed..." % len( toFail ) )
for opFile in operation:
for ftsFile in toFail:
if opFile.FileID == ftsFile.FileID:
opFile.Error = ftsFile.Error
opFile.Status = "Failed"
operation.Error = "%s files are missing any replicas" % len( toFail )
        # # request.Status should be Failed if all files in the operation are "Failed"
if request.Status == "Failed":
request.Error = "ReplicateAndRegister %s failed" % operation.Order
log.error( "request is set to 'Failed'" )
# # putRequest is done by the finally: clause... Not good to do it twice
raise escapeTry
# # PHASE THREE - update Waiting#TargetSE FTSFiles
if toUpdate:
log.info( "==> found %s possible FTSFiles to update..." % ( len( toUpdate ) ) )
byTarget = {}
for ftsFile in toUpdate:
byTarget.setdefault( ftsFile.TargetSE, [] ).append( ftsFile.FileID )
for targetSE, fileIDList in byTarget.items():
update = self.ftsClient().setFTSFilesWaiting( operation.OperationID, targetSE, fileIDList )
if not update["OK"]:
log.error( "update FTSFiles failed:", update["Message"] )
# # PHASE FOUR - add 'RegisterReplica' Operations
if toRegister:
log.info( "==> found %d Files waiting for registration, adding 'RegisterReplica' operations" % len( toRegister ) )
registerFiles = self.__insertRegisterOperation( request, operation, toRegister )
if not registerFiles["OK"]:
log.error( "unable to create 'RegisterReplica' operations:", registerFiles["Message"] )
# if request.Status == "Waiting":
# log.info( "request is in 'Waiting' state, will put it back to RMS" )
# return self.putRequest( request )
# # PHASE FIVE - reschedule operation files
if toReschedule:
log.info( "==> found %s Files to reschedule" % len( toReschedule ) )
rescheduleFiles = self.__reschedule( request, operation, toReschedule )
if not rescheduleFiles["OK"]:
log.error( 'Failed to reschedule files', rescheduleFiles["Message"] )
# # PHASE SIX - read Waiting ftsFiles and submit new FTSJobs. We get also Failed files to recover them if needed
ftsFiles = self.ftsClient().getFTSFilesForRequest( request.RequestID, [ "Waiting", "Failed", 'Submitted', 'Canceled' ] )
if not ftsFiles["OK"]:
log.error( ftsFiles["Message"] )
else:
retryIds = set ( [ ftsFile.FTSFileID for ftsFile in toSubmit ] )
for ftsFile in ftsFiles["Value"]:
if ftsFile.FTSFileID not in retryIds:
if ftsFile.Status in ( 'Failed', 'Canceled' ):
              # If the file failed but not unrecoverably, and is not yet in toSubmit
_reschedule, submit, _fail = self.__checkFailed( ftsFile )
elif ftsFile.Status == 'Submitted':
if ftsFile.FTSGUID not in [job.FTSGUID for job in ftsJobs]:
log.warn( 'FTS GUID %s not found in FTS jobs, resubmit file transfer' % ftsFile.FTSGUID )
ftsFile.Status = 'Waiting'
submit = True
else:
submit = False
else:
submit = True
if submit:
toSubmit.append( ftsFile )
retryIds.add( ftsFile.FTSFileID )
# # submit new ftsJobs
if toSubmit:
if request.Status != 'Scheduled':
log.info( "Found %d FTSFiles to submit while request is no longer in Scheduled status (%s)" \
% ( len( toSubmit ), request.Status ) )
else:
self.__checkDuplicates( request.RequestID, toSubmit )
log.info( "==> found %s FTSFiles to submit" % len( toSubmit ) )
submit = self.__submit( request, operation, toSubmit )
if not submit["OK"]:
log.error( submit["Message"] )
else:
ftsJobs += submit["Value"]
# # status change? - put back request
if request.Status != "Scheduled":
log.info( "request no longer in 'Scheduled' state (%s), will put it back to RMS" % request.Status )
except escapeTry:
# This clause is raised when one wants to return from within the try: clause
pass
except Exception, exceptMessage:
log.exception( "Exception in processRequest", lException = exceptMessage )
finally:
putRequest = self.putRequest( request, clearCache = ( request.Status != "Scheduled" ) )
if not putRequest["OK"]:
log.error( "unable to put back request:", putRequest["Message"] )
# # put back jobs in all cases
if ftsJobs:
for ftsJob in list( ftsJobs ):
if not len( ftsJob ):
log.warn( 'FTS job empty, removed: %s' % ftsJob.FTSGUID )
self.ftsClient().deleteFTSJob( ftsJob.FTSJobID )
ftsJobs.remove( ftsJob )
putJobs = self.putFTSJobs( ftsJobs )
if not putJobs["OK"]:
log.error( "unable to put back FTSJobs:", putJobs["Message"] )
putRequest = putJobs
# This is where one returns from after execution of the finally: block
return putRequest
def __checkDuplicates( self, reqID, toSubmit ):
""" Check in a list of FTSFiles whether there are duplicates
"""
tupleList = []
log = self.log.getSubLogger( "%s/checkDuplicates" % reqID )
for ftsFile in list( toSubmit ):
fTuple = ( ftsFile.LFN, ftsFile.SourceSE, ftsFile.TargetSE )
if fTuple in tupleList:
log.warn( "Duplicate file to submit, removed:", ', '.join( fTuple ) )
toSubmit.remove( ftsFile )
self.ftsClient().deleteFTSFiles( ftsFile.OperationID, [ftsFile.FileID] )
else:
tupleList.append( fTuple )
def __reschedule( self, request, operation, toReschedule ):
""" reschedule list of :toReschedule: files in request for operation :operation:
:param Request request:
:param Operation operation:
:param list toReschedule: list of FTSFiles
"""
log = self.log.getSubLogger( "req_%s/%s/reschedule" % ( request.RequestID, request.RequestName ) )
ftsFileIDs = [ftsFile.FileID for ftsFile in toReschedule]
for opFile in operation:
if opFile.FileID in ftsFileIDs:
opFile.Status = "Waiting"
toSchedule = []
# # filter files
for opFile in [ opFile for opFile in operation if opFile.Status == "Waiting" ]:
replicas = self.__filterReplicas( opFile )
if not replicas["OK"]:
continue
replicas = replicas["Value"]
validReplicas = replicas["Valid"]
noMetaReplicas = replicas["NoMetadata"]
noReplicas = replicas["NoReplicas"]
badReplicas = replicas['Bad']
if validReplicas:
validTargets = list( set( operation.targetSEList ) - set( validReplicas ) )
if not validTargets:
log.info( "file %s is already present at all targets" % opFile.LFN )
opFile.Status = "Done"
else:
toSchedule.append( ( opFile.toJSON()["Value"], validReplicas, validTargets ) )
elif noMetaReplicas:
log.warn( "unable to schedule '%s', couldn't get metadata at %s" % ( opFile.LFN, ','.join( noMetaReplicas ) ) )
elif noReplicas:
log.warn( "unable to schedule %s, file doesn't exist at %s" % ( opFile.LFN, ','.join( noReplicas ) ) )
opFile.Status = 'Failed'
elif badReplicas:
log.warn( "unable to schedule %s, all replicas have a bad checksum at %s" % ( opFile.LFN, ','.join( badReplicas ) ) )
opFile.Status = 'Failed'
# # do real schedule here
if toSchedule:
log.info( "Rescheduling %d files" % len( toReschedule ) )
ftsSchedule = self.ftsClient().ftsSchedule( request.RequestID,
operation.OperationID,
toSchedule )
if not ftsSchedule["OK"]:
log.error( "Error scheduling files", ftsSchedule["Message"] )
return ftsSchedule
ftsSchedule = ftsSchedule["Value"]
for opFile in operation:
fileID = opFile.FileID
if fileID in ftsSchedule["Successful"]:
opFile.Status = "Scheduled"
elif fileID in ftsSchedule["Failed"]:
opFile.Error = ftsSchedule["Failed"][fileID]
log.error( "Error scheduling file %s" % opFile.LFN, opFile.Error )
return S_OK()
def __submit( self, request, operation, toSubmit ):
""" create and submit new FTSJobs using list of FTSFiles
:param Request request: ReqDB.Request instance
:param list ftsFiles: list of FTSFile instances
:return: [ FTSJob, FTSJob, ...]
"""
log = self.log.getSubLogger( "req_%s/%s/submit" % ( request.RequestID, request.RequestName ) )
bySourceAndTarget = {}
for ftsFile in toSubmit:
if ftsFile.SourceSE not in bySourceAndTarget:
bySourceAndTarget.setdefault( ftsFile.SourceSE, {} )
if ftsFile.TargetSE not in bySourceAndTarget[ftsFile.SourceSE]:
bySourceAndTarget[ftsFile.SourceSE].setdefault( ftsFile.TargetSE, [] )
bySourceAndTarget[ftsFile.SourceSE][ftsFile.TargetSE].append( ftsFile )
ftsJobs = []
for source, targetDict in bySourceAndTarget.items():
for target, ftsFileList in targetDict.items():
log.info( "found %s files to submit from %s to %s" % ( len( ftsFileList ), source, target ) )
route = self.__ftsPlacement.findRoute( source, target )
if not route["OK"]:
log.error( route["Message"] )
continue
route = route["Value"]
routeValid = self.__ftsPlacement.isRouteValid( route )
if not routeValid['OK']:
log.error( "Route invalid : %s" % routeValid['Message'] )
continue
sourceSE = self.getSE( source )
sourceToken = sourceSE.getStorageParameters( "SRM2" )
if not sourceToken["OK"]:
log.error( "unable to get sourceSE '%s' parameters: %s" % ( source, sourceToken["Message"] ) )
continue
seStatus = sourceSE.getStatus()['Value']
targetSE = self.getSE( target )
targetToken = targetSE.getStorageParameters( "SRM2" )
if not targetToken["OK"]:
log.error( "unable to get targetSE '%s' parameters: %s" % ( target, targetToken["Message"] ) )
continue
# # create FTSJob
for fileList in breakListIntoChunks( ftsFileList, self.MAX_FILES_PER_JOB ):
ftsJob = FTSJob()
ftsJob.RequestID = request.RequestID
ftsJob.OperationID = operation.OperationID
ftsJob.SourceSE = source
ftsJob.TargetSE = target
ftsJob.SourceToken = sourceToken["Value"].get( "SpaceToken", "" )
ftsJob.TargetToken = targetToken["Value"].get( "SpaceToken", "" )
ftsJob.FTSServer = route.toNode.FTSServer
for ftsFile in fileList:
ftsFile.Attempt += 1
ftsFile.Error = ""
ftsJob.addFile( ftsFile )
submit = ftsJob.submitFTS( self.__ftsVersion, command = self.SUBMIT_COMMAND, pinTime = self.PIN_TIME if seStatus['TapeSE'] else 0 )
if not submit["OK"]:
log.error( "unable to submit FTSJob:", submit["Message"] )
continue
log.info( "FTSJob '%s'@'%s' has been submitted" % ( ftsJob.FTSGUID, ftsJob.FTSServer ) )
# # update statuses for job files
for ftsFile in ftsJob:
ftsFile.FTSGUID = ftsJob.FTSGUID
ftsFile.Status = "Submitted"
ftsFile.Attempt += 1
# # update placement route
try:
self.updateLock().acquire()
self.__ftsPlacement.startTransferOnRoute( route )
finally:
self.updateLock().release()
ftsJobs.append( ftsJob )
log.info( "%s new FTSJobs have been submitted" % len( ftsJobs ) )
return S_OK( ftsJobs )
def __monitorJob( self, request, ftsJob ):
""" execute FTSJob.monitorFTS for a given :ftsJob:
if ftsJob is in a final state, finalize it
:param Request request: ReqDB.Request instance
:param FTSJob ftsJob: FTSDB.FTSJob instance
"""
    log = self.log.getSubLogger( "req_%s/%s/monitor/%s" % ( request.RequestID, request.RequestName, ftsJob.FTSGUID ) )
log.info( "FTSJob '%s'@'%s'" % ( ftsJob.FTSGUID, ftsJob.FTSServer ) )
# # this will be returned
ftsFilesDict = dict( [ ( k, list() ) for k in ( "toRegister", "toSubmit", "toFail", "toReschedule", "toUpdate" ) ] )
monitor = ftsJob.monitorFTS( self.__ftsVersion , command = self.MONITOR_COMMAND )
if not monitor["OK"]:
gMonitor.addMark( "FTSMonitorFail", 1 )
log.error( monitor["Message"] )
if "getTransferJobSummary2: Not authorised to query request" in monitor["Message"] or \
'was not found' in monitor['Message'] or\
'Unknown transfer state' in monitor['Message']:
log.error( "FTSJob not known (expired on server?): delete it" )
for ftsFile in ftsJob:
ftsFile.Status = "Waiting"
ftsFilesDict["toSubmit"].append( ftsFile )
# # No way further for that job: delete it
res = self.ftsClient().deleteFTSJob( ftsJob.FTSJobID )
if not res['OK']:
log.error( "Unable to delete FTSJob", res['Message'] )
return S_OK( ftsFilesDict )
return monitor
monitor = monitor["Value"]
log.info( "FTSJob Status = %s Completeness = %s" % ( ftsJob.Status, ftsJob.Completeness ) )
# # monitor status change
gMonitor.addMark( "FTSJobs%s" % ftsJob.Status, 1 )
if ftsJob.Status in FTSJob.FINALSTATES:
finalizeFTSJob = self.__finalizeFTSJob( request, ftsJob )
if not finalizeFTSJob["OK"]:
if 'Unknown transfer state' in finalizeFTSJob['Message']:
for ftsFile in ftsJob:
ftsFile.Status = "Waiting"
ftsFilesDict["toSubmit"].append( ftsFile )
# # No way further for that job: delete it
res = self.ftsClient().deleteFTSJob( ftsJob.FTSJobID )
if not res['OK']:
log.error( "Unable to delete FTSJob", res['Message'] )
else:
log.error( finalizeFTSJob["Message"] )
return finalizeFTSJob
else:
ftsFilesDict = self.updateFTSFileDict( ftsFilesDict, finalizeFTSJob["Value"] )
return S_OK( ftsFilesDict )
def __finalizeFTSJob( self, request, ftsJob ):
""" finalize FTSJob
:param Request request: ReqDB.Request instance
:param FTSJob ftsJob: FTSDB.FTSJob instance
"""
log = self.log.getSubLogger( "req_%s/%s/monitor/%s/finalize" % ( request.RequestID,
request.RequestName,
ftsJob.FTSJobID ) )
log.info( "finalizing FTSJob %s@%s" % ( ftsJob.FTSGUID, ftsJob.FTSServer ) )
# # this will be returned
ftsFilesDict = dict( [ ( k, list() ) for k in ( "toRegister", "toSubmit", "toFail", "toReschedule", "toUpdate" ) ] )
monitor = ftsJob.monitorFTS(self.__ftsVersion, command = self.MONITOR_COMMAND, full = True )
if not monitor["OK"]:
log.error( monitor["Message"] )
return monitor
# # split FTSFiles to different categories
processFiles = self.__filterFiles( ftsJob )
if not processFiles["OK"]:
log.error( processFiles["Message"] )
return processFiles
processFiles = processFiles['Value']
if processFiles['toRegister']:
log.error( "Some files could not be registered in FC:", len( processFiles['toRegister'] ) )
ftsFilesDict = self.updateFTSFileDict( ftsFilesDict, processFiles )
# # send accounting record for this job
self.__sendAccounting( ftsJob, request.OwnerDN )
# # update placement - remove this job from placement
route = self.__ftsPlacement.findRoute( ftsJob.SourceSE, ftsJob.TargetSE )
if route["OK"]:
try:
self.updateLock().acquire()
self.__ftsPlacement.finishTransferOnRoute( route['Value'] )
finally:
self.updateLock().release()
log.info( "FTSJob is finalized" )
return S_OK( ftsFilesDict )
def __checkFailed( self, ftsFile ):
reschedule = False
submit = False
fail = False
if ftsFile.Status in ( "Failed", 'Canceled' ):
if ftsFile.Error == "MissingSource":
reschedule = True
else:
if ftsFile.Attempt < self.MAX_ATTEMPT:
submit = True
else:
fail = True
return reschedule, submit, fail
def __filterFiles( self, ftsJob ):
""" process ftsFiles from finished ftsJob
:param FTSJob ftsJob: monitored FTSJob instance
"""
# # lists for different categories
toUpdate = []
toReschedule = []
toRegister = []
toSubmit = []
toFail = []
# # loop over files in fts job
for ftsFile in ftsJob:
# # successful files
if ftsFile.Status == "Finished":
if ftsFile.Error == "AddCatalogReplicaFailed":
toRegister.append( ftsFile )
toUpdate.append( ftsFile )
continue
reschedule, submit, fail = self.__checkFailed( ftsFile )
if reschedule:
toReschedule.append( ftsFile )
elif submit:
toSubmit.append( ftsFile )
elif fail:
toFail.append( ftsFile )
return S_OK( { "toUpdate": toUpdate,
"toSubmit": toSubmit,
"toRegister": toRegister,
"toReschedule": toReschedule,
"toFail": toFail } )
def __insertRegisterOperation( self, request, operation, toRegister ):
""" add RegisterReplica operation
:param Request request: request instance
:param Operation transferOp: 'ReplicateAndRegister' operation for this FTSJob
:param list toRegister: [ FTSDB.FTSFile, ... ] - files that failed to register
"""
log = self.log.getSubLogger( "req_%s/%s/registerFiles" % ( request.RequestID, request.RequestName ) )
byTarget = {}
for ftsFile in toRegister:
if ftsFile.TargetSE not in byTarget:
byTarget.setdefault( ftsFile.TargetSE, [] )
byTarget[ftsFile.TargetSE].append( ftsFile )
log.info( "will create %s 'RegisterReplica' operations" % len( byTarget ) )
for target, ftsFileList in byTarget.items():
log.info( "creating 'RegisterReplica' operation for targetSE %s with %s files..." % ( target,
len( ftsFileList ) ) )
registerOperation = Operation()
registerOperation.Type = "RegisterReplica"
registerOperation.Status = "Waiting"
registerOperation.TargetSE = target
targetSE = self.getSE( target )
for ftsFile in ftsFileList:
opFile = File()
opFile.LFN = ftsFile.LFN
pfn = returnSingleResult( targetSE.getURL( ftsFile.TargetSURL, protocol = self.registrationProtocols ) )
if not pfn["OK"]:
continue
opFile.PFN = pfn["Value"]
registerOperation.addFile( opFile )
request.insertBefore( registerOperation, operation )
return S_OK()
@staticmethod
def __sendAccounting( ftsJob, ownerDN ):
""" prepare and send DataOperation to AccouringDB """
dataOp = DataOperation()
dataOp.setStartTime( fromString( ftsJob.SubmitTime ) )
dataOp.setEndTime( fromString( ftsJob.LastUpdate ) )
accountingDict = dict()
accountingDict["OperationType"] = "ReplicateAndRegister"
username = getUsernameForDN( ownerDN )
if not username["OK"]:
username = ownerDN
else:
username = username["Value"]
accountingDict["User"] = username
accountingDict["Protocol"] = "FTS3" if 'fts3' in ftsJob.FTSServer.lower() else 'FTS'
accountingDict['ExecutionSite'] = ftsJob.FTSServer
accountingDict['RegistrationTime'] = ftsJob._regTime
accountingDict['RegistrationOK'] = ftsJob._regSuccess
accountingDict['RegistrationTotal'] = ftsJob._regTotal
accountingDict["TransferOK"] = len( [ f for f in ftsJob if f.Status in FTSFile.SUCCESS_STATES ] )
accountingDict["TransferTotal"] = len( ftsJob )
accountingDict["TransferSize"] = ftsJob.Size - ftsJob.FailedSize
accountingDict["FinalStatus"] = ftsJob.Status
accountingDict["Source"] = ftsJob.SourceSE
accountingDict["Destination"] = ftsJob.TargetSE
# dt = ftsJob.LastUpdate - ftsJob.SubmitTime
# transferTime = dt.days * 86400 + dt.seconds
# accountingDict["TransferTime"] = transferTime
accountingDict['TransferTime'] = sum( [int( f._duration ) for f in ftsJob if f.Status in FTSFile.SUCCESS_STATES ] )
dataOp.setValuesFromDict( accountingDict )
dataOp.commit()
def __checkReadyReplicas( self, request, operation ):
""" check ready replicas for transferOperation """
log = self.log.getSubLogger( "req_%s/%s/checkReadyReplicas" % ( request.RequestID, request.RequestName ) )
targetSESet = set( operation.targetSEList )
# # { LFN: [ targetSE, ... ] }
missingReplicas = {}
scheduledFiles = dict( [ ( opFile.LFN, opFile ) for opFile in operation
if opFile.Status in ( "Scheduled", "Waiting" ) ] )
# # get replicas
replicas = FileCatalog().getReplicas( scheduledFiles.keys() )
if not replicas["OK"]:
self.log.error( replicas["Message"] )
return replicas
replicas = replicas["Value"]
fullyReplicated = 0
missingSEs = {}
for successfulLFN in replicas["Successful"]:
reps = set( replicas['Successful'][successfulLFN] )
if targetSESet.issubset( reps ):
log.verbose( "%s has been replicated to all targets" % successfulLFN )
fullyReplicated += 1
scheduledFiles[successfulLFN].Status = "Done"
else:
missingReplicas[successfulLFN] = sorted( targetSESet - reps )
ses = ",".join( missingReplicas[ successfulLFN ] )
missingSEs[ses] = missingSEs.setdefault( ses, 0 ) + 1
log.verbose( "%s is still missing at %s" % ( successfulLFN, ses ) )
if fullyReplicated:
log.info( "%d new files have been replicated to all targets" % fullyReplicated )
if missingSEs:
for ses in missingSEs:
log.info( "%d replicas still missing at %s" % ( missingSEs[ses], ses ) )
reMissing = re.compile( "no such file or directory" )
for failedLFN, errStr in replicas["Failed"].items():
scheduledFiles[failedLFN].Error = errStr
if reMissing.search( errStr.lower() ):
log.error( "%s is missing, setting its status to 'Failed'" % failedLFN )
scheduledFiles[failedLFN].Status = "Failed"
else:
log.warn( "unable to read replicas for %s: %s" % ( failedLFN, errStr ) )
return S_OK( missingReplicas )
def __filterReplicas( self, opFile ):
""" filter out banned/invalid source SEs """
from DIRAC.DataManagementSystem.Agent.RequestOperations.ReplicateAndRegister import filterReplicas
return filterReplicas( opFile, logger = self.log, dataManager = self.dataManager, seCache = self.getSECache() )
|
miloszz/DIRAC
|
DataManagementSystem/Agent/FTSAgent.py
|
Python
|
gpl-3.0
| 45,763
|
[
"DIRAC"
] |
ce0eff82dade205007fa27ddcbe54b0167f9caf6b9d4db492fa8bb8d53e48147
|
import csv
stockDict = {
'MIIIU': 'M III Acquisition Corp.',
'ORC': 'Orchid Island Capital, Inc.',
'GRBK': 'Green Brick Partners, Inc.',
'GAS': 'AGL Resources, Inc.',
'THG': 'The Hanover Insurance Group, Inc.',
'USLM': 'United States Lime & Minerals, Inc.',
'NSEC': 'National Security Group, Inc.',
'AEO': 'American Eagle Outfitters, Inc.',
'KINS': 'Kingstone Companies, Inc',
'IDLB': 'PowerShares FTSE International Low Beta Equal Weight Portfolio',
'EVI': 'EnviroStarm, Inc.',
'SUI-A': 'Sun Communities, Inc.',
'PRXL': 'PAREXEL International Corporation',
'WABC': 'Westamerica Bancorporation',
'IONS': 'Ionis Pharmaceuticals, Inc.',
'WWAV': 'Whitewave Foods Company (The)',
'DLTH': 'Duluth Holdings Inc.',
'BLVD': 'Boulevard Acquisition Corp. II',
'OMED': 'OncoMed Pharmaceuticals, Inc.',
'BCOV': 'Brightcove Inc.',
'CVGI': 'Commercial Vehicle Group, Inc.',
'LSCC': 'Lattice Semiconductor Corporation',
'TASR': 'TASER International, Inc.',
'ICBK': 'County Bancorp, Inc.',
'ECOM': 'ChannelAdvisor Corporation',
'OHAI': 'OHA Investment Corporation',
'MKL': 'Markel Corporation',
'IPHI': 'Inphi Corporation',
'KRNY': 'Kearny Financial',
'CECO': 'Career Education Corporation',
'CERS': 'Cerus Corporation',
'BSMX': 'Grupo Financiero Santander Mexico S.A. B. de C.V.',
'QNST': 'QuinStreet, Inc.',
'CBSH': 'Commerce Bancshares, Inc.',
'WPXP': 'WPX Energy, Inc.',
'DTZ': 'DTE Energy Company',
'LULU': 'lululemon athletica inc.',
'ONP': 'Orient Paper, Inc.',
'WPZ': 'Williams Partners LP',
'QUOT': 'Quotient Technology Inc.',
'CNBKA': 'Century Bancorp, Inc.',
'FLML': 'Flamel Technologies S.A.',
'EMZ': 'Entergy Mississippi, Inc.',
'GOL': 'Gol Linhas Aereas Inteligentes S.A.',
'TTF': 'Thai Fund, Inc. (The)',
'CRC': 'California Resources Corporation',
'RRD': 'R.R. Donnelley & Sons Company',
'WIX': 'Wix.com Ltd.',
'SQ': 'Square, Inc.',
'EPIQ': 'EPIQ Systems, Inc.',
'HTBK': 'Heritage Commerce Corp',
'C-S': 'Citigroup Inc.',
'GLUU': 'Glu Mobile Inc.',
'QABA': 'First Trust NASDAQ ABA Community Bank Index Fund',
'RLJ': 'RLJ Lodging Trust',
'GNW': 'Genworth Financial Inc',
'ENIA': 'Enersis Americas S.A.',
'PCK': 'Pimco California Municipal Income Fund II',
'GNCMA': 'General Communication, Inc.',
'STT-C': 'State Street Corporation',
'INSM': 'Insmed, Inc.',
'RIG': 'Transocean Ltd.',
'KFFB': 'Kentucky First Federal Bancorp',
'TEL': 'TE Connectivity Ltd.',
'YELP': 'Yelp Inc.',
'SMMF': 'Summit Financial Group, Inc.',
'LGCYO': 'Legacy Reserves LP',
'CFD': 'Nuveen Diversified Commodity Fund',
'QTEC': 'First Trust NASDAQ-100- Technology Index Fund',
'SKT': 'Tanger Factory Outlet Centers, Inc.',
'ADSK': 'Autodesk, Inc.',
'CARO': 'Carolina Financial Corporation',
'LLTC': 'Linear Technology Corporation',
'BWG': 'Legg Mason BW Global Income Opportunities Fund Inc.',
'UMBF': 'UMB Financial Corporation',
'CPHC': 'Canterbury Park Holding Corporation',
'SWX': 'Southwest Gas Corporation',
'AMAT': 'Applied Materials, Inc.',
'DFRG': 'Del Frisco\'s Restaurant Group, Inc.',
'INTU': 'Intuit Inc.',
'INN-B': 'Summit Hotel Properties, Inc.',
'EIGR': 'Eiger BioPharmaceuticals, Inc.',
'BLRX': 'BioLineRx Ltd.',
'BNJ': 'BlackRock New Jersey Municipal Income Trust',
'ALDR': 'Alder BioPharmaceuticals, Inc.',
'CTSH': 'Cognizant Technology Solutions Corporation',
'NLY-C': 'Annaly Capital Management Inc',
'VRTU': 'Virtusa Corporation',
'GPI': 'Group 1 Automotive, Inc.',
'BXS': 'BancorpSouth, Inc.',
'SSKN': 'Strata Skin Sciences, Inc.',
'PZC': 'PIMCO California Municipal Income Fund III',
'TORM': 'TOR Minerals International Inc',
'NAN': 'Nuveen New York Dividend Advantage Municipal Fund',
'SFBS': 'ServisFirst Bancshares, Inc.',
'GIGM': 'GigaMedia Limited',
'SAN-C': 'Banco Santander, S.A.',
'EVG': 'Eaton Vance Short Diversified Income Fund',
'EGI': 'Entree Gold Inc',
'TMP': 'Tompkins Financial Corporation',
'EDN': 'Empresa Distribuidora Y Comercializadora Norte S.A. (Edenor)',
'NEOT': 'Neothetics, Inc.',
'RY-T': 'Royal Bank Of Canada',
'DHF': 'Dreyfus High Yield Strategies Fund',
'GMT': 'GATX Corporation',
'ICB': 'MS Income Securities, Inc.',
'LGCY': 'Legacy Reserves LP',
'AUY': 'Yamana Gold Inc.',
'WYY': 'WidePoint Corporation',
'UIS': 'Unisys Corporation',
'DSKX': 'DS Healthcare Group, Inc.',
'TROV': 'TrovaGene, Inc.',
'TISI': 'Team, Inc.',
'ENO': 'Entergy New Orleans, Inc.',
'LANC': 'Lancaster Colony Corporation',
'MSCA': 'Main Street Capital Corporation',
'VIDI': 'Vident International Equity Fund',
'WTFC': 'Wintrust Financial Corporation',
'GCI': 'TEGNA Inc.',
'AEZS': 'AEterna Zentaris Inc.',
'RSTI': 'Rofin-Sinar Technologies, Inc.',
'HRTX': 'Heron Therapeutics, Inc. ',
'MBCN': 'Middlefield Banc Corp.',
'MSI': 'Motorola Solutions, Inc.',
'RSO': 'Resource Capital Corp.',
'ADUS': 'Addus HomeCare Corporation',
'EVP': 'Eaton Vance Pennsylvania Municipal Income Trust',
'LTS': 'Ladenburg Thalmann Financial Services Inc',
'DSCI': 'Derma Sciences, Inc.',
'TSBK': 'Timberland Bancorp, Inc.',
'NMS': 'Nuveen Minnesota Municipal Income Fund',
'ALGT': 'Allegiant Travel Company',
'FDC': 'First Data Corporation',
'TPH': 'TRI Pointe Group, Inc.',
'FFHL': 'Fuwei Films (Holdings) Co., Ltd.',
'MFC': 'Manulife Financial Corp',
'JSYN': 'Jensyn Acquistion Corp.',
'NEWTL': 'Newtek Business Services Corp.',
'LVNTA': 'Liberty Interactive Corporation',
'SEV': 'Sevcon, Inc.',
'GNST': 'GenSight Biologics S.A.',
'NTIC': 'Northern Technologies International Corporation',
'CADT': 'DT Asia Investments Limited',
'HLG': 'Hailiang Education Group Inc.',
'VCRA': 'Vocera Communications, Inc.',
'FSV': 'FirstService Corporation',
'SHO': 'Sunstone Hotel Investors, Inc.',
'HCN-J': 'Welltower Inc.',
'LBAI': 'Lakeland Bancorp, Inc.',
'CI': 'Cigna Corporation',
'NXJ': 'Nuveen New Jersey Dividend Advantage Municipal Fund',
'BNTC': 'Benitec Biopharma Limited',
'MYL': 'Mylan N.V.',
'HRMN': 'Harmony Merger Corp.',
'VMW': 'Vmware, Inc.',
'MOD': 'Modine Manufacturing Company',
'OCX': 'OncoCyte Corporation',
'GPM': 'Guggenheim Enhanced Equity Income Fund',
'DLR-H': 'Digital Realty Trust, Inc.',
'FN': 'Fabrinet',
'BVN': 'Buenaventura Mining Company Inc.',
'PFN': 'PIMCO Income Strategy Fund II',
'NMY': 'Nuveen Maryland Premium Income Municipal Fund',
'ORMP': 'Oramed Pharmaceuticals Inc.',
'NOAH': 'Noah Holdings Ltd.',
'SQNS': 'Sequans Communications S.A.',
'CHMA': 'Chiasma, Inc.',
'LGL': 'LGL Group, Inc. (The)',
'FTHI': 'First Trust High Income ETF',
'PGND': 'Press Ganey Holdings, Inc.',
'CFFN': 'Capitol Federal Financial, Inc.',
'BTA': 'BlackRock Long-Term Municipal Advantage Trust',
'ICLR': 'ICON plc',
'PACB': 'Pacific Biosciences of California, Inc.',
'PAVMU': 'PAVmed Inc.',
'UIHC': 'United Insurance Holdings Corp.',
'BML-I': 'Bank of America Corporation',
'NBIX': 'Neurocrine Biosciences, Inc.',
'BKEP': 'Blueknight Energy Partners L.P., L.L.C.',
'DHY': 'Credit Suisse High Yield Bond Fund',
'EEML': 'iShares MSCI Emerging Markets Latin America Index Fund',
'AMTG-A': 'Apollo Residential Mortgage, Inc.',
'ADI': 'Analog Devices, Inc.',
'COLL': 'Collegium Pharmaceutical, Inc.',
'PFH': 'CABCO Series 2004-101 Trust',
'SELF': 'Global Self Storage, Inc.',
'PLXS': 'Plexus Corp.',
'HWKN': 'Hawkins, Inc.',
'AEY': 'ADDvantage Technologies Group, Inc.',
'SQBG': 'Sequential Brands Group, Inc.',
'CS': 'Credit Suisse Group',
'BVXVW': 'BiondVax Pharmaceuticals Ltd.',
'BA': 'Boeing Company (The)',
'CALL': 'magicJack VocalTec Ltd',
'OVLY': 'Oak Valley Bancorp (CA)',
'SNX': 'Synnex Corporation',
'IOC': 'InterOil Corporation',
'PIH': '1347 Property Insurance Holdings, Inc.',
'CUBI': 'Customers Bancorp, Inc',
'ISF': 'ING Group, N.V.',
'TFSCW': '1347 Capital Corp.',
'KVHI': 'KVH Industries, Inc.',
'NCR': 'NCR Corporation',
'ASC': 'Ardmore Shipping Corporation',
'AMX': 'America Movil, S.A.B. de C.V.',
'CRZO': 'Carrizo Oil & Gas, Inc.',
'AGIIL': 'Argo Group International Holdings, Ltd.',
'AEG': 'Aegon NV',
'CERU': 'Cerulean Pharma Inc.',
'MNRK': 'Monarch Financial Holdings, Inc.',
'ATNI': 'Atlantic Tele-Network, Inc.',
'WPT': 'World Point Terminals, LP',
'CHD': 'Church & Dwight Company, Inc.',
'VET': 'Vermilion Energy Inc.',
'RSO-B': 'Resource Capital Corp.',
'AMTX': 'Aemetis, Inc',
'CORR-A': 'CorEnergy Infrastructure Trust, Inc.',
'ARLP': 'Alliance Resource Partners, L.P.',
'GIG': 'GigPeak, Inc.',
'SGY': 'Stone Energy Corporation',
'FSIC': 'FS Investment Corporation',
'HBMD': 'Howard Bancorp, Inc.',
'DGRS': 'WisdomTree U.S. SmallCap Quality Dividend Growth Fund',
'KMG': 'KMG Chemicals, Inc.',
'ARI-A': 'Apollo Commercial Real Estate Finance',
'ORN': 'Orion Group Holdings, Inc.',
'DCIX': 'Diana Containerships Inc.',
'INN': 'Summit Hotel Properties, Inc.',
'GRUB': 'GrubHub Inc.',
'SBGI': 'Sinclair Broadcast Group, Inc.',
'UTF': 'Cohen & Steers Infrastructure Fund, Inc',
'CHEF': 'The Chefs\' Warehouse, Inc.',
'FBIZ': 'First Business Financial Services, Inc.',
'PAACU': 'Pacific Special Acquisition Corp.',
'DEA': 'Easterly Government Properties, Inc.',
'RIV': 'RiverNorth Opportunities Fund, Inc.',
'NYMT': 'New York Mortgage Trust, Inc.',
'BHB': 'Bar Harbor Bankshares, Inc.',
'CASY': 'Caseys General Stores, Inc.',
'TAL': 'TAL International Group, Inc.',
'UEPS': 'Net 1 UEPS Technologies, Inc.',
'SHOR': 'ShoreTel, Inc.',
'STK': 'Columbia Seligman Premium Technology Growth Fund, Inc',
'FDUS': 'Fidus Investment Corporation',
'BDGE': 'Bridge Bancorp, Inc.',
'CLMT': 'Calumet Specialty Products Partners, L.P.',
'MET': 'MetLife, Inc.',
'PLBC': 'Plumas Bancorp',
'QVCB': 'Liberty Interactive Corporation',
'CSWI': 'CSW Industrials, Inc.',
'JKI': 'iShares Morningstar Mid-Cap ETF',
'ZG': 'Zillow Group, Inc.',
'GOODP': 'Gladstone Commercial Corporation',
'RELX': 'RELX PLC',
'FRGI': 'Fiesta Restaurant Group, Inc.',
'PNTA': 'PennantPark Investment Corporation',
'TUSA': 'First Trust Total US Market AlphaDEX ETF',
'MIME': 'Mimecast Limited',
'WNS': 'WNS (Holdings) Limited',
'PEGI': 'Pattern Energy Group Inc.',
'CLCD': 'CoLucid Pharmaceuticals, Inc.',
'PJH': 'Prudential Financial, Inc.',
'QTS': 'QTS Realty Trust, Inc.',
'NGLS-A': 'Targa Resources Partners LP',
'SRG': 'Seritage Growth Properties',
'HBI': 'Hanesbrands Inc.',
'FULLL': 'Full Circle Capital Corporation',
'TPX': 'Tempur Sealy International, Inc.',
'FCX': 'Freeport-McMoran, Inc.',
'TRU': 'TransUnion',
'CVA': 'Covanta Holding Corporation',
'JOBS': '51job, Inc.',
'ARRY': 'Array BioPharma Inc.',
'SXCP': 'SunCoke Energy Partners, L.P.',
'SIEN': 'Sientra, Inc.',
'KMT': 'Kennametal Inc.',
'PICO': 'PICO Holdings Inc.',
'ETV': 'Eaton Vance Corporation',
'PTXP': 'PennTex Midstream Partners, LP',
'FICO': 'Fair Isaac Corporation',
'OLBK': 'Old Line Bancshares, Inc.',
'GRC': 'Gorman-Rupp Company (The)',
'SBCF': 'Seacoast Banking Corporation of Florida',
'QUAD': 'Quad Graphics, Inc',
'AE': 'Adams Resources & Energy, Inc.',
'SBSA': 'Spanish Broadcasting System, Inc.',
'BHAC': 'Barington/Hilco Acquisition Corp.',
'ZBK': 'Zions Bancorporation',
'RIGP': 'Transocean Partners LLC',
'KUTV': 'Ku6 Media Co., Ltd.',
'RBPAA': 'Royal Bancshares of Pennsylvania, Inc.',
'GMLP': 'Golar LNG Partners LP',
'TLGT': 'Teligent, Inc.',
'ROCK': 'Gibraltar Industries, Inc.',
'EVER-A': 'EverBank Financial Corp.',
'RMAX': 'RE/MAX Holdings, Inc.',
'ROX': 'Castle Brands, Inc.',
'KTOS': 'Kratos Defense & Security Solutions, Inc.',
'CBAK': 'China BAK Battery, Inc.',
'GXP-A': 'Great Plains Energy Inc',
'KMPR': 'Kemper Corporation',
'AMPH': 'Amphastar Pharmaceuticals, Inc.',
'AFL': 'Aflac Incorporated',
'GLBL': 'TerraForm Global, Inc.',
'NXRT': 'NexPoint Residential Trust, Inc.',
'VAL': 'Valspar Corporation (The)',
'NCS': 'NCI Building Systems, Inc.',
'GASS': 'StealthGas, Inc.',
'SYKE': 'Sykes Enterprises, Incorporated',
'SU': 'Suncor Energy Inc.',
'HQL': 'Tekla Life Sciences Investors',
'JNJ': 'Johnson & Johnson',
'BBG': 'Bill Barrett Corporation',
'ANAT': 'American National Insurance Company',
'TPRE': 'Third Point Reinsurance Ltd.',
'BLE': 'BlackRock Municipal Income Trust II',
'MHO-A': 'M/I Homes, Inc.',
'TSE': 'Trinseo S.A.',
'CSF': 'Victory CEMP US Discovery Enhanced Volatility Wtd Index ETF',
'GAM': 'General American Investors, Inc.',
'OPY': 'Oppenheimer Holdings, Inc.',
'COT': 'Cott Corporation',
'FTAI': 'Fortress Transportation and Infrastructure Investors LLC',
'PLX': 'Protalix BioTherapeutics, Inc.',
'ENDP': 'Endo International plc',
'ABAC': 'Aoxin Tianli Group, Inc.',
'SCHW-B': 'The Charles Schwab Corporation',
'CVR': 'Chicago Rivet & Machine Co.',
'PLUS': 'ePlus inc.',
'DTLK': 'Datalink Corporation',
'HFBL': 'Home Federal Bancorp, Inc. of Louisiana',
'SKYS': 'Sky Solar Holdings, Ltd.',
'HIMX': 'Himax Technologies, Inc.',
'HUSI-H.CL': 'HSBC USA, Inc.',
'STC': 'Stewart Information Services Corporation',
'VSH': 'Vishay Intertechnology, Inc.',
'MSFG': 'MainSource Financial Group, Inc.',
'ELA': 'Entergy Louisiana, Inc.',
'AZO': 'AutoZone, Inc.',
'ETSY': 'Etsy, Inc.',
'HOLI': 'Hollysys Automation Technologies, Ltd.',
'LPSB': 'LaPorte Bancorp, Inc.',
'ARLZ': 'Aralez Pharmaceuticals Inc.',
'GNRT': 'Gener8 Maritime, Inc.',
'AXTA': 'Axalta Coating Systems Ltd.',
'CLH': 'Clean Harbors, Inc.',
'BNED': 'Barnes & Noble Education, Inc',
'CERC': 'Cerecor Inc.',
'SBRAP': 'Sabra Healthcare REIT, Inc.',
'GLU': 'The Gabelli Global Utility and Income Trust',
'EMG': 'Emergent Capital, Inc.',
'CSCD': 'Cascade Microtech, Inc.',
'BPOPN': 'Popular, Inc.',
'YOD': 'You On Demand Holdings, Inc.',
'FELP': 'Foresight Energy LP',
'PPX': 'PPL Capital Funding, Inc.',
'TBBK': 'The Bancorp, Inc.',
'MRLN': 'Marlin Business Services Corp.',
'PBBI': 'PB Bancorp, Inc.',
'MGYR': 'Magyar Bancorp, Inc.',
'Z': 'Zillow Group, Inc.',
'ONVI': 'Onvia, Inc.',
'C-C': 'Citigroup Inc.',
'XLNX': 'Xilinx, Inc.',
'ECA': 'Encana Corporation',
'RWLK': 'ReWalk Robotics Ltd',
'RENN': 'Renren Inc.',
'MCUR': 'MACROCURE LTD.',
'HTA': 'Healthcare Trust of America, Inc.',
'WMGIZ': 'Wright Medical Group N.V.',
'WVE': 'WAVE Life Sciences Ltd.',
'CHCI': 'Comstock Holding Companies, Inc.',
'NLSN': 'Nielsen N.V.',
'NR': 'Newpark Resources, Inc.',
'GGP-A': 'General Growth Properties, Inc.',
'FEP': 'First Trust Europe AlphaDEX Fund',
'CO': 'China Cord Blood Corporation',
'AHT-A': 'Ashford Hospitality Trust Inc',
'ABRN': 'Arbor Realty Trust',
'LMRK': 'Landmark Infrastructure Partners LP',
'FGBI': 'First Guaranty Bancshares, Inc.',
'JOB': 'General Employment Enterprises, Inc.',
'SCWX': 'SecureWorks Corp.',
'STX': 'Seagate Technology PLC',
'AIV-Z': 'Apartment Investment and Management Company',
'FNWB': 'First Northwest Bancorp',
'EVTC': 'Evertec, Inc.',
'TAP': 'Molson Coors Brewing Company',
'SEAC': 'SeaChange International, Inc.',
'AMRI': 'Albany Molecular Research, Inc.',
'CZWI': 'Citizens Community Bancorp, Inc.',
'OZRK': 'Bank of the Ozarks',
'PMTS': 'CPI Card Group Inc.',
'NX': 'Quanex Building Products Corporation',
'DGRW': 'WisdomTree U.S. Quality Dividend Growth Fund',
'DXPE': 'DXP Enterprises, Inc.',
'MGN': 'Mines Management, Inc.',
'JPM.WS': 'J P Morgan Chase & Co',
'NLS': 'Nautilus Group, Inc. (The)',
'MTP': 'Midatech Pharma PLC',
'LVNTB': 'Liberty Interactive Corporation',
'ECL': 'Ecolab Inc.',
'ERS': 'Empire Resources, Inc.',
'BXP': 'Boston Properties, Inc.',
'FMB': 'First Trust Managed Municipal ETF',
'SYY': 'Sysco Corporation',
'TY': 'Tri Continental Corporation',
'WEYS': 'Weyco Group, Inc.',
'ECTE': 'Echo Therapeutics, Inc.',
'USNA': 'USANA Health Sciences, Inc.',
'RBCN': 'Rubicon Technology, Inc.',
'HIFR': 'InfraREIT, Inc.',
'QLYS': 'Qualys, Inc.',
'BCOM': 'B Communications Ltd.',
'GF': 'New Germany Fund, Inc. (The)',
'REXX': 'Rex Energy Corporation',
'TTOO': 'T2 Biosystems, Inc.',
'RNN': 'Rexahn Pharmaceuticals, Inc.',
'NAV-D': 'Navistar International Corporation',
'HCOM': 'Hawaiian Telcom Holdco, Inc.',
'RITT': 'RIT Technologies Ltd.',
'NRP': 'Natural Resource Partners LP',
'CBO': 'CBO (Listing Market - NYSE - Networks A/E)',
'SKIS': 'Peak Resorts, Inc.',
'URI': 'United Rentals, Inc.',
'RAIL': 'Freightcar America, Inc.',
'TSI': 'TCW Strategic Income Fund, Inc.',
'VNO-K': 'Vornado Realty Trust',
'ENTG': 'Entegris, Inc.',
'AINC': 'Ashford Inc.',
'MXC': 'Mexco Energy Corporation',
'AGRX': 'Agile Therapeutics, Inc.',
'HDB': 'HDFC Bank Limited',
'DK': 'Delek US Holdings, Inc.',
'BDL': 'Flanigan\'s Enterprises, Inc.',
'AMRK': 'A-Mark Precious Metals, Inc.',
'EPAM': 'EPAM Systems, Inc.',
'HDS': 'HD Supply Holdings, Inc.',
'IM': 'Ingram Micro Inc.',
'INOV': 'Inovalon Holdings, Inc.',
'BSET': 'Bassett Furniture Industries, Incorporated',
'FCAM': 'Fiat Chrysler Automobiles N.V.',
'PCG-I': 'Pacific Gas & Electric Co.',
'BAC.WS.A': 'Bank of America Corporation',
'UAN': 'CVR Partners, LP',
'WHLM': 'Wilhelmina International, Inc.',
'PXS': 'Pyxis Tankers Inc.',
'RCKY': 'Rocky Brands, Inc.',
'PSCI': 'PowerShares S&P SmallCap Industrials Portfolio',
'GPIC': 'Gaming Partners International Corporation',
'CRMD': 'CorMedix Inc',
'TEDU': 'Tarena International, Inc.',
'TRNO-A': 'Terreno Realty Corporation',
'HGG': 'HHGregg, Inc.',
'RLGY': 'Realogy Holdings Corp.',
'TMK': 'Torchmark Corporation',
'LNT': 'Alliant Energy Corporation',
'LENS': 'Presbia PLC',
'BITA': 'Bitauto Holdings Limited',
'PWOD': 'Penns Woods Bancorp, Inc.',
'BYD': 'Boyd Gaming Corporation',
'MDLY': 'Medley Management Inc.',
'NVTR': 'Nuvectra Corporation',
'EQIX': 'Equinix, Inc.',
'INZ': 'ING Group, N.V.',
'NQ': 'NQ Mobile Inc.',
'MO': 'Altria Group',
'DSX': 'Diana Shipping inc.',
'CVV': 'CVD Equipment Corporation',
'ENLK': 'EnLink Midstream Partners, LP',
'CA': 'CA Inc.',
'LMFA': 'LM Funding America, Inc.',
'SYBT': 'Stock Yards Bancorp, Inc.',
'INTX': 'Intersections, Inc.',
'PIM': 'Putnam Master Intermediate Income Trust',
'SBBP': 'Strongbridge Biopharma plc',
'RSE': 'Rouse Properties, Inc.',
'SHOP': 'Shopify Inc.',
'AMP': 'AMERIPRISE FINANCIAL SERVICES, INC.',
'NAK': 'Northern Dynasty Minerals, Ltd.',
'VGR': 'Vector Group Ltd.',
'EXTR': 'Extreme Networks, Inc.',
'JMPC': 'JMP Group LLC',
'VNRX': 'VolitionRX Limited',
'PHIIK': 'PHI, Inc.',
'WIN': 'Windstream Holdings, Inc.',
'NVS': 'Novartis AG',
'GM.WS.A': 'General Motors Company',
'IVAC': 'Intevac, Inc.',
'NVEE': 'NV5 Global, Inc.',
'FE': 'FirstEnergy Corporation',
'RMT': 'Royce Micro-Cap Trust, Inc.',
'NAII': 'Natural Alternatives International, Inc.',
'FRED': 'Fred\'s, Inc.',
'CPS': 'Cooper-Standard Holdings Inc.',
'TVIX': 'region',
'VMBS': 'Vanguard Mortgage-Backed Securities ETF',
'YUME': 'YuMe, Inc.',
'MTDR': 'Matador Resources Company',
'FSC': 'Fifth Street Finance Corp.',
'RLJE': 'RLJ Entertainment, Inc.',
'NTEC': 'Intec Pharma Ltd.',
'MDRX': 'Allscripts Healthcare Solutions, Inc.',
'ALJ': 'Alon USA Energy, Inc.',
'CHRS': 'Coherus BioSciences, Inc.',
'FCH-A': 'FelCor Lodging Trust Incorporated',
'CCOI': 'Cogent Communications Holdings, Inc.',
'VLTC': 'Voltari Corporation',
'VGSH': 'Vanguard Short-Term Government ETF',
'PB': 'Prosperity Bancshares, Inc.',
'SAFT': 'Safety Insurance Group, Inc.',
'ARR-A': 'ARMOUR Residential REIT, Inc.',
'RDN': 'Radian Group Inc.',
'MWR': 'Morgan Stanley',
'SANW': 'S&W Seed Company',
'MEP': 'Midcoast Energy Partners, L.P.',
'RPAI': 'Retail Properties of America, Inc.',
'LPT': 'Liberty Property Trust',
'EZT': 'Entergy Texas Inc',
'PNI': 'Pimco New York Municipal Income Fund II',
'CLBS': 'Caladrius Biosciences, Inc.',
'PCYG': 'Park City Group, Inc.',
'PLXP': 'PLx Pharma Inc.',
'JRI': 'Nuveen Real Asset Income and Growth Fund',
'LL': 'Lumber Liquidators Holdings, Inc',
'RUN': 'Sunrun Inc.',
'CBU': 'Community Bank System, Inc.',
'XON': 'Intrexon Corporation',
'PPSI': 'Pioneer Power Solutions, Inc.',
'HRMNW': 'Harmony Merger Corp.',
'VLRS': 'Controladora Vuela Compania de Aviacion, S.A.B. de C.V.',
'GLO': 'Clough Global Opportunities Fund',
'PLKI': 'Popeyes Louisiana Kitchen, Inc.',
'FNLC': 'First Bancorp, Inc (ME)',
'BNTCW': 'Benitec Biopharma Limited',
'CYRN': 'CYREN Ltd.',
'RPTP': 'Raptor Pharmaceutical Corp.',
'BDR': 'Blonder Tongue Laboratories, Inc.',
'CNXR': 'Connecture, Inc.',
'FNTCU': 'FinTech Acquisition Corp.',
'ABUS': 'Arbutus Biopharma Corporation',
'LEDS': 'SemiLEDS Corporation',
'LMRKP': 'Landmark Infrastructure Partners LP',
'MBLY': 'Mobileye N.V.',
'WTBA': 'West Bancorporation',
'NPD': 'China Nepstar Chain Drugstore Ltd',
'MKTX': 'MarketAxess Holdings, Inc.',
'NWBO': 'Northwest Biotherapeutics, Inc.',
'GLDC': 'Golden Enterprises, Inc.',
'FLDM': 'Fluidigm Corporation',
'PYS': 'PPlus Trust',
'TAIT': 'Taitron Components Incorporated',
'MGI': 'Moneygram International, Inc.',
'EBIO': 'Eleven Biotherapeutics, Inc.',
'MXWL': 'Maxwell Technologies, Inc.',
'NCT-D': 'Newcastle Investment Corporation',
'CYAD': 'Celyad SA',
'SWIR': 'Sierra Wireless, Inc.',
'GWW': 'W.W. Grainger, Inc.',
'SA': 'Seabridge Gold, Inc.',
'KTEC': 'Key Technology, Inc.',
'TCS': 'Container Store (The)',
'MAV': 'Pioneer Municipal High Income Advantage Trust',
'JMPB': 'JMP Group LLC',
'PRE-E': 'PartnerRe Ltd.',
'NWFL': 'Norwood Financial Corp.',
'RAS-C': 'RAIT Financial Trust',
'SKYY': 'First Trust ISE Cloud Computing Index Fund',
'PEGA': 'Pegasystems Inc.',
'TRVN': 'Trevena, Inc.',
'NURO': 'NeuroMetrix, Inc.',
'WCG': 'WellCare Health Plans, Inc.',
'ANF': 'Abercrombie & Fitch Company',
'GBAB': 'Guggenheim Build America Bonds Managed Duration Trust',
'STL': 'Sterling Bancorp',
'ARES': 'Ares Management L.P.',
'VYGR': 'Voyager Therapeutics, Inc.',
'SCLN': 'SciClone Pharmaceuticals, Inc.',
'NKE': 'Nike, Inc.',
'ACN': 'Accenture plc',
'LNGR': 'Global X Longevity Thematic ETF',
'NK': 'NantKwest, Inc.',
'TWMC': 'Trans World Entertainment Corp.',
'VNRAP': 'Vanguard Natural Resources LLC',
'SPH': 'Suburban Propane Partners, L.P.',
'MT': 'ArcelorMittal',
'TTNP': 'Titan Pharmaceuticals, Inc.',
'HZN': 'Horizon Global Corporation',
'ANIP': 'ANI Pharmaceuticals, Inc.',
'MDSY': 'ModSys International Ltd.',
'NXEOW': 'Nexeo Solutions, Inc.',
'NRF-D': 'Northstar Realty Finance Corp.',
'VBF': 'Invesco Bond Fund',
'CAH': 'Cardinal Health, Inc.',
'UNAM': 'Unico American Corporation',
'GILD': 'Gilead Sciences, Inc.',
'NXEO': 'Nexeo Solutions, Inc.',
'ATVI': 'Activision Blizzard, Inc',
'UMH-A': 'UMH Properties, Inc.',
'VXUS': 'Vanguard Total International Stock ETF',
'BK-C': 'Bank Of New York Mellon Corporation (The)',
'CE': 'Celanese Corporation',
'SLW': 'Silver Wheaton Corp',
'SPG-J': 'Simon Property Group, Inc.',
'NBR': 'Nabors Industries Ltd.',
'TRUE': 'TrueCar, Inc.',
'IMPV': 'Imperva, Inc.',
'VA': 'Virgin America Inc.',
'MGM': 'MGM Resorts International',
'TEN': 'Tenneco Inc.',
'CLDX': 'Celldex Therapeutics, Inc.',
'EXPD': 'Expeditors International of Washington, Inc.',
'TBPH': 'Theravance Biopharma, Inc.',
'BFO': 'Blackrock Florida Municipal 2020 Term Trust',
'BFS': 'Saul Centers, Inc.',
'PBF': 'PBF Energy Inc.',
'CLRBZ': 'Cellectar Biosciences, Inc.',
'MGPI': 'MGP Ingredients, Inc.',
'AIRT': 'Air T, Inc.',
'TCO-J': 'Taubman Centers, Inc.',
'MLAB': 'Mesa Laboratories, Inc.',
'CTIB': 'CTI Industries Corporation',
'CISG': 'CNinsure Inc.',
'MTB-': 'M&T Bank Corporation',
'EBTC': 'Enterprise Bancorp Inc',
'AKP': 'Alliance California Municipal Income Fund Inc',
'MKC': 'McCormick & Company, Incorporated',
'RXN': 'Rexnord Corporation',
'EQBK': 'Equity Bancshares, Inc.',
'CC': 'Chemours Company (The)',
'AFA': 'American Financial Group, Inc.',
'QLIK': 'Qlik Technologies Inc.',
'EML': 'Eastern Company (The)',
'PSCU': 'PowerShares S&P SmallCap Utilities Portfolio',
'PVH': 'PVH Corp.',
'NEP': 'NextEra Energy Partners, LP',
'GPC': 'Genuine Parts Company',
'EGF': 'Blackrock Enhanced Government Fund, Inc',
'UA': 'Under Armour, Inc.',
'CKH': 'SEACOR Holdings, Inc.',
'NUS': 'Nu Skin Enterprises, Inc.',
'HTH': 'Hilltop Holdings Inc.',
'FLL': 'Full House Resorts, Inc.',
'AAN': 'Aaron\'s, Inc.',
'ELON': 'Echelon Corporation',
'AKO.B': 'Embotelladora Andina S.A.',
'CENT': 'Central Garden & Pet Company',
'ARMK': 'Aramark',
'ASYS': 'Amtech Systems, Inc.',
'TTEC': 'TeleTech Holdings, Inc.',
'HTD': 'John Hancock Tax Advantaged Dividend Income Fund',
'EV': 'Eaton Vance Corporation',
'DGII': 'Digi International Inc.',
'TRCH': 'Torchlight Energy Resources, Inc.',
'SPIL': 'Siliconware Precision Industries Company, Ltd.',
'VRNT': 'Verint Systems Inc.',
'CHKE': 'Cherokee Inc.',
'TOT': 'TotalFinaElf, S.A.',
'UTI': 'Universal Technical Institute Inc',
'NLST': 'Netlist, Inc.',
'GTN': 'Gray Television, Inc.',
'CHDN': 'Churchill Downs, Incorporated',
'ALP-O': 'Alabama Power Company',
'AFSI-E': 'AmTrust Financial Services, Inc.',
'SBOT': 'Stellar Biotechnologies, Inc.',
'YHOO': 'Yahoo! Inc.',
'BGCP': 'BGC Partners, Inc.',
'JIVE': 'Jive Software, Inc.',
'REGN': 'Regeneron Pharmaceuticals, Inc.',
'DNOW': 'NOW Inc.',
'OSG': 'Overseas Shipholding Group, Inc.',
'ADRD': 'BLDRS Developed Markets 100 ADR Index Fund',
'CLSD': 'Clearside Biomedical, Inc.',
'CTQ': 'Qwest Corporation',
'BHI': 'Baker Hughes Incorporated',
'STAR-D': 'iStar Financial Inc.',
'PFGC': 'Performance Food Group Company',
'JBT': 'John Bean Technologies Corporation',
'AMCX': 'AMC Networks Inc.',
'SPKE': 'Spark Energy, Inc.',
'MNR-A': 'Monmouth Real Estate Investment Corporation',
'MXF': 'Mexico Fund, Inc. (The)',
'PSET': 'Principal Price Setters Index ETF',
'BTN': 'Ballantyne Strong, Inc',
'KOPN': 'Kopin Corporation',
'AROC': 'Archrock, Inc.',
'FSS': 'Federal Signal Corporation',
'ALKS': 'Alkermes plc',
'ACW': 'Accuride Corporation New',
'PLCE': 'Children\'s Place, Inc. (The)',
'SXL': 'Sunoco Logistics Partners LP',
'AEHR': 'Aehr Test Systems',
'MICTW': 'Micronet Enertec Technologies, Inc.',
'LTRPB': 'Liberty TripAdvisor Holdings, Inc.',
'WY-A': 'Weyerhaeuser Company',
'TSEM': 'Tower Semiconductor Ltd.',
'PBT': 'Permian Basin Royalty Trust',
'ENH': 'Endurance Specialty Holdings Ltd',
'TCAP': 'Triangle Capital Corporation',
'MGA': 'Magna International, Inc.',
'FYC': 'First Trust Small Cap Growth AlphaDEX Fund',
'KNX': 'Knight Transportation, Inc.',
'IRG': 'Ignite Restaurant Group, Inc.',
'VCEL': 'Vericel Corporation',
'PTCT': 'PTC Therapeutics, Inc.',
'BRG-A': 'Bluerock Residential Growth REIT, Inc.',
'APT': 'Alpha Pro Tech, Ltd.',
'FEI': 'First Trust MLP and Energy Income Fund',
'GXP-E': 'Great Plains Energy Inc',
'KATE': 'Kate Spade & Company',
'CSV': 'Carriage Services, Inc.',
'ADMS': 'Adamas Pharmaceuticals, Inc.',
'STAG-B': 'Stag Industrial, Inc.',
'ENTL': 'Entellus Medical, Inc.',
'DXYN': 'The Dixie Group, Inc.',
'WNRL': 'Western Refining Logistics, LP',
'PKO': 'Pimco Income Opportunity Fund',
'NBCP': 'NB Capital Acquisition Corp.',
'LYTS': 'LSI Industries Inc.',
'TTEK': 'Tetra Tech, Inc.',
'DKS': 'Dick\'s Sporting Goods Inc',
'TACOW': 'Del Taco Restaurants, Inc.',
'CHMT': 'Chemtura Corp.',
'AGD': 'Alpine Global Dynamic Dividend Fund',
'PNF': 'PIMCO New York Municipal Income Fund',
'UCBA': 'United Community Bancorp',
'PODD': 'Insulet Corporation',
'AP': 'Ampco-Pittsburgh Corporation',
'AKR': 'Acadia Realty Trust',
'CVG': 'Convergys Corporation',
'CSBK': 'Clifton Bancorp Inc.',
'UMH-B': 'UMH Properties, Inc.',
'VZA': 'Verizon Communications Inc.',
'FF': 'FutureFuel Corp.',
'UTHR': 'United Therapeutics Corporation',
'KNDI': 'Kandi Technologies Group, Inc.',
'CEN': 'Center Coast MLP & Infrastructure Fund',
'HYB': 'New America High Income Fund, Inc. (The)',
'FTCS': 'First Trust Capital Strength ETF',
'TLLP': 'Tesoro Logistics LP',
'CMP': 'Compass Minerals International, Inc.',
'MFINL': 'Medallion Financial Corp.',
'BEAV': 'B/E Aerospace, Inc.',
'RMP': 'Rice Midstream Partners LP',
'DLR-E': 'Digital Realty Trust, Inc.',
'ANET': 'Arista Networks, Inc.',
'DHIL': 'Diamond Hill Investment Group, Inc.',
'CVT': 'CVENT, INC.',
'EACQ': 'Easterly Acquisition Corp.',
'AMSF': 'AMERISAFE, Inc.',
'MELI': 'MercadoLibre, Inc.',
'IMOS': 'ChipMOS TECHNOLOGIES (Bermuda) LTD.',
'NFEC': 'NF Energy Saving Corporation',
'MSLI': 'Merus Labs International Inc.',
'DHG': 'DWS High Income Opportunities Fund, Inc.',
'GOLD': 'Randgold Resources Limited',
'CUTR': 'Cutera, Inc.',
'CASH': 'Meta Financial Group, Inc.',
'PEP': 'Pepsico, Inc.',
'REIS': 'Reis, Inc',
'BWXT': 'BWX Technologies, Inc.',
'MAC': 'Macerich Company (The)',
'ECCA': 'Eagle Point Credit Company Inc.',
'CLS': 'Celestica, Inc.',
'MORE ': 'Monogram Residential Trust, Inc.',
'DAIO': 'Data I/O Corporation',
'AEPI': 'AEP Industries Inc.',
'RYAAY': 'Ryanair Holdings plc',
'AEL': 'American Equity Investment Life Holding Company',
'CHK-D': 'Chesapeake Energy Corporation',
'APAM': 'Artisan Partners Asset Management Inc.',
'LOXO': 'Loxo Oncology, Inc.',
'BOXC': 'Brookfield Canada Office Properties',
'HRL': 'Hormel Foods Corporation',
'DRAD': 'Digirad Corporation',
'BSPM': 'Biostar Pharmaceuticals, Inc.',
'CIR': 'CIRCOR International, Inc.',
'CET': 'Central Securities Corporation',
'L': 'Loews Corporation',
'PFBX': 'Peoples Financial Corporation',
'IPCC': 'Infinity Property and Casualty Corporation',
'DD-A': 'E.I. du Pont de Nemours and Company',
'OGEN': 'Oragenics, Inc.',
'XBIT': 'XBiotech Inc.',
'RYI': 'Ryerson Holding Corporation',
'BRK.A': 'Berkshire Hathaway Inc.',
'WINT': 'Windtree Therapeutics, Inc.',
'GIMO': 'Gigamon Inc.',
'TRI': 'Thomson Reuters Corp',
'XELB': 'Xcel Brands, Inc',
'BSAC': 'Banco Santander Chile',
'COF-C': 'Capital One Financial Corporation',
'HSC': 'Harsco Corporation',
'FBSS': 'Fauquier Bankshares, Inc.',
'CAB': 'Cabela\'s Inc',
'GOF': 'Guggenheim Strategic Opportunities Fund',
 'NRT': 'North European Oil Royalty Trust',
'ANFI': 'Amira Nature Foods Ltd',
'LNC': 'Lincoln National Corporation',
'COF.WS': 'Capital One Financial Corporation',
'VLO': 'Valero Energy Corporation',
'STRA': 'Strayer Education, Inc.',
'NC': 'NACCO Industries, Inc.',
'HPQ': 'HP Inc.',
'DYN.WS': 'Dynegy Inc.',
'ZLTQ': 'ZELTIQ Aesthetics, Inc.',
'GIM': 'Templeton Global Income Fund, Inc.',
'BNFT': 'Benefitfocus, Inc.',
'LFVN': 'Lifevantage Corporation',
'COF': 'Capital One Financial Corporation',
'CTO': 'Consolidated-Tomoka Land Co.',
'BSTC': 'BioSpecifics Technologies Corp',
'PZRX': 'PhaseRx, Inc.',
'PFNX': 'Pfenex Inc.',
'ASG': 'Liberty All-Star Growth Fund, Inc.',
'WFC-Q': 'Wells Fargo & Company',
'ASB.WS': 'Associated Banc-Corp',
'GWR': 'Genesee & Wyoming, Inc.',
'JONE': 'Jones Energy, Inc.',
'DSPG': 'DSP Group, Inc.',
'DXPS': 'WisdomTree United Kingdom Hedged Equity Fund',
'DMD': 'Demand Media Inc.',
'GNRX': 'VanEck Vectors Generic Drugs ETF',
'CFA': 'Victory CEMP US 500 Volatility Wtd Index ETF',
'GWRS': 'Global Water Resources, Inc.',
'VTWG': 'Vanguard Russell 2000 Growth ETF',
'CNCR': 'Loncar Cancer Immunotherapy ETF',
'ASET': 'FlexShares Real Assets Allocation Index Fund',
'CLIR': 'ClearSign Combustion Corporation',
'WDC': 'Western Digital Corporation',
'ARNA': 'Arena Pharmaceuticals, Inc.',
'UPIP': 'Unwired Planet, Inc.',
'EAC ': 'Erickson Incorporated',
'AAWW': 'Atlas Air Worldwide Holdings',
'ABR-A': 'Arbor Realty Trust',
'QSR': 'Restaurant Brands International Inc.',
'BTZ': 'BlackRock Credit Allocation Income Trust',
'CAPNW': 'Capnia, Inc.',
'VNR': 'Vanguard Natural Resources LLC',
'CELGZ': 'Celgene Corporation',
'INUV': 'Inuvo, Inc',
'LBTYK': 'Liberty Global plc',
'PZZA': 'Papa John\'S International, Inc.',
'NEM': 'Newmont Mining Corporation',
'PED': 'Pedevco Corp.',
'EVY': 'Eaton Vance New York Municipal Income Trust',
'AIZ': 'Assurant, Inc.',
'COG': 'Cabot Oil & Gas Corporation',
 'CSAL': 'Communications Sales & Leasing, Inc.',
'EXAM': 'ExamWorks Group, Inc.',
'NMRX': 'Numerex Corp.',
'SIMO': 'Silicon Motion Technology Corporation',
'BCX': 'BlackRock Resources',
'TAX': 'Liberty Tax, Inc.',
'IVC': 'Invacare Corporation',
'MMV': 'Eaton Vance Massachusetts Municipal Income Trust',
'HIL': 'Hill International, Inc.',
'WSTC': 'West Corporation',
'FAB': 'First Trust Multi Cap Value AlphaDEX Fund',
'WLL': 'Whiting Petroleum Corporation',
'DX': 'Dynex Capital, Inc.',
'DSXN': 'Diana Shipping inc.',
'RVLT': 'Revolution Lighting Technologies, Inc.',
'TI': 'Telecom Italia S.P.A.',
'ARTW': 'Art\'s-Way Manufacturing Co., Inc.',
'EQGP': 'EQT GP Holdings, LP',
'BLPH': 'Bellerophon Therapeutics, Inc.',
'NSTG': 'NanoString Technologies, Inc.',
'CJES': 'C&J Energy Services, Ltd.',
'CAMP': 'CalAmp Corp.',
'AIA': 'iShares Asia 50 ETF',
'GLP': 'Global Partners LP',
'VTVT': 'vTv Therapeutics Inc.',
'PFG': 'Principal Financial Group Inc',
'GIS': 'General Mills, Inc.',
'PRZM': 'Prism Technologies Group, Inc.',
'TFSC': '1347 Capital Corp.',
'TRMR': 'Tremor Video, Inc.',
'DTE': 'DTE Energy Company',
'ICON': 'Iconix Brand Group, Inc.',
'AHGP': 'Alliance Holdings GP, L.P.',
'NAD': 'Nuveen Dividend Advantage Municipal Fund',
'GRSHU': 'Gores Holdings, Inc.',
'HRS': 'Harris Corporation',
'PBIB': 'Porter Bancorp, Inc.',
'DEX': 'Delaware Enhanced Global Dividend',
'EOD': 'Wells Fargo Global Dividend Opportunity Fund',
'DTF': 'Duff & Phelps Utilities Tax-Free Income, Inc.',
'BMLA': 'BullMark LatAm Select Leaders ETF',
'EVRI': 'Everi Holdings Inc.',
'USAC': 'USA Compression Partners, LP',
'PAGG': 'PowerShares Global Agriculture Portfolio',
'TWI': 'Titan International, Inc.',
'ROIA': 'Radio One, Inc.',
'HYI': 'Western Asset High Yield Defined Opportunity Fund Inc.',
'ISRL': 'Isramco, Inc.',
'BLOX': 'Infoblox Inc.',
'PSB-T': 'PS Business Parks, Inc.',
'QRHC': 'Quest Resource Holding Corporation.',
'NCT-C': 'Newcastle Investment Corporation',
'CVM.WS': 'Cel-Sci Corporation',
'MDCO': 'The Medicines Company',
'AGO-B': 'Assured Guaranty Ltd.',
'HMLP': 'Hoegh LNG Partners LP',
'EFUT': 'eFuture Holding Inc.',
'XOM': 'Exxon Mobil Corporation',
'MRCY': 'Mercury Systems Inc',
'JRS': 'Nuveen Real Estate Fund',
'NPF': 'Nuveen Premier Municipal Income Fund, Inc.',
'DERM': 'Dermira, Inc.',
'BLVDW': 'Boulevard Acquisition Corp. II',
'BML-L': 'Bank of America Corporation',
'MBLX': 'Metabolix, Inc.',
'EVOK': 'Evoke Pharma, Inc.',
'NTC': 'Nuveen Connecticut Premium Income Municipal Fund',
'SHEN': 'Shenandoah Telecommunications Co',
'HBCP': 'Home Bancorp, Inc.',
'MNR': 'Monmouth Real Estate Investment Corporation',
'PSCC': 'PowerShares S&P SmallCap Consumer Staples Portfolio',
'ECAC': 'E-compass Acquisition Corp.',
'CMPR': 'Cimpress N.V',
'CENTA': 'Central Garden & Pet Company',
'RCS': 'PIMCO Strategic Income Fund, Inc.',
'FIZZ': 'National Beverage Corp.',
'JJSF': 'J & J Snack Foods Corp.',
'IBO': 'IBO (Listing Market - NYSE Amex Network B F)',
'POOL': 'Pool Corporation',
'IMI': 'Intermolecular, Inc.',
'HA': 'Hawaiian Holdings, Inc.',
'AXTI': 'AXT Inc',
'LGCYP': 'Legacy Reserves LP',
'FEYE': 'FireEye, Inc.',
'CNV': 'Cnova N.V.',
'VLY.WS': 'Valley National Bancorp',
'ALSN': 'Allison Transmission Holdings, Inc.',
'BMCH': 'BMC Stock Holdings, Inc.',
'FAX': 'Aberdeen Asia-Pacific Income Fund Inc',
'PZG': 'Paramount Gold Nevada Corp.',
'NEE-H': 'NextEra Energy, Inc.',
'PLUG': 'Plug Power, Inc.',
'CMA.WS': 'Comerica Incorporated',
'RAVN': 'Raven Industries, Inc.',
'GRVY': 'GRAVITY Co., Ltd.',
'VKTX': 'Viking Therapeutics, Inc.',
'SCHW-C': 'The Charles Schwab Corporation',
'CBSHP': 'Commerce Bancshares, Inc.',
'HMY': 'Harmony Gold Mining Company Limited',
'SCQ': 'Stellus Capital Investment Corporation',
'STXS': 'Stereotaxis, Inc.',
'GEVO': 'Gevo, Inc.',
'MWW': 'Monster Worldwide, Inc.',
'EEFT': 'Euronet Worldwide, Inc.',
'BABY': 'Natus Medical Incorporated',
'ENRJ-': 'EnerJex Resources, Inc.',
'NFG': 'National Fuel Gas Company',
'CWCO': 'Consolidated Water Co. Ltd.',
'BNCN': 'BNC Bancorp',
'GLNG': 'Golar LNG Limited',
'ACOR': 'Acorda Therapeutics, Inc.',
'GAB-J': 'Gabelli Equity Trust, Inc. (The)',
'FOXA': 'Twenty-First Century Fox, Inc.',
'ADGE': 'American DG Energy Inc.',
'LIND': 'Lindblad Expeditions Holdings Inc.',
'WGBS': 'WaferGen Bio-systems, Inc.',
'AGEN': 'Agenus Inc.',
'ESS': 'Essex Property Trust, Inc.',
'ESXB': 'Community Bankers Trust Corporation.',
'FOR': 'Forestar Group Inc',
'PSB-V': 'PS Business Parks, Inc.',
'HBANP': 'Huntington Bancshares Incorporated',
'OCC': 'Optical Cable Corporation',
'IGR': 'CBRE Clarion Global Real Estate Income Fund',
'ACY': 'AeroCentury Corp.',
'VCF': 'Delaware Investments Colorado Municipal Income Fund, Inc',
'ACET': 'Aceto Corporation',
'RPD': 'Rapid7, Inc.',
'INOD': 'Innodata Inc.',
'FDS': 'FactSet Research Systems Inc.',
'FPL': 'First Trust New Opportunities MLP & Energy Fund',
'FEMB': 'First Trust Emerging Markets Local Currency Bond ETF',
'HT-D': 'Hersha Hospitality Trust',
'FTI': 'FMC Technologies, Inc.',
'PRE-G': 'PartnerRe Ltd.',
'ROST': 'Ross Stores, Inc.',
 'MSA': 'MSA Safety Incorporated',
'KSU': 'Kansas City Southern',
'SEMG': 'Semgroup Corporation',
'VHI': 'Valhi, Inc.',
'XTLB': 'XTL Biopharmaceuticals Ltd.',
'NEV': 'Nuveen Enhanced Municipal Value Fund',
'CAA': 'CalAtlantic Group, Inc.',
'SCE-K': 'Southern California Edison Company',
'IDA': 'IDACORP, Inc.',
'FCNCA': 'First Citizens BancShares, Inc.',
'UDBI': 'Legg Mason US Diversified Core ETF',
'BME': 'Blackrock Health Sciences Trust',
'ACCO': 'Acco Brands Corporation',
'AUPH': 'Aurinia Pharmaceuticals Inc',
'HIW': 'Highwoods Properties, Inc.',
'NRO': 'Neuberger Berman Real Estate Securities Income Fund, Inc.',
'HPJ': 'Highpower International Inc',
'ZOES': 'Zoe\'s Kitchen, Inc.',
'PCMI': 'PCM, Inc.',
'AMC': 'AMC Entertainment Holdings, Inc.',
'CUR': 'Neuralstem, Inc.',
'PCG': 'Pacific Gas & Electric Co.',
'KTWO': 'K2M Group Holdings, Inc.',
'XTLY': 'Xactly Corporation',
'STAR-G': 'iStar Financial Inc.',
'VWR': 'VWR Corporation',
'WTW': 'Weight Watchers International Inc',
'MAMS': 'MAM Software Group, Inc.',
'BPOPM': 'Popular, Inc.',
'WFT': 'Weatherford International plc',
'RICK': 'RCI Hospitality Holdings, Inc.',
'EAGL': 'Double Eagle Acquisition Corp.',
'PGC': 'Peapack-Gladstone Financial Corporation',
'SM': 'SM Energy Company',
'GSB': 'GlobalSCAPE, Inc.',
'PFS': 'Provident Financial Services, Inc',
'KS': 'KapStone Paper and Packaging Corporation',
'PCCC': 'PC Connection, Inc.',
'EXEL': 'Exelixis, Inc.',
'INFI': 'Infinity Pharmaceuticals, Inc.',
'FCE.B': 'Forest City Realty Trust, Inc.',
'PSA-B': 'Public Storage',
'LTS-A': 'Ladenburg Thalmann Financial Services Inc',
'BRX': 'Brixmor Property Group Inc.',
'DLPH': 'Delphi Automotive plc',
'SLRC': 'Solar Capital Ltd.',
'EOT': 'Eaton Vance Municipal Income Trust',
'BLDR': 'Builders FirstSource, Inc.',
'CHU': 'China Unicom (Hong Kong) Ltd',
'AVA': 'Avista Corporation',
'RGT': 'Royce Global Value Trust, Inc.',
'PFO': 'Flaherty & Crumrine Preferred Income Opportunity Fund Inc',
'SGRY': 'Surgery Partners, Inc.',
'VVI': 'Viad Corp',
'LFL': 'LATAM Airlines Group S.A.',
'KMM': 'Scudder Multi-Market Income Trust',
'IKGH': 'Iao Kun Group Holding Company Limited',
'STB': 'Student Transportation Inc',
'AMPE': 'Ampio Pharmaceuticals, Inc.',
'AMSWA': 'American Software, Inc.',
'DFP': 'Flaherty & Crumrine Dynamic Preferred and Income Fund Inc.',
'CAT': 'Caterpillar, Inc.',
'FARO': 'FARO Technologies, Inc.',
'NDRM': 'NeuroDerm Ltd.',
'CLNT': 'Cleantech Solutions International, Inc.',
'SINA': 'Sina Corporation',
'KRC': 'Kilroy Realty Corporation',
'BAC-E': 'Bank of America Corporation',
'FNHC': 'Federated National Holding Company',
'CADTW': 'DT Asia Investments Limited',
'ACRS': 'Aclaris Therapeutics, Inc.',
'AGN': 'Allergan plc.',
'UMH': 'UMH Properties, Inc.',
 'GGN-B': 'GAMCO Global Gold, Natural Resources & Income Trust ',
'TNP-C': 'Tsakos Energy Navigation Ltd',
'DTYL': 'region',
'APLE': 'Apple Hospitality REIT, Inc.',
'SENEB': 'Seneca Foods Corp.',
'TJX': 'TJX Companies, Inc. (The)',
'MTN': 'Vail Resorts, Inc.',
'INDB': 'Independent Bank Corp.',
'TCPC': 'TCP Capital Corp.',
'WMS': 'Advanced Drainage Systems, Inc.',
'BKFS': 'Black Knight Financial Services, Inc.',
'BV': 'Bazaarvoice, Inc.',
'AXSM': 'Axsome Therapeutics, Inc.',
'VNO-I': 'Vornado Realty Trust',
'TOO-A': 'Teekay Offshore Partners L.P.',
'KHI': 'Scudder High Income Trust',
'SBPH': 'Spring Bank Pharmaceuticals, Inc.',
'EBF': 'Ennis, Inc.',
'PDVW': 'pdvWireless, Inc.',
'UBOH': 'United Bancshares, Inc.',
'MKC.V': 'McCormick & Company, Incorporated',
'MTSI': 'MACOM Technology Solutions Holdings, Inc.',
'KLREU': 'KLR Energy Acquisition Corp.',
'IIJI': 'Internet Initiative Japan, Inc.',
'AEGR': 'Aegerion Pharmaceuticals, Inc.',
'VII': 'Vicon Industries, Inc.',
'GRA': 'W.R. Grace & Co.',
'FBHS': 'Fortune Brands Home & Security, Inc.',
'WWE': 'World Wrestling Entertainment, Inc.',
'LEN': 'Lennar Corporation',
'SAP': 'SAP SE',
'ADRE': 'BLDRS Emerging Markets 50 ADR Index Fund',
'BIOA': 'BioAmber Inc.',
 'JSYNU': 'Jensyn Acquisition Corp.',
'WPCS': 'WPCS International Incorporated',
'AMRC': 'Ameresco, Inc.',
'CMRX': 'Chimerix, Inc.',
'BML-H': 'Bank of America Corporation',
'MHD': 'Blackrock MuniHoldings Fund, Inc.',
'FLN': 'First Trust Latin America AlphaDEX Fund',
'ISDR': 'Issuer Direct Corporation',
'TTS': 'Tile Shop Hldgs, Inc.',
'PLPC': 'Preformed Line Products Company',
'KMPA': 'Kemper Corporation',
'UBIO': 'Proshares UltraPro Nasdaq Biotechnology',
'HE': 'Hawaiian Electric Industries, Inc.',
'ASB-C': 'Associated Banc-Corp',
'WOOF': 'VCA Inc. ',
'DSU': 'Blackrock Debt Strategies Fund, Inc.',
'HOMB': 'Home BancShares, Inc.',
'TANO': 'TravelCenters of America LLC',
'USLV': 'region',
'FLT': 'FleetCor Technologies, Inc.',
'IMN': 'Imation Corporation',
'BGNE': 'BeiGene, Ltd.',
'PRAN': 'Prana Biotechnology Ltd',
'FPT': 'Federated Premier Intermediate Municipal Income Fund',
'JCE': 'Nuveen Core Equity Alpha Fund',
'PWE': 'Penn West Petroleum Ltd',
'SONS': 'Sonus Networks, Inc.',
'TAC': 'TransAlta Corporation',
'CATM': 'Cardtronics, Inc.',
'MSBF': 'MSB Financial Corp.',
'MSB': 'Mesabi Trust',
'NUE': 'Nucor Corporation',
'JTD': 'Nuveen Tax-Advantaged Dividend Growth Fund',
'LOGI': 'Logitech International S.A.',
'SBH': 'Sally Beauty Holdings, Inc.',
'CLSN': 'Celsion Corporation',
'SRF': 'Cushing Energy Income Fund (The)',
'SNDX': 'Syndax Pharmaceuticals, Inc.',
'TMO': 'Thermo Fisher Scientific Inc',
'REPH': 'Recro Pharma, Inc.',
'MFNC': 'Mackinac Financial Corporation',
'TFX': 'Teleflex Incorporated',
'SOL': 'Renesola Ltd.',
'SO': 'Southern Company (The)',
'ENS': 'Enersys',
'COR': 'CoreSite Realty Corporation',
'SEP': 'Spectra Energy Partners, LP',
'CAE': 'CAE Inc',
'MBUU': 'Malibu Boats, Inc.',
'CARV': 'Carver Bancorp, Inc.',
'GV': 'Goldfield Corporation (The)',
'KODK.WS': 'Eastman Kodak Company',
'PSB': 'PS Business Parks, Inc.',
'MPA': 'Blackrock MuniYield Pennsylvania Quality Fund',
'ASB': 'Associated Banc-Corp',
'PTEN': 'Patterson-UTI Energy, Inc.',
'GLDD': 'Great Lakes Dredge & Dock Corporation',
'LXP': 'Lexington Realty Trust',
'HSON': 'Hudson Global, Inc.',
'CNCO': 'Cencosud S.A.',
'AMCO': 'Armco Metals Holdings, Inc.',
'AGRO': 'Adecoagro S.A.',
'DAL': 'Delta Air Lines, Inc.',
'CDOR': 'Condor Hospitality Trust, Inc.',
'BIOL': 'Biolase, Inc.',
'CSH': 'Cash America International, Inc.',
'BMRN': 'BioMarin Pharmaceutical Inc.',
'CG': 'The Carlyle Group L.P.',
'AMBC': 'Ambac Financial Group, Inc.',
'STI-A': 'SunTrust Banks, Inc.',
'CUO': 'Continental Materials Corporation',
'BAS': 'Basic Energy Services, Inc.',
'ZHNE': 'Zhone Technologies, Inc.',
'KW': 'Kennedy-Wilson Holdings Inc.',
'SYMC': 'Symantec Corporation',
'ENT': 'Global Eagle Entertainment Inc.',
'CCI-A': 'Crown Castle International Corporation',
'BSM': 'Black Stone Minerals, L.P.',
'PTNR': 'Partner Communications Company Ltd.',
'MUA': 'Blackrock MuniAssets Fund, Inc.',
'DHI': 'D.R. Horton, Inc.',
'NXP': 'Nuveen Select Tax Free Income Portfolio',
'MEN': 'Blackrock MuniEnhanced Fund, Inc.',
'TRR': 'TRC Companies, Inc.',
'ENIC': 'Enersis Chile S.A.',
'ROG': 'Rogers Corporation',
'CFBK': 'Central Federal Corporation',
'STEM': 'StemCells, Inc.',
'MELR': 'Melrose Bancorp, Inc.',
'GIB': 'CGI Group, Inc.',
'QQXT': 'First Trust NASDAQ-100 Ex-Technology Sector Index Fund',
'ABBV': 'AbbVie Inc.',
'IRDM': 'Iridium Communications Inc',
'TKAI': 'Tokai Pharmaceuticals, Inc.',
'NEE-Q': 'NextEra Energy, Inc.',
'HEI.A': 'Heico Corporation',
'IVR-B': 'INVESCO MORTGAGE CAPITAL INC',
'RDVY': 'First Trust NASDAQ Rising Dividend Achievers ETF',
'ACPW': 'Active Power, Inc.',
'PBSK': 'Poage Bankshares, Inc.',
'NEA': 'Nuveen AMT-Free Municipal Income Fund',
'IBP': 'Installed Building Products, Inc.',
'MRK': 'Merck & Company, Inc.',
'PTC': 'PTC Inc.',
'IGC': 'India Globalization Capital Inc.',
'MUX': 'McEwen Mining Inc.',
'ENZL': 'iShares MSCI New Zealand Capped ETF',
'LPL': 'LG Display Co., Ltd.',
'JPM-F': 'J P Morgan Chase & Co',
'SYPR': 'Sypris Solutions, Inc.',
'MBFI': 'MB Financial Inc.',
'ABCB': 'Ameris Bancorp',
'BMTC': 'Bryn Mawr Bank Corporation',
'IIVI': 'II-VI Incorporated',
'ALLT': 'Allot Communications Ltd.',
'GVA': 'Granite Construction Incorporated',
'FCPT': 'Four Corners Property Trust, Inc.',
'PPHM': 'Peregrine Pharmaceuticals Inc.',
'ICCC': 'ImmuCell Corporation',
'NOV': 'National Oilwell Varco, Inc.',
'RRGB': 'Red Robin Gourmet Burgers, Inc.',
'MLNX': 'Mellanox Technologies, Ltd.',
'GLMD': 'Galmed Pharmaceuticals Ltd.',
'CRBP': 'Corbus Pharmaceuticals Holdings, Inc.',
'VGI': 'Virtus Global Multi-Sector Income Fund',
'NYT': 'New York Times Company (The)',
'SSNC': 'SS&C Technologies Holdings, Inc.',
'IRBT': 'iRobot Corporation',
'KOOL': 'Cesca Therapeutics Inc.',
'OTIV': 'On Track Innovations Ltd',
'SFLY': 'Shutterfly, Inc.',
'TXRH': 'Texas Roadhouse, Inc.',
'HLT': 'Hilton Worldwide Holdings Inc.',
'KIN': 'Kindred Biosciences, Inc.',
'DHRM': 'Dehaier Medical Systems Limited',
'RVNC': 'Revance Therapeutics, Inc.',
'SON': 'Sonoco Products Company',
'ESE': 'ESCO Technologies Inc.',
'EGLE': 'Eagle Bulk Shipping Inc.',
'SERV': 'ServiceMaster Global Holdings, Inc.',
'NBO': 'Neuberger Berman New York Intermediate Municipal Fund Inc.',
'CLC': 'CLARCOR Inc.',
'UNM': 'Unum Group',
'PSA-T': 'Public Storage',
'LB': 'L Brands, Inc.',
'MBI': 'MBIA, Inc.',
'AMH-C': 'American Homes 4 Rent',
'GNVC': 'GenVec, Inc.',
'DE': 'Deere & Company',
'VEEV': 'Veeva Systems Inc.',
'WAT': 'Waters Corporation',
'FFG': 'FBL Financial Group, Inc.',
'SIM': 'Grupo Simec, S.A. de C.V.',
'ASTC': 'Astrotech Corporation',
'AVK': 'Advent Claymore Convertible Securities and Income Fund',
'JPM-G': 'J P Morgan Chase & Co',
'BKH': 'Black Hills Corporation',
'ALB': 'Albemarle Corporation',
'WFC-L': 'Wells Fargo & Company',
'FTK': 'Flotek Industries, Inc.',
'KKR-A': 'KKR & Co. L.P.',
'EMXX': 'Eurasian Minerals Inc.',
'CVS': 'CVS Health Corporation',
'UNTY': 'Unity Bancorp, Inc.',
'CSS': 'CSS Industries, Inc.',
'CBK': 'Christopher & Banks Corporation',
'CCP': 'Care Capital Properties, Inc.',
'SRSC': 'Sears Canada Inc. ',
'ALL-D': 'Allstate Corporation (The)',
'EPM-A': 'Evolution Petroleum Corporation, Inc.',
'HIE': 'Miller/Howard High Income Equity Fund',
'AEH': 'Aegon NV',
'COE': 'China Online Education Group',
'VICR': 'Vicor Corporation',
'MGU': 'Macquarie Global Infrastructure Total Return Fund Inc.',
'USAK': 'USA Truck, Inc.',
'VGM': 'Invesco Trust for Investment Grade Municipals',
'PGRE': 'Paramount Group, Inc.',
'GUT-C': 'Gabelli Utility Trust (The)',
'GNL': 'Global Net Lease, Inc.',
'LMAT': 'LeMaitre Vascular, Inc.',
'MFV': 'MFS Special Value Trust',
'PFK': 'Prudential Financial, Inc.',
'SGA': 'Saga Communications, Inc.',
'IR': 'Ingersoll-Rand plc (Ireland)',
'EXCU': 'Exelon Corporation',
'SRCLP': 'Stericycle, Inc.',
'MOBI': 'Sky-mobi Limited',
'COOL': 'Majesco Entertainment Company',
'VONG': 'Vanguard Russell 1000 Growth ETF',
'AMSG': 'Amsurg Corp.',
'SNBC': 'Sun Bancorp, Inc.',
'BWEN': 'Broadwind Energy, Inc.',
'STRT': 'Strattec Security Corporation',
'HF': 'HFF, Inc.',
'BIP': 'Brookfield Infrastructure Partners LP',
'JGW': 'J.G. Wentworth Company (The)',
'BBT': 'BB&T Corporation',
'TCFC': 'The Community Financial Corporation',
'CTBI': 'Community Trust Bancorp, Inc.',
'UFS': 'Domtar Corporation',
'BMI': 'Badger Meter, Inc.',
'BKEPP': 'Blueknight Energy Partners L.P., L.L.C.',
'PRFZ': 'PowerShares FTSE RAFI US 1500 Small-Mid Portfolio',
'SKOR': 'FlexShares Credit-Scored US Corporate Bond Index Fund',
'EIV': 'Eaton Vance Municipal Bond Fund II',
'LYG': 'Lloyds Banking Group Plc',
'CENX': 'Century Aluminum Company',
'NTGR': 'NETGEAR, Inc.',
'RPM': 'RPM International Inc.',
'EC': 'Ecopetrol S.A.',
'HPF': 'John Hancock Pfd Income Fund II',
'FISV': 'Fiserv, Inc.',
'APRI': 'Apricus Biosciences, Inc',
'HGT': 'Hugoton Royalty Trust',
'HSBC-A': 'HSBC Holdings plc',
'HUSI-G.CL': 'HSBC USA, Inc.',
'HJV': 'MS Structured Asset Corp Saturns GE Cap Corp Series 2002-14',
'UAE': 'iShares MSCI UAE Capped ETF',
'RPT': 'Ramco-Gershenson Properties Trust',
'ROIC': 'Retail Opportunity Investments Corp.',
'ARCX': 'Arc Logistic Partners LP',
'STON': 'StoneMor Partners L.P.',
'JMP': 'JMP Group LLC',
'CPRX': 'Catalyst Pharmaceuticals, Inc.',
'AAXJ': 'iShares MSCI All Country Asia ex Japan Index Fund',
'LPSN': 'LivePerson, Inc.',
'AIMC': 'Altra Industrial Motion Corp.',
'RENX': 'RELX N.V.',
'RFT': 'RAIT Financial Trust',
'MDVXW': 'Medovex Corp.',
'DL': 'China Distance Education Holdings Limited',
'PSXP': 'Phillips 66 Partners LP',
'PEI-B': 'Pennsylvania Real Estate Investment Trust',
'ACHC': 'Acadia Healthcare Company, Inc.',
'CMLS': 'Cumulus Media Inc.',
'NW-C': 'Natl Westminster Pfd',
'EVER': 'EverBank Financial Corp.',
'GAINO': 'Gladstone Investment Corporation',
'DGI': 'DigitalGlobe, Inc',
'AWR': 'American States Water Company',
'LMNX': 'Luminex Corporation',
'MTRX': 'Matrix Service Company',
'GABC': 'German American Bancorp, Inc.',
'BEP': 'Brookfield Renewable Partners L.P.',
'CSTE': 'CaesarStone Sdot-Yam Ltd.',
'MHE': 'BlackRock Massachusetts Tax-Exempt Trust',
'USPH': 'U.S. Physical Therapy, Inc.',
'BNK': 'C1 Financial, Inc.',
'CXSE': 'WisdomTree China ex-State-Owned Enterprises Fund',
'BPY': 'Brookfield Property Partners L.P.',
'BBC': 'BioShares Biotechnology Clinical Trials Fund',
'CGIX': 'Cancer Genetics, Inc.',
'BAK': 'Braskem S.A.',
'PLT': 'Plantronics, Inc.',
'WHLR': 'Wheeler Real Estate Investment Trust, Inc.',
'AMFW': 'Amec Plc Ord',
'NVR': 'NVR, Inc.',
'OCUL': 'Ocular Therapeutix, Inc.',
'TU': 'TELUS Corporation',
'NXTM': 'NxStage Medical, Inc.',
'VRNS': 'Varonis Systems, Inc.',
'NNDM': 'Nano Dimension Ltd.',
'SUMR': 'Summer Infant, Inc.',
'HL-B': 'Hecla Mining Company',
'SVA': 'Sinovac Biotech, Ltd.',
'FMER': 'FirstMerit Corporation',
'RRM': 'RR Media Ltd.',
'AMOT': 'Allied Motion Technologies, Inc.',
'DWAT': 'Arrow DWA Tactical ETF',
'XPL': 'Solitario Exploration & Royalty Corp',
'NRE': 'NorthStar Realty Europe Corp.',
'CMFN': 'CM Finance Inc',
'LNC.WS': 'Lincoln National Corporation',
'PRE-D': 'PartnerRe Ltd.',
'SWHC': 'Smith & Wesson Holding Corporation',
'GPIAU': 'GP Investments Acquisition Corp.',
'CINF': 'Cincinnati Financial Corporation',
'ALN': 'American Lorain Corporation',
'MRVL': 'Marvell Technology Group Ltd.',
'AUBN': 'Auburn National Bancorporation, Inc.',
'NVDQ': 'Novadaq Technologies Inc',
'HBM.WS': 'HudBay Minerals Inc',
'CMRE-C': 'Costamare Inc.',
'MMM': '3M Company',
 'VAC': 'Marriott Vacations Worldwide Corporation',
'TCB-B': 'TCF Financial Corporation',
'SPNC': 'The Spectranetics Corporation',
'ALCO': 'Alico, Inc.',
'UBSI': 'United Bankshares, Inc.',
'COF-D': 'Capital One Financial Corporation',
'CIX': 'CompX International Inc.',
'AGZD': 'WisdomTree Barclays U.S. Aggregate Bond Zero Duration Fund',
'BDE': 'Black Diamond, Inc.',
'HBK': 'Hamilton Bancorp, Inc.',
'UBCP': 'United Bancorp, Inc.',
'PRGS': 'Progress Software Corporation',
'GENE': 'Genetic Technologies Ltd',
'CERE': 'Ceres, Inc.',
'HAE': 'Haemonetics Corporation',
'SUPV': 'Grupo Supervielle S.A.',
'NIHD': 'NII Holdings, Inc.',
'EMC': 'EMC Corporation',
'JPS': 'Nuveen Quality Preferred Income Fund 2',
'RSPP': 'RSP Permian, Inc.',
'RRC': 'Range Resources Corporation',
'EPR-C': 'EPR Properties',
'SOHOM': 'Sotherly Hotels LP',
'ARE': 'Alexandria Real Estate Equities, Inc.',
'ADPT': 'Adeptus Health Inc.',
'FYX': 'First Trust Small Cap Core AlphaDEX Fund',
'LBF': 'Scudder Global High Income Fund, Inc.',
'DXGE': 'WisdomTree Germany Hedged Equity Fund',
'EXR': 'Extra Space Storage Inc',
'PSIX': 'Power Solutions International, Inc.',
'CCRN': 'Cross Country Healthcare, Inc.',
'NRF-A': 'Northstar Realty Finance Corp.',
'CTMX': 'CytomX Therapeutics, Inc.',
'HTBI': 'HomeTrust Bancshares, Inc.',
'D': 'Dominion Resources, Inc.',
'PXLW': 'Pixelworks, Inc.',
'CMU': 'Colonial Municipal Income Trust',
'BVA': 'Cordia Bancorp Inc.',
'PBCT': 'People\'s United Financial, Inc.',
'ARP-E': 'Atlas Resource Partners, L.P.',
'VTHR': 'Vanguard Russell 3000 ETF',
'CNMD': 'CONMED Corporation',
'PX': 'Praxair, Inc.',
'FIF': 'First Trust Energy Infrastructure Fund',
'GLU-A': 'The Gabelli Global Utility and Income Trust',
'DIN': 'DineEquity, Inc',
'TVIA': 'TerraVia Holdings, Inc.',
'LGI': 'Lazard Global Total Return and Income Fund',
'LEE': 'Lee Enterprises, Incorporated',
'PENN': 'Penn National Gaming, Inc.',
'NVET': 'Nexvet Biopharma plc',
'CTRE': 'CareTrust REIT, Inc.',
'GCV-B': 'Gabelli Convertible and Income Securities Fund, Inc. (The)',
'TR': 'Tootsie Roll Industries, Inc.',
'NATH': 'Nathan\'s Famous, Inc.',
'CBI': 'Chicago Bridge & Iron Company N.V.',
'ITC': 'ITC Holdings Corp.',
'EFSC': 'Enterprise Financial Services Corporation',
'PPS-A': 'Post Properties, Inc.',
'ACTA': 'Actua Corporation',
'OXBR': 'Oxbridge Re Holdings Limited',
'WRB': 'W.R. Berkley Corporation',
'GILT': 'Gilat Satellite Networks Ltd.',
'GWPH': 'GW Pharmaceuticals Plc',
'NE': 'Noble Corporation',
'LCM': 'Advent/Claymore Enhanced Growth & Income Fund',
'HMTV': 'Hemisphere Media Group, Inc.',
'SCON': 'Superconductor Technologies Inc.',
'ACRE': 'Ares Commercial Real Estate Corporation',
'CLNE': 'Clean Energy Fuels Corp.',
'GGACW': 'Garnero Group Acquisition Company',
'SCZ': 'iShares MSCI EAFE Small-Cap ETF',
'KODK': 'Eastman Kodak Company',
'ADX': 'Adams Diversified Equity Fund, Inc.',
'NTP': 'Nam Tai Property Inc.',
'INS': 'Intelligent Systems Corporation',
'HD': 'Home Depot, Inc. (The)',
'TRUP': 'Trupanion, Inc.',
'GRX': 'The Gabelli Healthcare & Wellness Trust',
'MP-D': 'Mississippi Power Company',
'ADMP': 'Adamis Pharmaceuticals Corporation',
'WTI': 'W&T Offshore, Inc.',
'DIS': 'Walt Disney Company (The)',
'RFEU': 'First Trust RiverFront Dynamic Europe ETF',
'CADTU': 'DT Asia Investments Limited',
'VTWO': 'Vanguard Russell 2000 ETF',
'UTSI': 'UTStarcom Holdings Corp',
'XPO': 'XPO Logistics, Inc.',
'WGA': 'AG&E Holdings, Inc.',
'PSA': 'Public Storage',
'CLACW': 'Capitol Acquisition Corp. III',
'HSFC-B.CL': 'Household Finance Corp',
'WSM': 'Williams-Sonoma, Inc.',
'OSM': 'SLM Corporation',
'JXSB': 'Jacksonville Bancorp Inc.',
'RHT': 'Red Hat, Inc.',
'GTT': 'GTT Communications, Inc.',
'STZ.B': 'Constellation Brands Inc',
'CRI': 'Carter\'s, Inc.',
'MSON': 'MISONIX, Inc.',
'LKQ': 'LKQ Corporation',
'USDP': 'USD Partners LP',
'TNGO': 'Tangoe, Inc.',
'OLD': 'The Long-Term Care ETF',
'MAPI': 'Mapi - Pharma Ltd.',
'DEPO': 'Depomed, Inc.',
'ALDW': 'Alon USA Partners, LP',
'EPM': 'Evolution Petroleum Corporation, Inc.',
'CAL': 'Caleres, Inc.',
'CNET': 'ChinaNet Online Holdings, Inc.',
'USAP': 'Universal Stainless & Alloy Products, Inc.',
'CNC': 'Centene Corporation',
'SPRT': 'support.com, Inc.',
'CDNA': 'CareDx, Inc.',
'JTPY': 'JetPay Corporation',
'AFC': 'Ares Capital Corporation',
'DGLY': 'Digital Ally, Inc.',
'MAUI': 'AdvisorShares Market Adaptive Unconstrained Income ETF',
'PVTD': 'PrivateBancorp, Inc.',
'FDTS': 'First Trust Developed Markets ex-US Small Cap AlphaDEX Fund',
'WERN': 'Werner Enterprises, Inc.',
'TEP': 'Tallgrass Energy Partners, LP',
'NAP': 'Navios Maritime Midstream Partners LP',
'SJR': 'Shaw Communications Inc.',
'CTHR': 'Charles & Colvard Ltd',
'ITEK': 'Inotek Pharmaceuticals Corporation',
'FNTCW': 'FinTech Acquisition Corp.',
'MOFG': 'MidWestOne Financial Group, Inc.',
'ABX': 'Barrick Gold Corporation',
'TROW': 'T. Rowe Price Group, Inc.',
'GTWN': 'Georgetown Bancorp, Inc.',
'PACE': 'Pace Holdings Corp.',
'LEJU': 'Leju Holdings Limited',
'NM-G': 'Navios Maritime Holdings Inc.',
'BBDO': 'Banco Bradesco Sa',
'NIM': 'Nuveen Select Maturities Municipal Fund',
'BX': 'The Blackstone Group L.P.',
'BLH': 'Blackrock New York Municipal 2018 Term Trust',
'SWM': 'Schweitzer-Mauduit International, Inc.',
'SGYPW': 'Synergy Pharmaceuticals, Inc.',
'USG': 'USG Corporation',
'HWBK': 'Hawthorn Bancshares, Inc.',
'HSIC': 'Henry Schein, Inc.',
'STO': 'Statoil ASA',
'DTQ': 'DTE Energy Company',
'RSYS': 'RadiSys Corporation',
'LNG': 'Cheniere Energy, Inc.',
'IHG': 'Intercontinental Hotels Group',
'EVHC': 'Envision Healthcare Holdings, Inc.',
'KMI-A': 'Kinder Morgan, Inc.',
'STT-G': 'State Street Corporation',
'RESN': 'Resonant Inc.',
'SONA': 'Southern National Bancorp of Virginia, Inc.',
'LKFN': 'Lakeland Financial Corporation',
'RF-B': 'Regions Financial Corporation',
'BIF': 'USLIFE Income Fund, Inc.',
'GS-I': 'Goldman Sachs Group, Inc. (The)',
'MON': 'Monsanto Company',
'VTGN': 'VistaGen Therapeutics, Inc.',
'SEB': 'Seaboard Corporation',
'PL-C': 'Protective Life Corporation',
'SKM': 'SK Telecom Co., Ltd.',
'EZPW': 'EZCORP, Inc.',
'CBF': 'Capital Bank Financial Corp.',
'CROX': 'Crocs, Inc.',
'RBC': 'Regal Beloit Corporation',
'ARCO': 'Arcos Dorados Holdings Inc.',
'TANP': 'TravelCenters of America LLC',
'ZB-F': 'Zions Bancorporation',
'WLDN': 'Willdan Group, Inc.',
'MHGC': 'Morgans Hotel Group Co.',
'NORD': 'Nord Anglia Education, Inc.',
'REG-G': 'Regency Centers Corporation',
'ARP-D': 'Atlas Resource Partners, L.P.',
'FOLD': 'Amicus Therapeutics, Inc.',
'AXLL': 'Axiall Corporation',
'HMN': 'Horace Mann Educators Corporation',
'GAIN': 'Gladstone Investment Corporation',
'FNCX': 'Function(x) Inc.',
'UNFI': 'United Natural Foods, Inc.',
'WBMD': 'WebMD Health Corp',
'BONT': 'The Bon-Ton Stores, Inc.',
'FSBW': 'FS Bancorp, Inc.',
'LEG': 'Leggett & Platt, Incorporated',
'BHV': 'BlackRock Virginia Municipal Bond Trust',
'MRCC': 'Monroe Capital Corporation',
'CSBR': 'Champions Oncology, Inc.',
'CDL': 'Victory CEMP US Large Cap High Div Volatility Wtd Index ETF',
'FCFS': 'First Cash Financial Services, Inc.',
'MTD': 'Mettler-Toledo International, Inc.',
'COLM': 'Columbia Sportswear Company',
'INVE': 'Identiv, Inc.',
'APOL': 'Apollo Education Group, Inc.',
'XENE': 'Xenon Pharmaceuticals Inc.',
'ESIO': 'Electro Scientific Industries, Inc.',
'PSA-X': 'Public Storage',
'CHSCN': 'CHS Inc',
'KRC-G': 'Kilroy Realty Corporation',
'RLOC': 'ReachLocal, Inc.',
'KGJI': 'Kingold Jewelry Inc.',
'QADB': 'QAD Inc.',
'DOW': 'Dow Chemical Company (The)',
'UNXL': 'Uni-Pixel, Inc.',
'EURN': 'Euronav NV',
'IQI': 'Invesco Quality Municipal Income Trust',
'ATI': 'Allegheny Technologies Incorporated',
'TEO': 'Telecom Argentina Stet - France Telecom S.A.',
'EVF': 'Eaton Vance Senior Income Trust',
 'JRO': 'Nuveen Floating Rate Income Opportunity Fund',
'DDT': 'Dillard\'s, Inc.',
'VEC': 'Vectrus, Inc.',
'KBAL': 'Kimball International, Inc.',
'AIG.WS': 'American International Group, Inc.',
'GLRE': 'Greenlight Reinsurance, Ltd.',
'SF': 'Stifel Financial Corporation',
'JOUT': 'Johnson Outdoors Inc.',
'AMCN': 'AirMedia Group Inc',
'FFA': 'First Trust',
'IPCI': 'Intellipharmaceutics International Inc.',
'ABEO': 'Abeona Therapeutics Inc.',
'WFC-V': 'Wells Fargo & Company',
'THO': 'Thor Industries, Inc.',
'NRF': 'Northstar Realty Finance Corp.',
'X': 'United States Steel Corporation',
'CPPL': 'Columbia Pipeline Partners LP',
'DFT-B.CL': 'Dupont Fabros Technology, Inc.',
'BSE': 'Blackrock New York Municipal Income Quality Trust',
'OZM': 'Och-Ziff Capital Management Group LLC',
'REN ': 'Resolute Energy Corporation',
'SCX': 'L.S. Starrett Company (The)',
'LQDT': 'Liquidity Services, Inc.',
'OMCL': 'Omnicell, Inc.',
'CTAS': 'Cintas Corporation',
'YUMA-A': 'Yuma Energy, Inc.',
'FNB': 'F.N.B. Corporation',
'STAR-E': 'iStar Financial Inc.',
'KAR': 'KAR Auction Services, Inc',
'FORTY': 'Formula Systems (1985) Ltd.',
'MIE': 'Cohen & Steers MLP Income and Energy Opportunity Fund, Inc.',
'FDIV': 'First Trust Strategic Income ETF',
'PBR.A': 'Petroleo Brasileiro S.A.- Petrobras',
'NML': 'Neuberger Berman MLP Income Fund Inc.',
'GGG': 'Graco Inc.',
'ALV': 'Autoliv, Inc.',
'ZPIN': 'Zhaopin Limited',
'GFY': 'Western Asset Variable Rate Strategic Fund Inc.',
'NSYS': 'Nortech Systems Incorporated',
'MGF': 'MFS Government Markets Income Trust',
'HALL': 'Hallmark Financial Services, Inc.',
'DPM': 'DCP Midstream Partners, LP',
'SB-D': 'Safe Bulkers, Inc',
'CACC': 'Credit Acceptance Corporation',
'RDCM': 'Radcom Ltd.',
'ORPN': 'Bio Blast Pharma Ltd.',
'CMRE': 'Costamare Inc.',
'DSW': 'DSW Inc.',
'GFNCP': 'General Finance Corporation',
'QTNT': 'Quotient Limited',
'VDSI': 'VASCO Data Security International, Inc.',
'BAC.WS.B': 'Bank of America Corporation',
'PM': 'Philip Morris International Inc',
'BSRR': 'Sierra Bancorp',
'MBRX': 'Moleculin Biotech, Inc.',
'JPM-H': 'J P Morgan Chase & Co',
'MPAA': 'Motorcar Parts of America, Inc.',
'SFS': 'Smart',
'MLNK': 'ModusLink Global Solutions, Inc',
'FSB': 'Franklin Financial Network, Inc.',
'EVJ': 'Eaton Vance New Jersey Municipal Income Trust',
'GCTS': 'GCT Semiconductor, Inc.',
'HIG': 'Hartford Financial Services Group, Inc. (The)',
'XRDC': 'Crossroads Capital, Inc.',
'CVLT': 'CommVault Systems, Inc.',
'DSE': 'Duff & Phelps Select Energy MLP Fund Inc.',
'SAIC': 'SCIENCE APPLICATIONS INTERNATIONAL CORPORATION',
'SPAN': 'Span-America Medical Systems, Inc.',
'CTY': 'Qwest Corporation',
'ISIL': 'Intersil Corporation',
'MAIN': 'Main Street Capital Corporation',
'RCI': 'Rogers Communication, Inc.',
'BOFI': 'BofI Holding, Inc.',
'MSBI': 'Midland States Bancorp, Inc.',
'AUMAW': 'AR Capital Acquisition Corp.',
'TGI': 'Triumph Group, Inc.',
'TAPR': 'region',
'MYE': 'Myers Industries, Inc.',
'KIRK': 'Kirkland\'s, Inc.',
'FHN': 'First Horizon National Corporation',
'DST': 'DST Systems, Inc.',
'DAC': 'Danaos Corporation',
'BNDX': 'Vanguard Total International Bond ETF',
'WWW': 'Wolverine World Wide, Inc.',
'NWE': 'NorthWestern Corporation',
'PSEC': 'Prospect Capital Corporation',
'PSG': 'Performance Sports Group Ltd',
'FISI': 'Financial Institutions, Inc.',
'NLNK': 'NewLink Genetics Corporation',
'PAM': 'Pampa Energia S.A.',
'LHO-J': 'LaSalle Hotel Properties',
'AFSI-B': 'AmTrust Financial Services, Inc.',
'CAI': 'CAI International, Inc.',
'SKUL': 'Skullcandy, Inc.',
'MOS': 'Mosaic Company (The)',
'FLAG': 'WeatherStorm Forensic Accounting Long Short ETF',
'IBKC': 'IBERIABANK Corporation',
'CGNT': 'Cogentix Medical, Inc.',
'CSWC': 'Capital Southwest Corporation',
'AJX': 'Great Ajax Corp.',
'EDR': 'Education Realty Trust Inc.',
'PSA-V': 'Public Storage',
'PTLA': 'Portola Pharmaceuticals, Inc.',
'ISP': 'ING Group, N.V.',
'FSP': 'Franklin Street Properties Corp.',
'INTT': 'inTest Corporation',
'WU': 'Western Union Company (The)',
'SDR': 'SandRidge Mississippian Trust II',
'ACAD': 'ACADIA Pharmaceuticals Inc.',
'HVT': 'Haverty Furniture Companies, Inc.',
'ELEC': 'Electrum Special Acquisition Corporation',
'BBH': 'VanEck Vectors Biotech ETF',
'WTS': 'Watts Water Technologies, Inc.',
'VLGEA': 'Village Super Market, Inc.',
'DPRX': 'Dipexium Pharmaceuticals, Inc.',
'NJV': 'Nuveen New Jersey Municipal Value Fund',
'GPACW': 'Global Partner Acquisition Corp.',
'FLXS': 'Flexsteel Industries, Inc.',
'NQM': 'Nuveen Investment Quality Municipal Fund, Inc.',
'GMAN': 'Gordmans Stores, Inc.',
'OXY': 'Occidental Petroleum Corporation',
'EA': 'Electronic Arts Inc.',
'DEL': 'Deltic Timber Corporation',
'BCLI': 'Brainstorm Cell Therapeutics Inc.',
'PSB-U': 'PS Business Parks, Inc.',
'DPW': 'Digital Power Corporation',
'FEN': 'First Trust Energy Income and Growth Fund',
'HTGZ': 'Hercules Capital, Inc.',
'XRX': 'Xerox Corporation',
'USFD': 'US Foods Holding Corp.',
'MORN': 'Morningstar, Inc.',
'LYB': 'LyondellBasell Industries NV',
'GE': 'General Electric Company',
'LSXMB': 'Liberty Media Corporation',
'LPG': 'Dorian LPG Ltd.',
'CFRX': 'ContraFect Corporation',
'LPNT': 'LifePoint Health, Inc.',
'REGI': 'Renewable Energy Group, Inc.',
'CRDT': 'WisdomTree Strategic Corporate Bond Fund',
'IFV': 'First Trust Dorsey Wright International Focus 5 ETF',
'AGCO': 'AGCO Corporation',
'TDY': 'Teledyne Technologies Incorporated',
'HFBC': 'HopFed Bancorp, Inc.',
'PHX': 'Panhandle Royalty Company',
'BLMT': 'BSB Bancorp, Inc.',
'KERX': 'Keryx Biopharmaceuticals, Inc.',
'MRNS': 'Marinus Pharmaceuticals, Inc.',
'PBIP': 'Prudential Bancorp, Inc.',
'FTAG': 'First Trust Indxx Global Agriculture ETF',
'INTG': 'The Intergroup Corporation',
'CCL': 'Carnival Corporation',
'WPRT': 'Westport Fuel Systems Inc',
'NWBOW': 'Northwest Biotherapeutics, Inc.',
'DAKP': 'Dakota Plains Holdings, Inc.',
'KFN-': 'KKR Financial Holdings LLC',
'OFG-A': 'OFG Bancorp',
'BANC-C': 'Banc of California, Inc.',
'ABT': 'Abbott Laboratories',
'HDSN': 'Hudson Technologies, Inc.',
'RNVAW': 'Rennova Health, Inc.',
'RAI': 'Reynolds American Inc',
'SGRP': 'SPAR Group, Inc.',
'ACIW': 'ACI Worldwide, Inc.',
'SRVA': 'SIRVA, Inc.',
'PSX': 'Phillips 66',
'IKNX': 'Ikonics Corporation',
'XOMA': 'XOMA Corporation',
'SXI': 'Standex International Corporation',
'MTLS': 'Materialise NV',
'WAIR': 'Wesco Aircraft Holdings, Inc.',
'GSBC': 'Great Southern Bancorp, Inc.',
'PNC': 'PNC Financial Services Group, Inc. (The)',
'FWP': 'Forward Pharma A/S',
'FFIC': 'Flushing Financial Corporation',
'EDAP': 'EDAP TMS S.A.',
'GBSN': 'Great Basin Scientific, Inc.',
'DO': 'Diamond Offshore Drilling, Inc.',
'VG': 'Vonage Holdings Corp.',
'ZAGG': 'ZAGG Inc',
'AA': 'Alcoa Inc.',
'LVLT': 'Level 3 Communications, Inc.',
'BHE': 'Benchmark Electronics, Inc.',
'DLR': 'Digital Realty Trust, Inc.',
'RATE': 'Bankrate, Inc.',
'DRAM': 'Dataram Corporation',
'MERC': 'Mercer International Inc.',
'XNCR': 'Xencor, Inc.',
'APPY': 'Venaxis, Inc.',
'GAM-B': 'General American Investors, Inc.',
'SOHOL': 'Sotherly Hotels LP',
'CSX': 'CSX Corporation',
'APC': 'Anadarko Petroleum Corporation',
'CBS.A': 'CBS Corporation',
'NYMTP': 'New York Mortgage Trust, Inc.',
'BRID': 'Bridgford Foods Corporation',
'COR-A': 'CoreSite Realty Corporation',
'EFR': 'Eaton Vance Senior Floating-Rate Fund',
'ARII': 'American Railcar Industries, Inc.',
'CNSL': 'Consolidated Communications Holdings, Inc.',
'GDV': 'Gabelli Dividend',
'EAE': 'Entergy Arkansas, Inc.',
'IDRA': 'Idera Pharmaceuticals, Inc.',
'KRNT': 'Kornit Digital Ltd.',
'PAA': 'Plains All American Pipeline, L.P.',
'UREE': 'U.S. Rare Earths, Inc.',
'TAST': 'Carrols Restaurant Group, Inc.',
'GER': 'Goldman Sachs MLP Energy Renaissance Fund',
'OCLSW': 'Oculus Innovative Sciences, Inc.',
'HLS.WS': 'HealthSouth Corporation',
'SMED': 'Sharps Compliance Corp',
'POST': 'Post Holdings, Inc.',
'NNC': 'Nuveen North Carolina Premium Income Municipal Fund',
'RST': 'Rosetta Stone',
'WFC-P': 'Wells Fargo & Company',
'KSS': 'Kohl\'s Corporation',
'RE': 'Everest Re Group, Ltd.',
'EGLT': 'Egalet Corporation',
'NRCIA': 'National Research Corporation',
'BGT': 'Blackrock Global',
'CXP': 'Columbia Property Trust, Inc.',
'TTWO': 'Take-Two Interactive Software, Inc.',
'GLOG-A': 'GasLog LP.',
'SMG': 'Scotts Miracle-Gro Company (The)',
'TATT': 'TAT Technologies Ltd.',
'STS': 'Supreme Industries, Inc.',
'AIII': 'ACRE Realty Investors, Inc.',
'FCA': 'First Trust China AlphaDEX Fund',
'PCG-A': 'Pacific Gas & Electric Co.',
'RIF': 'RMR Real Estate Income Fund',
'ZTR': 'Zweig Total Return Fund, Inc. (The)',
'DWA': 'Dreamworks Animation SKG, Inc.',
'FT': 'Franklin Universal Trust',
'CID': 'Victory CEMP International High Div Volatility Wtd Index ETF',
'RBA': 'Ritchie Bros. Auctioneers Incorporated',
'XTNT': 'Xtant Medical Holdings, Inc.',
'GLW': 'Corning Incorporated',
'ARCI': 'Appliance Recycling Centers of America, Inc.',
'TSO': 'Tesoro Corporation',
'FCBC': 'First Community Bancshares, Inc.',
'NSPR': 'InspireMD, Inc.',
'FCTY': '1st Century Bancshares, Inc',
'DAX': 'Recon Capital DAX Germany ETF',
'USBI': 'United Security Bancshares, Inc.',
'CNLMW': 'CB Pharma Acquisition Corp.',
'NAKD': 'Naked Brand Group Inc.',
'HCACU': 'Hennessy Capital Acquisition Corp. II',
'CMG': 'Chipotle Mexican Grill, Inc.',
'CEF': 'Central Fund of Canada Limited',
'EUFN': 'iShares MSCI Europe Financials Sector Index Fund',
'DTEA': 'DAVIDsTEA Inc.',
'OVBC': 'Ohio Valley Banc Corp.',
'DX-A': 'Dynex Capital, Inc.',
'ODC': 'Oil-Dri Corporation Of America',
'TMST': 'Timken Steel Corporation',
'JPW': 'Nuveen Flexible Investment Income Fund',
'RTRX': 'Retrophin, Inc.',
'MFA': 'MFA Financial, Inc.',
'PVTB': 'PrivateBancorp, Inc.',
'MGT': 'MGT Capital Investments Inc',
'FATE': 'Fate Therapeutics, Inc.',
'VEDL': 'Vedanta Limited',
'OTEX': 'Open Text Corporation',
'ESPR': 'Esperion Therapeutics, Inc.',
'BLW': 'Citigroup Inc.',
'CABO': 'Cable One, Inc.',
'HCP': 'HCP, Inc.',
'AKG': 'Asanko Gold Inc.',
'ACAS': 'American Capital, Ltd.',
'THW': 'Tekla World Healthcare Fund',
'TDIV': 'First Trust NASDAQ Technology Dividend Index Fund',
'HCLP': 'Hi-Crush Partners LP',
'FAC': 'First Acceptance Corporation',
'MTSC': 'MTS Systems Corporation',
'PSCD': 'PowerShares S&P SmallCap Consumer Discretionary Portfolio',
'ARWAR': 'Arowana Inc.',
'CBMX': 'CombiMatrix Corporation',
'NCMI': 'National CineMedia, Inc.',
'IBOC': 'International Bancshares Corporation',
'CIZN': 'Citizens Holding Company',
'PFSW': 'PFSweb, Inc.',
'FCS': 'Fairchild Semiconductor International, Inc.',
'USLB': 'PowerShares Russell 1000 Low Beta Equal Weight Portfolio',
'IFEU': 'iShares FTSE EPRA/NAREIT Europe Index Fund',
'PDLI': 'PDL BioPharma, Inc.',
'MPLX': 'MPLX LP',
'TEAM': 'Atlassian Corporation Plc',
'KEM': 'Kemet Corporation',
'BANC': 'Banc of California, Inc.',
'SJW': 'SJW Corporation',
'VECO': 'Veeco Instruments Inc.',
'APLP': 'Archrock Partners, L.P.',
'RES': 'RPC, Inc.',
'ZB-G': 'Zions Bancorporation',
'PTIE': 'Pain Therapeutics',
'MCRB': 'Seres Therapeutics, Inc.',
'RRMS': 'Rose Rock Midstream, L.P.',
'FET': 'Forum Energy Technologies, Inc.',
'OXGN': 'OXiGENE, Inc.',
'OGS': 'ONE Gas, Inc.',
'CBM': 'Cambrex Corporation',
'CEMI': 'Chembio Diagnostics, Inc.',
'GALE': 'Galena Biopharma, Inc.',
'AMT': 'American Tower Corporation (REIT)',
'ESD': 'Western Asset Emerging Markets Debt Fund Inc',
'NAVG': 'The Navigators Group, Inc.',
'KWN': 'Kennedy-Wilson Holdings Inc.',
'ABM': 'ABM Industries Incorporated',
'NZH': 'Nuveen California Dividend Advantage Municipal Fund 3',
'REXR': 'Rexford Industrial Realty, Inc.',
'HAR': 'Harman International Industries, Incorporated',
'AVT': 'Avnet, Inc.',
'FNGN': 'Financial Engines, Inc.',
'CSGS': 'CSG Systems International, Inc.',
'IDTI': 'Integrated Device Technology, Inc.',
'TY-': 'Tri Continental Corporation',
'ADXS': 'Advaxis, Inc.',
'CLNY': 'Colony Capital, Inc',
'KORS': 'Michael Kors Holdings Limited',
'SAMG': 'Silvercrest Asset Management Group Inc.',
'FCFP': 'First Community Financial Partners, Inc.',
'JCOM': 'j2 Global, Inc.',
'GHC': 'Graham Holdings Company',
'SPGI': 'S&P Global Inc.',
'DTSI': 'DTS, Inc.',
'WEB': 'Web.com Group, Inc.',
'INT': 'World Fuel Services Corporation',
'SBAC': 'SBA Communications Corporation',
'LAWS': 'Lawson Products, Inc.',
'HON': 'Honeywell International Inc.',
'BATRK': 'Liberty Media Corporation',
'EXAC': 'Exactech, Inc.',
'LND': 'Brasilagro Cia Brasileira De Propriedades Agricolas',
'ROYT': 'Pacific Coast Oil Trust',
'WGL': 'WGL Holdings Inc',
'NTL': 'Nortel Inversora SA',
'ATNM': 'Actinium Pharmaceuticals, Inc.',
'GLT': 'Glatfelter',
'LUV': 'Southwest Airlines Company',
'RHI': 'Robert Half International Inc.',
'OPGNW': 'OpGen, Inc.',
'THST': 'Truett-Hurst, Inc.',
'AM': 'Antero Midstream Partners LP',
'AEM': 'Agnico Eagle Mines Limited',
'ESND': 'Essendant Inc.',
'HTM': 'U.S. Geothermal Inc.',
'IGOV': 'iShares S&P/Citigroup International Treasury Bond Fund',
'LXFR': 'Luxfer Holdings PLC',
'CRME': 'Cardiome Pharma Corporation',
'HCJ': 'HCI Group, Inc.',
'WIW': 'Western Asset/Claymore U.S. Treasury Inflation Prot Secs Fd 2',
'CNA': 'CNA Financial Corporation',
'ECACR': 'E-compass Acquisition Corp.',
'WLTW': 'Willis Towers Watson Public Limited Company',
'GRSH': 'Gores Holdings, Inc.',
'GSAT': 'Globalstar, Inc.',
'GROW': 'U.S. Global Investors, Inc.',
'KFS': 'Kingsway Financial Services, Inc.',
'IMKTA': 'Ingles Markets, Incorporated',
'ROK': 'Rockwell Automation, Inc.',
'AITP': 'Advanced Inhalation Therapies (AIT) Ltd.',
'ATAX': 'America First Multifamily Investors, L.P.',
'SWK': 'Stanley Black & Decker, Inc.',
'WEC': 'WEC Energy Group, Inc.',
'EVDY': 'Everyday Health, Inc.',
'CGEN': 'Compugen Ltd.',
'WRB-C': 'W.R. Berkley Corporation',
'LEU': 'Centrus Energy Corp.',
'HI': 'Hillenbrand Inc',
'KMB': 'Kimberly-Clark Corporation',
'FNB-E': 'F.N.B. Corporation',
'CLCT': 'Collectors Universe, Inc.',
'DCOM': 'Dime Community Bancshares, Inc.',
'DRYS': 'DryShips Inc.',
'CAC': 'Camden National Corporation',
'NCZ': 'AllianzGI Convertible & Income Fund II',
'BNSO': 'Bonso Electronics International, Inc.',
'N': 'Netsuite Inc',
'CNQ': 'Canadian Natural Resources Limited',
'VBIV': 'VBI Vaccines, Inc.',
'MMD': 'MainStay DefinedTerm Municipal Opportunities Fund',
'MTB': 'M&T Bank Corporation',
'MUJ': 'Blackrock MuniHoldings New Jersey Insured Fund, Inc.',
'FITS': 'The Health and Fitness ETF',
'FCO': 'Aberdeen Global Income Fund, Inc.',
'ATSG': 'Air Transport Services Group, Inc',
'GYC': 'Corporate Asset Backed Corp CABCO',
'BHL': 'Blackrock Defined Opportunity Credit Trust',
'WEBK': 'Wellesley Bancorp, Inc.',
'ACWI': 'iShares MSCI ACWI Index Fund',
'BPFH': 'Boston Private Financial Holdings, Inc.',
'CECE': 'CECO Environmental Corp.',
'CLRO': 'ClearOne, Inc.',
'CCRC': 'China Customer Relations Centers, Inc.',
'USMD': 'USMD Holdings, Inc.',
'ONCS': 'OncoSec Medical Incorporated',
'CIF': 'Colonial Intermediate High Income Fund',
'ALX': 'Alexander\'s, Inc.',
'NCBS': 'Nicolet Bankshares Inc.',
'DRWI': 'DragonWave Inc',
'ISLE': 'Isle of Capri Casinos, Inc.',
'TOL': 'Toll Brothers Inc.',
'NCA': 'Nuveen California Municipal Value Fund, Inc.',
'TSLA': 'Tesla Motors, Inc.',
'CRS': 'Carpenter Technology Corporation',
'WUBA': '58.com Inc.',
'VGLT': 'Vanguard Long-Term Government Bond ETF',
'NQP': 'Nuveen Pennsylvania Investment Quality Municipal Fund, Inc.',
'EPRS': 'EPIRUS Biopharmaceuticals, Inc.',
'TYC': 'Tyco International plc',
'ISD': 'Prudential Short Duration High Yield Fund, Inc.',
'EL': 'Estee Lauder Companies, Inc. (The)',
'VMI': 'Valmont Industries, Inc.',
'IZEA': 'IZEA Inc.',
'CSIQ': 'Canadian Solar Inc.',
'CBB-B': 'Cincinnati Bell Inc',
'MLSS': 'Milestone Scientific, Inc.',
'DNBF': 'DNB Financial Corp',
'NKTR': 'Nektar Therapeutics',
'CYHHZ': 'Community Health Systems, Inc.',
'IAF': 'Aberdeen Australia Equity Fund Inc',
'JAX': 'J. Alexander\'s Holdings, Inc.',
'DMLP': 'Dorchester Minerals, L.P.',
'DANG': 'E-Commerce China Dangdang Inc.',
'DISH': 'DISH Network Corporation',
'AGNC': 'American Capital Agency Corp.',
'FLEX': 'Flextronics International Ltd.',
'MRVC': 'MRV Communications, Inc.',
'BKCC': 'BlackRock Capital Investment Corporation',
'RAX': 'Rackspace Hosting, Inc',
'SRE': 'Sempra Energy',
'TSU': 'TIM Participacoes S.A.',
'HLTH': 'Nobilis Health Corp.',
'VTWV': 'Vanguard Russell 2000 Value ETF',
'PRIM': 'Primoris Services Corporation',
'MXPT': 'MaxPoint Interactive, Inc.',
'AGM-A': 'Federal Agricultural Mortgage Corporation',
'RVSB': 'Riverview Bancorp Inc',
'VIIX': 'region',
'AMSC': 'American Superconductor Corporation',
'AZZ': 'AZZ Inc.',
'JAKK': 'JAKKS Pacific, Inc.',
'CWST': 'Casella Waste Systems, Inc.',
'DGICA': 'Donegal Group, Inc.',
'MIN': 'MFS Intermediate Income Trust',
'CMCSA': 'Comcast Corporation',
'LINK': 'Interlink Electronics, Inc.',
'DXR': 'Daxor Corporation',
'IFF': 'International Flavors & Fragrances, Inc.',
'BAH': 'Booz Allen Hamilton Holding Corporation',
'HNP': 'Huaneng Power International, Inc.',
'HELE': 'Helen of Troy Limited',
'SUNS': 'Solar Senior Capital Ltd.',
'MASI': 'Masimo Corporation',
'MCN': 'Madison Covered Call & Equity Strategy Fund',
'VMEM': 'Violin Memory, Inc.',
'AMH-B': 'American Homes 4 Rent',
'RCII': 'Rent-A-Center Inc.',
'HIO': 'Western Asset High Income Opportunity Fund, Inc.',
'KSM': 'Scudder Strategic Municipal Income Trust',
'PW-A': 'Power REIT',
'WFBI': 'WashingtonFirst Bankshares Inc',
'IDE': 'Voya Infrastructure, Industrials and Materials Fund',
'AXE': 'Anixter International Inc.',
'TOO-B': 'Teekay Offshore Partners L.P.',
'HMPR': 'Hampton Roads Bankshares Inc',
'BHACW': 'Barington/Hilco Acquisition Corp.',
'FGEN': 'FibroGen, Inc',
'FNJN': 'Finjan Holdings, Inc.',
'STV': 'China Digital TV Holding Co., Ltd.',
'AT': 'Atlantic Power Corporation',
'PMBC': 'Pacific Mercantile Bancorp',
'DYN-A': 'Dynegy Inc.',
'SYNT': 'Syntel, Inc.',
'INGN': 'Inogen, Inc',
'JNS': 'Janus Capital Group, Inc',
'TFSL': 'TFS Financial Corporation',
'MOG.B': 'Moog Inc.',
'FIVE': 'Five Below, Inc.',
'IEC': 'IEC Electronics Corp.',
'ITIC': 'Investors Title Company',
'CLLS': 'Cellectis S.A.',
'PLNT': 'Planet Fitness, Inc.',
'Q': 'Quintiles Transnational Holdings Inc.',
'CCD': 'Calamos Dynamic Convertible & Income Fund',
'RMBS': 'Rambus, Inc.',
'FRC-F': 'FIRST REPUBLIC BANK',
'SBRA': 'Sabra Healthcare REIT, Inc.',
'NEWM': 'New Media Investment Group Inc.',
'PCLN': 'The Priceline Group Inc. ',
'EAT': 'Brinker International, Inc.',
'INNL': 'Innocoll Holdings',
'FHY': 'First Trust Strategic High Income Fund II',
'ERI': 'Eldorado Resorts, Inc.',
'SBI': 'Western Asset Intermediate Muni Fund Inc',
'EXPO': 'Exponent, Inc.',
'STAG': 'Stag Industrial, Inc.',
'ACV': 'AllianzGI Diversified Income & Convertible Fund',
'VMO': 'Invesco Municipal Opportunity Trust',
'JFR': 'Nuveen Floating Rate Income Fund',
'SPCB': 'SuperCom, Ltd.',
'ABG': 'Asbury Automotive Group Inc',
'CQH': 'Cheniere Energy Partners LP Holdings, LLC',
'RDHL': 'Redhill Biopharma Ltd.',
'BP': 'BP p.l.c.',
'SAH': 'Sonic Automotive, Inc.',
'MGEE': 'MGE Energy Inc.',
'EPC': 'Energizer Holdings, Inc.',
'WFC.WS': 'Wells Fargo & Company',
'AFT': 'Apollo Senior Floating Rate Fund Inc.',
'CMC': 'Commercial Metals Company',
'FKU': 'First Trust United Kingdom AlphaDEX Fund',
'SYX': 'Systemax Inc.',
'AREX': 'Approach Resources Inc.',
'BIO': 'Bio-Rad Laboratories, Inc.',
'TRCO': 'Tribune Media Company',
'PCN': 'PIMCO Corporate & Income Strategy Fund',
'APF': 'Morgan Stanley Asia-Pacific Fund, Inc.',
'EOCC': 'Empresa Nacional de Electricidad S.A.',
'UTEK': 'Ultratech, Inc.',
'SNSS': 'Sunesis Pharmaceuticals, Inc.',
'MVT': 'Blackrock MuniVest Fund II, Inc.',
'EPR-E': 'EPR Properties',
'OFS': 'OFS Capital Corporation',
'ABCD': 'Cambium Learning Group, Inc.',
'SPU': 'SkyPeople Fruit Juice, Inc.',
'PDBC': 'PowerShares DB Optimum Yield Diversified Commodity Strategy Po',
'ETG': 'Eaton Vance Tax-Advantaged Global Dividend Income Fund',
'RBS-L': 'Royal Bank Scotland plc (The)',
'JRVR': 'James River Group Holdings, Ltd.',
'TAYD': 'Taylor Devices, Inc.',
'AWX': 'Avalon Holdings Corporation',
'DOC': 'Physicians Realty Trust',
'FBMS': 'The First Bancshares, Inc.',
'BATRR': 'Liberty Media Corporation',
'GD': 'General Dynamics Corporation',
'SPLK': 'Splunk Inc.',
'NWLI': 'National Western Life Group, Inc.',
'GOODM': 'Gladstone Commercial Corporation',
'CRTO': 'Criteo S.A.',
'PRFT': 'Perficient, Inc.',
'CLMS': 'Calamos Asset Management, Inc.',
'JPEP': 'JP Energy Partners LP',
'CGA': 'China Green Agriculture, Inc.',
'BXE': 'Bellatrix Exploration Ltd',
'BERY': 'BPC Acquisition Corp',
'AIRI': 'Air Industries Group',
'PFE': 'Pfizer, Inc.',
'SBFG': 'SB Financial Group, Inc.',
'PAGP': 'Plains Group Holdings, L.P.',
'ZGNX': 'Zogenix, Inc.',
'EVLMC': 'Eaton Vance NextShares Trust II',
'GDL-B': 'The GDL Fund',
'SWC': 'Stillwater Mining Company',
'AXR': 'AMREP Corporation',
'PTI': 'Proteostasis Therapeutics, Inc.',
'HDP': 'Hortonworks, Inc.',
'MTGEP': 'American Capital Mortgage Investment Corp.',
'NRF-B': 'Northstar Realty Finance Corp.',
'BDJ': 'Blackrock Enhanced Equity Dividend Trust',
'FEIC': 'FEI Company',
'APDN': 'Applied DNA Sciences Inc',
'FRPT': 'Freshpet, Inc.',
'REI': 'Ring Energy, Inc.',
'SNE': 'Sony Corp Ord',
'LINC': 'Lincoln Educational Services Corporation',
'NBH': 'Neuberger Berman Intermediate Municipal Fund Inc.',
'AA-B': 'Alcoa Inc.',
'FALC': 'FalconStor Software, Inc.',
'EXP': 'Eagle Materials Inc',
'NBW': 'Neuberger Berman California Intermediate Municipal Fund Inc.',
'MKTO': 'Marketo, Inc.',
'ERJ': 'Embraer-Empresa Brasileira de Aeronautica',
'GDO': 'Western Asset Global Corporate Defined Opportunity Fund Inc.',
'AKTX': 'Akari Therapeutics Plc',
'EVT': 'Eaton Vance Tax Advantaged Dividend Income Fund',
'ITRN': 'Ituran Location and Control Ltd.',
'WBS-E': 'Webster Financial Corporation',
'EXK': 'Endeavour Silver Corporation',
'ELU': 'Entergy Louisiana, Inc.',
'GK': 'G&K Services, Inc.',
'IMS': 'IMS Health Holdings, Inc.',
'STI.WS.B': 'SunTrust Banks, Inc.',
'URRE': 'Uranium Resources, Inc.',
'INXN': 'InterXion Holding N.V.',
'FIVN': 'Five9, Inc.',
'SNHO': 'Senior Housing Properties Trust',
'INSY': 'Insys Therapeutics, Inc.',
'DDR-K': 'DDR Corp.',
'UBS': 'UBS AG',
'GEN ': 'Genesis Healthcare, Inc.',
'BBT-F': 'BB&T Corporation',
'RDY': 'Dr. Reddy\'s Laboratories Ltd',
'FRBA': 'First Bank',
'HMST': 'HomeStreet, Inc.',
'CEM': 'ClearBridge Energy MLP Fund Inc.',
'AFSI-D': 'AmTrust Financial Services, Inc.',
'JBLU': 'JetBlue Airways Corporation',
'AMH': 'American Homes 4 Rent',
'MPV': 'Babson Capital Participation Investors',
'KTN': 'Lehman ABS Corporation',
'JGV': 'Nuveen Global Equity Income Fund ',
'DXCM': 'DexCom, Inc.',
'EXPR': 'Express, Inc.',
'IPDN': 'Professional Diversity Network, Inc.',
'ATRC': 'AtriCure, Inc.',
'AKAM': 'Akamai Technologies, Inc.',
'HMHC': 'Houghton Mifflin Harcourt Company',
'ENH-C': 'Endurance Specialty Holdings Ltd',
'PEB': 'Pebblebrook Hotel Trust',
'RVP': 'Retractable Technologies, Inc.',
'TUBE': 'TubeMogul, Inc.',
'GENC': 'Gencor Industries Inc.',
'CIFC': 'CIFC LLC',
'AKS': 'AK Steel Holding Corporation',
'GPRE': 'Green Plains, Inc.',
'IBUY': 'Amplify Online Retail ETF',
'MTL': 'Mechel OAO',
'HBHCL': 'Hancock Holding Company',
'SSS': 'Sovran Self Storage, Inc.',
'HMG': 'HMG/Courtland Properties, Inc.',
'EFT': 'Eaton Vance Floating Rate Income Trust',
'SREV': 'ServiceSource International, Inc.',
'NRK': 'Nuveen New York AMT-Free Municipal Income Fund',
'EVSTC': 'Eaton Vance NextShares Trust',
'BOKF': 'BOK Financial Corporation',
'IPXL': 'Impax Laboratories, Inc.',
'MTH': 'Meritage Corporation',
'MCX': 'Medley Capital Corporation',
'NAC': 'Nuveen California Dividend Advantage Municipal Fund',
'KTOV': 'Kitov Pharmaceuticals Holdings Ltd.',
'CTR': 'ClearBridge Energy MLP Total Return Fund Inc.',
'REV': 'Revlon, Inc.',
'BAM$': 'Brookfield Asset Management Inc',
'HABT': 'The Habit Restaurants, Inc.',
'EBMT': 'Eagle Bancorp Montana, Inc.',
'PAH': 'Platform Specialty Products Corporation',
'RMGN': 'RMG Networks Holding Corporation',
'HEAR': 'Turtle Beach Corporation',
'LNKD': 'LinkedIn Corporation',
'ELECW': 'Electrum Special Acquisition Corporation',
'BCS-D': 'Barclays PLC',
'MYGN': 'Myriad Genetics, Inc.',
'EVR': 'Evercore Partners Inc',
'AQMS': 'Aqua Metals, Inc.',
'NKSH': 'National Bankshares, Inc.',
'MRKT': 'Markit Ltd.',
'TUP': 'Tupperware Brands Corporation',
'COMM': 'CommScope Holding Company, Inc.',
'BCS': 'Barclays PLC',
'EQY': 'Equity One, Inc.',
'OIIM': 'O2Micro International Limited',
'DSL': 'DoubleLine Income Solutions Fund',
'NIQ': 'Nuveen Intermediate Duration Quality Municipal Term Fund',
'ADXSW': 'Advaxis, Inc.',
'MDVN': 'Medivation, Inc.',
'ODFL': 'Old Dominion Freight Line, Inc.',
'SGF': 'Aberdeen Singapore Fund, Inc.',
'CHT': 'Chunghwa Telecom Co., Ltd.',
'BXP-B': 'Boston Properties, Inc.',
'AWK': 'American Water Works',
'WYN': 'Wyndham Worldwide Corp',
'MDGN': 'Medgenics, Inc.',
'TRQ': 'Turquoise Hill Resources Ltd.',
'AXS-C': 'Axis Capital Holdings Limited',
'GRX-A': 'The Gabelli Healthcare & Wellness Trust',
'SCG': 'Scana Corporation',
'BML-G': 'Bank of America Corporation',
'SAN': 'Banco Santander, S.A.',
'CHEK': 'Check-Cap Ltd.',
'AGM-C': 'Federal Agricultural Mortgage Corporation',
'RZA': 'Reinsurance Group of America, Incorporated',
'AET': 'Aetna Inc.',
'KTF': 'Scudder Municipal Income Trust',
'SATS': 'EchoStar Corporation',
'WHF': 'WhiteHorse Finance, Inc.',
'PBHC': 'Pathfinder Bancorp, Inc.',
'VSI': 'Vitamin Shoppe, Inc',
'DOOR': 'Masonite International Corporation',
'NMIH': 'NMI Holdings Inc',
'XXIA': 'Ixia',
'PEI-A': 'Pennsylvania Real Estate Investment Trust',
'EBSB': 'Meridian Bancorp, Inc.',
'ERIE': 'Erie Indemnity Company',
'DWIN': 'PowerShares DWA Tactical Multi-Asset Income Portfolio',
'TLP': 'Transmontaigne Partners L.P.',
'PKOH': 'Park-Ohio Holdings Corp.',
'CMCT': 'CIM Commercial Trust Corporation',
'FBRC': 'FBR & Co',
'PNC.WS': 'PNC Financial Services Group, Inc. (The)',
'OOMA': 'Ooma, Inc.',
'ANIK': 'Anika Therapeutics Inc.',
'ERC': 'Wells Fargo Multi-Sector Income Fund',
'JBK': 'Lehman ABS Corporation',
'CCFI': 'Community Choice Financial Inc.',
'BMY': 'Bristol-Myers Squibb Company',
'JMLP': 'Nuveen All Cap Energy MLP Opportunities Fund',
'EMAN': 'eMagin Corporation',
'UTX': 'United Technologies Corporation',
'BAM': 'Brookfield Asset Management Inc',
'ESTE': 'Earthstone Energy, Inc.',
'HSNI': 'HSN, Inc.',
'VTN': 'Invesco Trust for Investment Grade New York Municipal',
'POT': 'Potash Corporation of Saskatchewan Inc.',
'BGCA': 'BGC Partners, Inc.',
'MITT-A': 'AG Mortgage Investment Trust, Inc.',
'BHP': 'BHP Billiton Limited',
'CETC': 'Hongli Clean Energy Technologies Corp.',
'PACD': 'Pacific Drilling S.A.',
'CAPL': 'CrossAmerica Partners LP',
'CUK': 'Carnival Corporation',
'FTF': 'Franklin Limited Duration Income Trust',
'DIT': 'AMCON Distributing Company',
'PII': 'Polaris Industries Inc.',
'SAN-I': 'Banco Santander, S.A.',
'LOR': 'Lazard World Dividend & Income Fund, Inc.',
'LRCX': 'Lam Research Corporation',
'HTGC': 'Hercules Capital, Inc.',
'AMEH': 'Apollo Medical Holdings, Inc.',
'JMT': 'Nuveen Mortgage Opportunity Term Fund 2',
'LPX': 'Louisiana-Pacific Corporation',
'DTUS': 'region',
'VRSN': 'VeriSign, Inc.',
'CPE-A': 'Callon Petroleum Company',
'FH': 'FORM Holdings Corp.',
'RKDA': 'Arcadia Biosciences, Inc.',
'WSO': 'Watsco, Inc.',
'PG': 'Procter & Gamble Company (The)',
'MTEX': 'Mannatech, Incorporated',
'EFC': 'Ellington Financial LLC',
'JVA': 'Coffee Holding Co., Inc.',
'XIV': 'region',
'CAFD': '8point3 Energy Partners LP',
'ANDAR': 'Andina Acquisition Corp. II',
'MSEX': 'Middlesex Water Company',
'TOWR': 'Tower International, Inc.',
'FUND': 'Sprott Focus Trust, Inc.',
'FDML': 'Federal-Mogul Holdings Corporation',
'FTD': 'FTD Companies, Inc.',
'CETV': 'Central European Media Enterprises Ltd.',
'ARWAW': 'Arowana Inc.',
'MKSI': 'MKS Instruments, Inc.',
'XKE': 'Lehman ABS Corporation',
'DVA': 'DaVita HealthCare Partners Inc.',
'NGS': 'Natural Gas Services Group, Inc.',
'INVA': 'Innoviva, Inc.',
'PANW': 'Palo Alto Networks, Inc.',
'MAT': 'Mattel, Inc.',
'BGY': 'BLACKROCK INTERNATIONAL, LTD.',
'PNRG': 'PrimeEnergy Corporation',
'ATTU': 'Attunity Ltd.',
'WIT': 'Wipro Limited',
'VUSE': 'Vident Core US Equity ETF',
'PKY': 'Parkway Properties, Inc.',
'HT-C': 'Hersha Hospitality Trust',
'DFVL': 'region',
'ESSA': 'ESSA Bancorp, Inc.',
'MODN': 'Model N, Inc.',
'TKR': 'Timken Company (The)',
'NMT': 'Nuveen Massachusetts Premium Income Municipal Fund',
'SWKS': 'Skyworks Solutions, Inc.',
'CUB': 'Cubic Corporation',
'ARES-A': 'Ares Management L.P.',
'SOFO': 'Sonic Foundry, Inc.',
'PDFS': 'PDF Solutions, Inc.',
'GAIA': 'Gaiam, Inc.',
'HIG.WS': 'Hartford Financial Services Group, Inc. (The)',
'LEI': 'Lucas Energy, Inc.',
'SMCP': 'AlphaMark Actively Managed Small Cap ETF',
'PRK': 'Park National Corporation',
'LEN.B': 'Lennar Corporation',
'CUBA': 'The Herzfeld Caribbean Basin Fund, Inc.',
'PME': 'Pingtan Marine Enterprise Ltd.',
'HUBS': 'HubSpot, Inc.',
'CPG': 'Crescent Point Energy Corporation',
'AME': 'AMETEK, Inc.',
'VZ': 'Verizon Communications Inc.',
'MDU': 'MDU Resources Group, Inc.',
'WNR': 'Western Refining, Inc.',
'CBRL': 'Cracker Barrel Old Country Store, Inc.',
'ESGR': 'Enstar Group Limited',
'AINV': 'Apollo Investment Corporation',
'AST.WS': 'Asterias Biotherapeutics, Inc.',
'SOJA': 'Southern Company (The)',
'PEB-C': 'Pebblebrook Hotel Trust',
'PDEX': 'Pro-Dex, Inc.',
'CEB': 'CEB Inc.',
'RGS': 'Regis Corporation',
'CDXC': 'ChromaDex Corporation',
'CME': 'CME Group Inc.',
'STRL': 'Sterling Construction Company Inc',
'ORM': 'Owens Realty Mortgage, Inc.',
'QRVO': 'Qorvo, Inc.',
'HPI': 'John Hancock Preferred Income Fund',
'NP': 'Neenah Paper, Inc.',
'CYTX': 'Cytori Therapeutics Inc',
'SRCE': '1st Source Corporation',
'EQC': 'Equity Commonwealth',
'HTWR': 'Heartware International, Inc.',
'NTAP': 'NetApp, Inc.',
'JAGX': 'Jaguar Animal Health, Inc.',
'MQY': 'Blackrock MuniYield Quality Fund, Inc.',
'EPR': 'EPR Properties',
'VOYA': 'Voya Financial, Inc.',
'JOE': 'St. Joe Company (The)',
'UWN': 'Nevada Gold & Casinos, Inc.',
'BFIT': 'Global X Health & Wellness Thematic ETF',
'GEF': 'Greif Bros. Corporation',
'LAND': 'Gladstone Land Corporation',
'SMLP': 'Summit Midstream Partners, LP',
'SAND ': 'Sandstorm Gold Ltd',
'BOJA': 'Bojangles\', Inc.',
'TLK': 'PT Telekomunikasi Indonesia, Tbk',
'MTSL': 'MER Telemanagement Solutions Ltd.',
'FOF': 'Cohen & Steers Closed-End Opportunity Fund, Inc.',
'TMK-C': 'Torchmark Corporation',
'SNN': 'Smith & Nephew SNATS, Inc.',
'RF': 'Regions Financial Corporation',
'BUR': 'Burcon Nutrascience Corp',
'PRPH': 'ProPhase Labs, Inc.',
'FL': 'Foot Locker, Inc.',
'NLY-A': 'Annaly Capital Management Inc',
'MNDO': 'MIND C.T.I. Ltd.',
'BKU': 'BankUnited, Inc.',
'BFK': 'BlackRock Municipal Income Trust',
'BVXV': 'BiondVax Pharmaceuticals Ltd.',
'MTG': 'MGIC Investment Corporation',
'NID': 'Nuveen Intermediate Duration Municipal Term Fund',
'LUNA': 'Luna Innovations Incorporated',
'MPC': 'Marathon Petroleum Corporation',
'CXW': 'Corrections Corporation of America',
'NEE-C': 'NextEra Energy, Inc.',
'ESRT': 'Empire State Realty Trust, Inc.',
'ITCI': 'Intra-Cellular Therapies Inc.',
'OFLX': 'Omega Flex, Inc.',
'BBRY': 'BlackBerry Limited',
'MHO': 'M/I Homes, Inc.',
'JACK': 'Jack In The Box Inc.',
'HIBB': 'Hibbett Sports, Inc.',
'TRP': 'TransCanada Corporation',
'CHTR': 'Charter Communications, Inc.',
'NSL': 'Nuveen Senior Income Fund',
'FRME': 'First Merchants Corporation',
'CACI': 'CACI International, Inc.',
'QCOM': 'QUALCOMM Incorporated',
'OSK': 'Oshkosh Corporation',
'BRS': 'Bristow Group Inc',
'DRNA': 'Dicerna Pharmaceuticals, Inc.',
'DGRE': 'WisdomTree Emerging Markets Quality Dividend Growth Fund',
'BTT': 'BlackRock Municipal Target Term Trust Inc. (The)',
'CL': 'Colgate-Palmolive Company',
'AXDX': 'Accelerate Diagnostics, Inc.',
'BR': 'Broadridge Financial Solutions, Inc.',
'SSRG': 'Symmetry Surgical Inc.',
'JW.B': 'John Wiley & Sons, Inc.',
'STLD': 'Steel Dynamics, Inc.',
'WMIH': 'WMIH Corp.',
'MUE': 'Blackrock MuniHoldings Quality Fund II, Inc.',
'MFO': 'MFA Financial, Inc.',
'GJT': 'Synthetic Fixed-Income Securities, Inc.',
'KANG': 'iKang Healthcare Group, Inc.',
'TBK': 'Triumph Bancorp, Inc.',
'FAST': 'Fastenal Company',
'BIT': 'BlackRock Multi-Sector Income Trust',
'IVTY': 'Invuity, Inc.',
'ALLE': 'Allegion plc',
'TTPH': 'Tetraphase Pharmaceuticals, Inc.',
'ELLO': 'Ellomay Capital Ltd.',
'PSA-S': 'Public Storage',
'NRIM': 'Northrim BanCorp Inc',
'CCE': 'Coca-Cola European Partners plc',
'COHU': 'Cohu, Inc.',
'UNH': 'UnitedHealth Group Incorporated',
'EEQ': 'Enbridge Energy Management LLC',
'TRK': 'Speedway Motorsports, Inc.',
'CYBR': 'CyberArk Software Ltd.',
'CHW': 'Calamos Global Dynamic Income Fund',
'VER': 'VEREIT Inc.',
'SBSI': 'Southside Bancshares, Inc.',
'ARKR': 'Ark Restaurants Corp.',
'KR': 'Kroger Company (The)',
'ZDGE': 'Zedge, Inc.',
'GNE-A': 'Genie Energy Ltd.',
'CPF': 'CPB Inc.',
'PSCT': 'PowerShares S&P SmallCap Information Technology Portfolio',
'CUBS': 'Customers Bancorp, Inc',
'QLC': 'FlexShares US Quality Large Cap Index Fund',
'PUK-A': 'Prudential Public Limited Company',
'LTC': 'LTC Properties, Inc.',
'TCO': 'Taubman Centers, Inc.',
'PLAY': 'Dave & Buster\'s Entertainment, Inc.',
'CVI': 'CVR Energy Inc.',
'IPAS': 'iPass Inc.',
'FDT': 'First Trust Developed Markets Ex-US AlphaDEX Fund',
'PIY': 'Merrill Lynch Depositor, Inc.',
'TENX': 'Tenax Therapeutics, Inc.',
'BIOA.WS': 'BioAmber Inc.',
'OMAB': 'Grupo Aeroportuario del Centro Norte S.A.B. de C.V.',
'GLOG': 'GasLog LP.',
'NVCN': 'Neovasc Inc.',
'CAJ': 'Canon, Inc.',
'ETB': 'Eaton Vance Tax-Managed Buy-Write Income Fund',
'TDJ': 'Telephone and Data Systems, Inc.',
'HHS': 'Harte-Hanks, Inc.',
'YIN': 'Yintech Investment Holdings Limited',
'MTU': 'Mitsubishi UFJ Financial Group Inc',
'BBCN': 'BBCN Bancorp, Inc.',
'SSB': 'South State Corporation',
'PWX': 'Providence and Worcester Railroad Company',
'FRC-G': 'FIRST REPUBLIC BANK',
'KOP': 'Koppers Holdings Inc.',
'RITTW': 'RIT Technologies Ltd.',
'ITI': 'Iteris, Inc.',
'HEP': 'Holly Energy Partners, L.P.',
'GBNK': 'Guaranty Bancorp',
'MTOR': 'Meritor, Inc.',
'MOKO': 'Moko Social Media Ltd.',
'TNDM': 'Tandem Diabetes Care, Inc.',
'DDD': '3D Systems Corporation',
'VTIP': 'Vanguard Short-Term Inflation-Protected Securities Index Fund',
'WBAI': '500.com Limited',
'FFC': 'Flaherty & Crumrine Preferred Securities Income Fund Inc',
'SFUN': 'SouFun Holdings Limited',
'RY': 'Royal Bank Of Canada',
'CUBE-A': 'CubeSmart',
'FIG': 'Fortress Investment Group LLC',
'CRVS': 'Corvus Pharmaceuticals, Inc.',
'WYNN': 'Wynn Resorts, Limited',
'MFRM': 'Mattress Firm Holding Corp.',
'LBTYA': 'Liberty Global plc',
'KF': 'Korea Fund, Inc. (The)',
'VXUP': 'AccuShares Spot CBOE VIX Up Shares',
'OIBR.C': 'Oi S.A.',
'TSQ': 'Townsquare Media, Inc.',
'PNK': 'Pinnacle Entertainment, Inc.',
'ZTS': 'Zoetis Inc.',
'CNIT': 'China Information Technology, Inc.',
'YUMA': 'Yuma Energy, Inc.',
'PNC-P': 'PNC Financial Services Group, Inc. (The)',
'NICE': 'NICE-Systems Limited',
'INFU': 'InfuSystems Holdings, Inc.',
'HAL': 'Halliburton Company',
'DRE': 'Duke Realty Corporation',
'YDIV': 'First Trust International Multi-Asset Diversified Income Index',
'BOOM': 'Dynamic Materials Corporation',
'CFFI': 'C&F Financial Corporation',
'SSW': 'Seaspan Corporation',
'GARS': 'Garrison Capital Inc.',
'MVG': 'Mag Silver Corporation',
'RSG': 'Republic Services, Inc.',
'YPF': 'YPF Sociedad Anonima',
'UPLD': 'Upland Software, Inc.',
'RFAP': 'First Trust RiverFront Dynamic Asia Pacific ETF',
'CYD': 'China Yuchai International Limited',
'FCVT': 'First Trust SSI Strategic Convertible Securities ETF',
'SONC': 'Sonic Corp.',
'PVCT': 'Provectus Biopharmaceuticals, Inc.',
'TOWN': 'Towne Bank',
'ADC': 'Agree Realty Corporation',
'JPM-D': 'J P Morgan Chase & Co',
'LAD': 'Lithia Motors, Inc.',
'ARW': 'Arrow Electronics, Inc.',
'SNDE': 'Sundance Energy Australia Limited',
'IRIX': 'IRIDEX Corporation',
'FGL': 'Fidelity and Guaranty Life',
'CLVS': 'Clovis Oncology, Inc.',
'MRIN': 'Marin Software Incorporated',
'IMPR': 'Imprivata, Inc.',
'ATKR': 'Atkore International Group Inc.',
'IDSA': 'Industrial Services of America, Inc.',
'CGG': 'CGG',
'DRRX': 'Durect Corporation',
'MGH': 'Minco Gold Corporation',
'QHC': 'Quorum Health Corporation',
'SRV': 'The Cushing MLP Total Return Fund',
'AGND': 'WisdomTree Barclays U.S. Aggregate Bond Negative Duration Fund',
'SCD': 'LMP Capital and Income Fund Inc.',
'ZIV': 'region',
'ALLY-A': 'Ally Financial Inc.',
'GWRE': 'Guidewire Software, Inc.',
'BIOS': 'BioScrip, Inc.',
'VKI': 'Invesco Advantage Municipal Income Trust II',
'SGBK': 'Stonegate Bank',
'ZEN': 'Zendesk, Inc.',
'COHR': 'Coherent, Inc.',
'JBN': 'Select Asset Inc.',
'ETW': 'Eaton Vance Corporation',
'HZO': 'MarineMax, Inc.',
'BKN': 'BlackRock Investment Quality Municipal Trust Inc. (The)',
'MPCT': 'iShares Sustainable MSCI Global Impact ETF',
'RPAI-A': 'Retail Properties of America, Inc.',
'UHT': 'Universal Health Realty Income Trust',
'KONE': 'Kingtone Wirelessinfo Solution Holding Ltd',
'GXP-D': 'Great Plains Energy Inc',
'ELNK': 'EarthLink Holdings Corp.',
'CLUB': 'Town Sports International Holdings, Inc.',
'AN': 'AutoNation, Inc.',
'NOW': 'ServiceNow, Inc.',
'ALOG': 'Analogic Corporation',
'BKJ': 'Bancorp of New Jersey, Inc',
'ELJ': 'Entergy Louisiana, Inc.',
'VRAY': 'ViewRay, Inc.',
'KRC-H': 'Kilroy Realty Corporation',
'MIFI': 'Novatel Wireless, Inc.',
'WBC': 'Wabco Holdings Inc.',
'VTAE': 'Vitae Pharmaceuticals, Inc.',
'ETRM': 'EnteroMedics Inc.',
'HLIT': 'Harmonic Inc.',
'CTX': 'Qwest Corporation',
'SELB': 'Selecta Biosciences, Inc.',
'FOMX': 'Foamix Pharmaceuticals Ltd.',
'BEN': 'Franklin Resources, Inc.',
'IF': 'Aberdeen Indonesia Fund, Inc.',
'CASS': 'Cass Information Systems, Inc',
'COYN': 'COPsync, Inc.',
'NVAX': 'Novavax, Inc.',
'EGHT': '8x8 Inc',
'HSBC': 'HSBC Holdings plc',
'MITT': 'AG Mortgage Investment Trust, Inc.',
'MTBC': 'Medical Transcription Billing, Corp.',
'BKSC': 'Bank of South Carolina Corp.',
'UTG': 'Reaves Utility Income Fund',
'IMDZ': 'Immune Design Corp.',
'ASPN': 'Aspen Aerogels, Inc.',
'LODE': 'Comstock Mining, Inc.',
'MER-P': 'Merrill Lynch & Co., Inc.',
'IHC': 'Independence Holding Company',
'IBKR': 'Interactive Brokers Group, Inc.',
'FMI': 'Foundation Medicine, Inc.',
'EVAR': 'Lombard Medical, Inc.',
'ABEV': 'Ambev S.A.',
'AYA': 'Amaya Inc.',
'CHI': 'Calamos Convertible Opportunities and Income Fund',
'SCM': 'Stellus Capital Investment Corporation',
'HFC': 'HollyFrontier Corporation',
'JOY': 'Joy Global Inc.',
'PRMW': 'Primo Water Corporation',
'BAC-W': 'Bank of America Corporation',
'XEC': 'Cimarex Energy Co',
'P': 'Pandora Media, Inc.',
'TTC': 'Toro Company (The)',
'EXD': 'Eaton Vance Tax-Advantaged Bond',
'NCOM': 'National Commerce Corporation',
'TFSCR': '1347 Capital Corp.',
'MLP': 'Maui Land & Pineapple Company, Inc.',
'BLUE': 'bluebird bio, Inc.',
'IXYS': 'IXYS Corporation',
'ELB': 'Entergy Louisiana, Inc.',
'NHF': 'NexPoint Credit Strategies Fund',
'MS-I': 'Morgan Stanley',
'UMPQ': 'Umpqua Holdings Corporation',
'VCSH': 'Vanguard Short-Term Corporate Bond ETF',
'EMJ': 'Eaton Vance New Jersey Municipal Bond Fund',
'XPLR': 'Xplore Technologies Corp',
'CYTR': 'CytRx Corporation',
'VIVO': 'Meridian Bioscience Inc.',
'PNTR': 'Pointer Telocation Ltd.',
'WWD': 'Woodward, Inc.',
'CPAH': 'CounterPath Corporation',
'PVG': 'Pretium Resources, Inc.',
'IRT': 'Independence Realty Trust, Inc.',
'FLO': 'Flowers Foods, Inc.',
'AHL': 'Aspen Insurance Holdings Limited',
'MFG': 'Mizuho Financial Group, Inc.',
'DPZ': 'Domino\'s Pizza Inc',
'NUV': 'Nuveen AMT-Free Municipal Value Fund',
'GEL': 'Genesis Energy, L.P.',
'HWAY': 'Healthways, Inc.',
'RLI': 'RLI Corp.',
'GRFS': 'Grifols, S.A.',
'NMFC': 'New Mountain Finance Corporation',
'BSFT': 'BroadSoft, Inc.',
'STI-E': 'SunTrust Banks, Inc.',
'CNK': 'Cinemark Holdings Inc',
'BLIN ': 'Bridgeline Digital, Inc.',
'SHLM': 'A. Schulman, Inc.',
'IDI': 'IDI, Inc.',
'AL': 'Air Lease Corporation',
'SPWH': 'Sportsman\'s Warehouse Holdings, Inc.',
'STT-E': 'State Street Corporation',
'AGO-E': 'Assured Guaranty Ltd.',
'LUB': 'Luby\'s, Inc.',
'GLBZ': 'Glen Burnie Bancorp',
'NCLH': 'Norwegian Cruise Line Holdings Ltd.',
'FRC-A': 'FIRST REPUBLIC BANK',
'ESES': 'Eco-Stim Energy Solutions, Inc.',
'ANGI': 'Angie\'s List, Inc.',
'ELS-C': 'Equity Lifestyle Properties, Inc.',
'DLNG-A': 'Dynagas LNG Partners LP',
'NDSN': 'Nordson Corporation',
'SHLX': 'Shell Midstream Partners, L.P.',
'NHC': 'National HealthCare Corporation',
'HST': 'Host Hotels & Resorts, Inc.',
'BTO': 'John Hancock Financial Opportunities Fund',
'CEVA': 'CEVA, Inc.',
'IBIO': 'iBio, Inc.',
'AIQ': 'Alliance HealthCare Services, Inc.',
'CBT': 'Cabot Corporation',
'CST': 'CST Brands, Inc.',
'COYNW': 'COPsync, Inc.',
'KYO': 'Kyocera Corporation',
'OAKS': 'Five Oaks Investment Corp.',
'TCO-K': 'Taubman Centers, Inc.',
'NNN': 'National Retail Properties',
'JAZZ': 'Jazz Pharmaceuticals plc',
'GCAP': 'GAIN Capital Holdings, Inc.',
'FCCO': 'First Community Corporation',
'EFF': 'Eaton Vance Floating-Rate Income Plus Fund',
'OMN': 'OMNOVA Solutions Inc.',
'ACLS': 'Axcelis Technologies, Inc.',
'LOW': 'Lowe\'s Companies, Inc.',
'IART': 'Integra LifeSciences Holdings Corporation',
'CHMI': 'Cherry Hill Mortgage Investment Corporation',
'GAINN': 'Gladstone Investment Corporation',
'JDD': 'Nuveen Diversified Dividend and Income Fund',
'NM': 'Navios Maritime Holdings Inc.',
'AERI': 'Aerie Pharmaceuticals, Inc.',
'TZOO': 'Travelzoo Inc.',
'C-K': 'Citigroup Inc.',
'NEFF': 'Neff Corporation',
'SNHY': 'Sun Hydraulics Corporation',
'PPS': 'Post Properties, Inc.',
'OME': 'Omega Protein Corporation',
'EQT': 'EQT Corporation',
'VSEC': 'VSE Corporation',
'STJ': 'St. Jude Medical, Inc.',
'SNOW': 'Intrawest Resorts Holdings, Inc.',
'OFG-D': 'OFG Bancorp',
'CLAC': 'Capitol Acquisition Corp. III',
'SYT': 'Syngenta AG',
'ZN': 'Zion Oil & Gas Inc',
'VISN': 'VisionChina Media, Inc.',
'VRTB': 'Vestin Realty Mortgage II, Inc.',
'PRSC': 'The Providence Service Corporation',
'CNXC': 'CNX Coal Resources LP',
'CCS': 'Century Communities, Inc.',
'YY': 'YY Inc.',
'QBAK': 'Qualstar Corporation',
'PNRA': 'Panera Bread Company',
'TILE': 'Interface, Inc.',
'DCM': 'NTT DOCOMO, Inc',
'TSRO': 'TESARO, Inc.',
'LAZ': 'Lazard Ltd.',
'CWBC': 'Community West Bancshares',
'NTG': 'Tortoise MLP Fund, Inc.',
'HIIQ': 'Health Insurance Innovations, Inc.',
'OGXI': 'OncoGenex Pharmaceuticals Inc.',
'THC': 'Tenet Healthcare Corporation',
'GSI': 'General Steel Holdings, Inc.',
'CHEKW': 'Check-Cap Ltd.',
'JBHT': 'J.B. Hunt Transport Services, Inc.',
'WSBC': 'WesBanco, Inc.',
'STML': 'Stemline Therapeutics, Inc.',
'MCR': 'MFS Charter Income Trust',
'STR': 'Questar Corporation',
'HBANO': 'Huntington Bancshares Incorporated',
'GCVRZ': 'Sanofi',
'APA': 'Apache Corporation',
'GYB': 'CABCO Series 2004-101 Trust',
'EQM': 'EQT Midstream Partners, LP',
'HTGX': 'Hercules Capital, Inc.',
'ZAYO': 'Zayo Group Holdings, Inc.',
'NNN-D': 'National Retail Properties',
'RZB': 'Reinsurance Group of America, Incorporated',
'PNX': 'Phoenix Companies, Inc. (The)',
'SGNT': 'Sagent Pharmaceuticals, Inc.',
'WK': 'Workiva Inc.',
'FV': 'First Trust Dorsey Wright Focus 5 ETF',
'ACP': 'Avenue Income Credit Strategies Fund',
'BLKB': 'Blackbaud, Inc.',
'NAUH': 'National American University Holdings, Inc.',
'SCE-H': 'Southern California Edison Company',
'PAC': 'Grupo Aeroportuario Del Pacifico, S.A. de C.V.',
'VLY-A': 'Valley National Bancorp',
'AED': 'Aegon NV',
'KLXI': 'KLX Inc.',
'ENB': 'Enbridge Inc',
'ORA': 'Ormat Technologies, Inc.',
'GGAC': 'Garnero Group Acquisition Company',
'AVAV': 'AeroVironment, Inc.',
'MHLD': 'Maiden Holdings, Ltd.',
'KEP': 'Korea Electric Power Corporation',
'USCR': 'U S Concrete, Inc.',
'ASM': 'Avino Silver',
'CFR-A': 'Cullen/Frost Bankers, Inc.',
'AMWD': 'American Woodmark Corporation',
'VCYT': 'Veracyte, Inc.',
'COP': 'ConocoPhillips',
'MHN': 'Blackrock MuniHoldings New York Quality Fund, Inc.',
'HQCL': 'Hanwha Q CELLS Co., Ltd. ',
'OIS': 'Oil States International, Inc.',
'PHF': 'Pacholder High Yield Fund, Inc.',
'FGP': 'Ferrellgas Partners, L.P.',
'AUDC': 'AudioCodes Ltd.',
'QIWI': 'QIWI plc',
'CLBH': 'Carolina Bank Holdings Inc.',
'AMID': 'American Midstream Partners, LP',
'KAP': 'KCAP Financial, Inc.',
'UZA': 'United States Cellular Corporation',
'SBLKL': 'Star Bulk Carriers Corp.',
'PRTA': 'Prothena Corporation plc',
'BAA': 'BANRO CORPORATION',
'HASI': 'Hannon Armstrong Sustainable Infrastructure Capital, Inc.',
'WVVIP': 'Willamette Valley Vineyards, Inc.',
'CCA': 'MFS California Insured Municipal Trust',
'LADR': 'Ladder Capital Corp',
'ATEC': 'Alphatec Holdings, Inc.',
'KMF': 'Kayne Anderson Midstream Energy Fund, Inc',
'UAM': 'Universal American Corp.',
'VRX': 'Valeant Pharmaceuticals International, Inc.',
'ITEQ': 'BlueStar TA-BIGITech Israel Technology ETF',
'RLGT': 'Radiant Logistics, Inc.',
'MARPS': 'Marine Petroleum Trust',
'CSI': 'Cutwater Select Income Fund',
'HGH': 'Hartford Financial Services Group, Inc. (The)',
'SHOO': 'Steven Madden, Ltd.',
'MEG': 'Media General, Inc.',
'MOMO': 'Momo Inc.',
'LJPC': 'La Jolla Pharmaceutical Company',
'RF-A': 'Regions Financial Corporation',
'CHCT': 'Community Healthcare Trust Incorporated',
'CPRT': 'Copart, Inc.',
'CMRE-D': 'Costamare Inc.',
'CSGP': 'CoStar Group, Inc.',
'COWNL': 'Cowen Group, Inc.',
'FHCO': 'Female Health Company (The)',
'CPE': 'Callon Petroleum Company',
'BIB': 'ProShares Ultra Nasdaq Biotechnology',
'OSTK': 'Overstock.com, Inc.',
'NGVT': 'Ingevity Corporation',
'XGTIW': 'XG Technology, Inc',
'OSN': 'Ossen Innovation Co., Ltd.',
'KLAC': 'KLA-Tencor Corporation',
'DLBL': 'region',
'EBAY': 'eBay Inc.',
'ABE ': 'Aberdeen Emerging Markets Smaller Company Opportunities Fund I',
'KMPH': 'KemPharm, Inc.',
'VOC': 'VOC Energy Trust',
'PKI': 'PerkinElmer, Inc.',
'ALQA': 'Alliqua BioMedical, Inc.',
'MLR': 'Miller Industries, Inc.',
'EACQW': 'Easterly Acquisition Corp.',
'BKHU': 'Black Hills Corporation',
'ZBRA': 'Zebra Technologies Corporation',
'NGD': 'NEW GOLD INC.',
'VNRBP': 'Vanguard Natural Resources LLC',
'MOG.A': 'Moog Inc.',
'CCMP': 'Cabot Microelectronics Corporation',
'APU': 'AmeriGas Partners, L.P.',
'SNC': 'State National Companies, Inc.',
'APIC': 'Apigee Corporation',
'FPRX': 'Five Prime Therapeutics, Inc.',
'PHII': 'PHI, Inc.',
'BPL': 'Buckeye Partners L.P.',
'ZX': 'China Zenix Auto International Limited',
'FOE': 'Ferro Corporation',
'TSCO': 'Tractor Supply Company',
'CHA': 'China Telecom Corp Ltd',
'PRTY': 'Party City Holdco Inc.',
'PUMP': 'Asante Solutions, Inc.',
'ZYNE': 'Zynerba Pharmaceuticals, Inc.',
'CRCM': 'Care.com, Inc.',
'IFON': 'InfoSonics Corp',
'NATI': 'National Instruments Corporation',
'R': 'Ryder System, Inc.',
'TSRA': 'Tessera Technologies, Inc.',
'PNR': 'Pentair plc.',
'UNB': 'Union Bankshares, Inc.',
'TBIO': 'Transgenomic, Inc.',
'CTSO': 'Cytosorbents Corporation',
'PFPT': 'Proofpoint, Inc.',
'NYMX': 'Nymox Pharmaceutical Corporation',
'ICA': 'Empresas Ica Soc Controladora',
'ULTI': 'The Ultimate Software Group, Inc.',
'CART': 'Carolina Trust Bank',
'YRD': 'Yirendai Ltd.',
'IROQ': 'IF Bancorp, Inc.',
'FGM': 'First Trust Germany AlphaDEX Fund',
'CIK': 'Credit Suisse Asset Management Income Fund, Inc.',
'BYBK': 'Bay Bancorp, Inc.',
'LIME': 'Lime Energy Co.',
'FFKT': 'Farmers Capital Bank Corporation',
'LGND': 'Ligand Pharmaceuticals Incorporated',
'NDAQ': 'Nasdaq, Inc.',
'HVT.A': 'Haverty Furniture Companies, Inc.',
'SAN-B': 'Banco Santander, S.A.',
'HNW': 'Pioneer Diversified High Income Trust',
'ULTA': 'Ulta Salon, Cosmetics & Fragrance, Inc.',
'SLQD': 'iShares 0-5 Year Investment Grade Corporate Bond ETF',
'CUBI-C': 'Customers Bancorp, Inc',
'EAGLU': 'Double Eagle Acquisition Corp.',
'DWTR': 'PowerShares DWA Tactical Sector Rotation Portfolio',
'KYE': 'Kayne Anderson Energy Total Return Fund, Inc.',
'WAGE': 'WageWorks, Inc.',
'BEL': 'Belmond Ltd.',
'MTGE': 'American Capital Mortgage Investment Corp.',
'PTSI': 'P.A.M. Transportation Services, Inc.',
'RYAM': 'Rayonier Advanced Materials Inc.',
'PSCE': 'PowerShares S&P SmallCap Energy Portfolio',
'SE': 'Spectra Energy Corp',
'FMD': 'First Marblehead Corporation (The)',
'NTRA': 'Natera, Inc.',
'EIG': 'Employers Holdings Inc',
'PFBI': 'Premier Financial Bancorp, Inc.',
'VNO-J': 'Vornado Realty Trust',
'BREW': 'Craft Brew Alliance, Inc.',
'OSHC': 'Ocean Shore Holding Co.',
'SILC': 'Silicom Ltd',
'RSO-A': 'Resource Capital Corp.',
'PNM': 'PNM Resources, Inc. (Holding Co.)',
'FSBK': 'First South Bancorp Inc',
'NNN-E': 'National Retail Properties',
'HBNC': 'Horizon Bancorp (IN)',
'JRJR': 'JRjr33, Inc.',
'SHI': 'SINOPEC Shanghai Petrochemical Company, Ltd.',
'MCS': 'Marcus Corporation (The)',
'KT': 'KT Corporation',
'BRKL': 'Brookline Bancorp, Inc.',
'UNVR': 'Univar Inc.',
'IDCC': 'InterDigital, Inc.',
'HUBB': 'Hubbell Inc',
'ENVA': 'Enova International, Inc.',
'ROBO': 'ROBO Global Robotics and Automation Index ETF',
'BUI': 'BlackRock Utility and Infrastructure Trust',
'ECYT': 'Endocyte, Inc.',
'TIVO': 'TiVo Inc.',
'TDS': 'Telephone and Data Systems, Inc.',
'AMG': 'Affiliated Managers Group, Inc.',
'OMAM': 'OM Asset Management plc',
'GS-B': 'Goldman Sachs Group, Inc. (The)',
'LGIH': 'LGI Homes, Inc.',
'LXK': 'Lexmark International, Inc.',
'OASM': 'Oasmia Pharmaceutical AB',
'ANTX': 'Anthem, Inc.',
'MPEL': 'Melco Crown Entertainment Limited',
'UVV': 'Universal Corporation',
'WST': 'West Pharmaceutical Services, Inc.',
'DY': 'Dycom Industries, Inc.',
'GFI': 'Gold Fields Limited',
'AOI': 'Alliance One International, Inc.',
'GOOD': 'Gladstone Commercial Corporation',
'GFF': 'Griffon Corporation',
'DDR': 'DDR Corp.',
'AGLE': 'Aeglea BioTherapeutics, Inc.',
'BANF': 'BancFirst Corporation',
'SCAI': 'Surgical Care Affiliates, Inc.',
'OFG': 'OFG Bancorp',
'SCSS': 'Select Comfort Corporation',
'PES': 'Pioneer Energy Services Corp.',
'FSTR': 'L.B. Foster Company',
'ACFC': 'Atlantic Coast Financial Corporation',
'DRII': 'Diamond Resorts International, Inc.',
'CFX': 'Colfax Corporation',
'OFC': 'Corporate Office Properties Trust',
'WPG-I': 'WP Glimcher Inc.',
'JHY': 'Nuveen High Income 2020 Target Term Fund',
'NGVC': 'Natural Grocers by Vitamin Cottage, Inc.',
'MNI': 'McClatchy Company (The)',
'DISCB': 'Discovery Communications, Inc.',
'IRMD': 'iRadimed Corporation',
'SOV-C': 'Santander Holdings USA, Inc.',
'BMS': 'Bemis Company, Inc.',
'AVGR': 'Avinger, Inc.',
'PTR': 'PetroChina Company Limited',
'HNI': 'HNI Corporation',
'CELP': 'Cypress Energy Partners, L.P.',
'SNI': 'Scripps Networks Interactive, Inc',
'FNFV': 'Fidelity National Financial, Inc.',
'MOBL': 'MobileIron, Inc.',
'SRCL': 'Stericycle, Inc.',
'ESCA': 'Escalade, Incorporated',
'HES': 'Hess Corporation',
'MITL': 'Mitel Networks Corporation',
'OCLS': 'Oculus Innovative Sciences, Inc.',
'SEM': 'Select Medical Holdings Corporation',
'XRM': 'Xerium Technologies, Inc.',
'MH-C': 'Maiden Holdings, Ltd.',
'ICLD': 'InterCloud Systems, Inc',
'TLI': 'Western Asset Corporate Loan Fund Inc',
'VALX': 'Validea Market Legends ETF',
'BIG': 'Big Lots, Inc.',
'RS': 'Reliance Steel & Aluminum Co.',
'NFJ': 'AllianzGI NFJ Dividend, Interest & Premium Strategy Fund',
'DOV': 'Dover Corporation',
'EDU': 'New Oriental Education & Technology Group, Inc.',
'TOPS': 'TOP Ships Inc.',
'SPG': 'Simon Property Group, Inc.',
'ERA': 'Era Group, Inc.',
'PERF': 'Perfumania Holdings, Inc',
'UGI': 'UGI Corporation',
'CGO': 'Calamos Global Total Return Fund',
'DMO': 'Western Asset Mortgage Defined Opportunity Fund Inc',
'FPO': 'First Potomac Realty Trust',
'CCK': 'Crown Holdings, Inc.',
'OSUR': 'OraSure Technologies, Inc.',
'PDCO': 'Patterson Companies, Inc.',
'AMBCW': 'Ambac Financial Group, Inc.',
'BWL.A': 'Bowl America, Inc.',
'PLG': 'Platinum Group Metals Ltd.',
'CBPX': 'Continental Building Products, Inc.',
'NSAT': 'Norsat International Inc.',
'PANL': 'Pangaea Logistics Solutions Ltd.',
'PCTI': 'PC-Tel, Inc.',
'IFMI': 'Institutional Financial Markets, Inc.',
'PAR': 'PAR Technology Corporation',
'SMIT': 'Schmitt Industries, Inc.',
'DAR': 'Darling Ingredients Inc.',
'TLN': 'Talen Energy Corporation',
'CONG': 'congatec Holding AG',
'FPXI': 'First Trust International IPO ETF',
'BSF': 'Bear State Financial, Inc.',
'GEOS': 'Geospace Technologies Corporation',
'PUK': 'Prudential Public Limited Company',
'ETH': 'Ethan Allen Interiors Inc.',
'XXII': '22nd Century Group, Inc',
'APWC': 'Asia Pacific Wire & Cable Corporation Limited',
'CLA': 'Capitala Finance Corp.',
'SYNL': 'Synalloy Corporation',
'ABC': 'AmerisourceBergen Corporation (Holding Co)',
'WINS': 'Wins Finance Holdings Inc.',
'WM': 'Waste Management, Inc.',
'EXA': 'Exa Corporation',
'APH': 'Amphenol Corporation',
'ELY': 'Callaway Golf Company',
'MPSX': 'Multi Packaging Solutions International Limited',
'NVTA': 'Invitae Corporation',
'KGC': 'Kinross Gold Corporation',
'BGG': 'Briggs & Stratton Corporation',
'LIFE': 'aTyr Pharma, Inc.',
'HBIO': 'Harvard Bioscience, Inc.',
'FOXF': 'Fox Factory Holding Corp.',
'HAS': 'Hasbro, Inc.',
'NVFY': 'Nova Lifestyle, Inc',
'PCH': 'Potlatch Corporation',
'EAGLW': 'Double Eagle Acquisition Corp.',
'TDG': 'Transdigm Group Incorporated',
'OLED': 'Universal Display Corporation',
'CIM': 'Chimera Investment Corporation',
'TRIL': 'Trillium Therapeutics Inc.',
'ALL-E': 'Allstate Corporation (The)',
'PNC-Q': 'PNC Financial Services Group, Inc. (The)',
'AMED': 'Amedisys Inc',
'OTEL': 'Otelco Inc.',
'GNMA': 'iShares GNMA Bond ETF',
'MMS': 'Maximus, Inc.',
'CCO': 'Clear Channel Outdoor Holdings, Inc.',
'NCT-B': 'Newcastle Investment Corporation',
'BLD': 'TopBuild Corp.',
'NGHCP': 'National General Holdings Corp',
'MFRI': 'MFRI, Inc.',
'SJI': 'South Jersey Industries, Inc.',
'ATHX': 'Athersys, Inc.',
'BWA': 'BorgWarner Inc.',
'INST': 'Instructure, Inc.',
'KLDX': 'Klondex Mines Ltd.',
'SSBI': 'Summit State Bank',
'HHC': 'Howard Hughes Corporation (The)',
'FXCM': 'FXCM Inc.',
'TDA': 'Telephone and Data Systems, Inc.',
'FAD': 'First Trust Multi Cap Growth AlphaDEX Fund',
'UFI': 'Unifi, Inc.',
'BGI': 'Birks Group Inc.',
'CRD.A': 'Crawford & Company',
'UQM': 'UQM TECHNOLOGIES INC',
'STM': 'STMicroelectronics N.V.',
'ASFI': 'Asta Funding, Inc.',
'WAL': 'Western Alliance Bancorporation',
'RGC': 'Regal Entertainment Group',
'HNR': 'Harvest Natural Resources Inc',
'CPAC': 'Cementos Pacasmayo S.A.A.',
'JCS': 'Communications Systems, Inc.',
'ONE ': 'Higher One Holdings, Inc.',
'TITN': 'Titan Machinery Inc.',
'LM': 'Legg Mason, Inc.',
'HLM-': 'Hillman Group Capital Trust',
'DFVS': 'region',
'JPM': 'J P Morgan Chase & Co',
'IOTS': 'Adesto Technologies Corporation',
'UUUU': 'Energy Fuels Inc',
'GSS': 'Golden Star Resources, Ltd',
'SMT': 'SMART Technologies Inc.',
'TROVW': 'TrovaGene, Inc.',
'AVP': 'Avon Products, Inc.',
'SPB ': 'Spectrum Brands Holdings, Inc.',
'FTNT': 'Fortinet, Inc.',
'PSB-S': 'PS Business Parks, Inc.',
'NSM': 'Nationstar Mortgage Holdings Inc.',
'CRTN': 'Cartesian, Inc.',
'ITUB': 'Itau Unibanco Banco Holding SA',
'DEI': 'Douglas Emmett, Inc.',
'CB': 'D/B/A Chubb Limited New',
'FKO': 'First Trust South Korea AlphaDEX Fund',
'CVRS': 'Corindus Vascular Robotics, Inc.',
'ARGS': 'Argos Therapeutics, Inc.',
'IVR-A': 'Invesco Mortgage Capital Inc.',
'RGLD': 'Royal Gold, Inc.',
'AEE': 'Ameren Corporation',
'CBLI': 'Cleveland BioLabs, Inc.',
'PE': 'Parsley Energy, Inc.',
'BBN': 'BlackRock Taxable Municipal Bond Trust',
'BANFP': 'BancFirst Corporation',
'MOH': 'Molina Healthcare Inc',
'XONE': 'The ExOne Company',
'EEA': 'European Equity Fund, Inc. (The)',
'SRPT': 'Sarepta Therapeutics, Inc.',
'MAB': 'Eaton Vance Massachusetts Municipal Bond Fund',
'DG': 'Dollar General Corporation',
'NVO': 'Novo Nordisk A/S',
'HYND': 'WisdomTree BofA Merrill Lynch High Yield Bond Negative Duratio',
'BABA': 'Alibaba Group Holding Limited',
'IRCP': 'IRSA Propiedades Comerciales S.A.',
'SBGL': 'Sibanye Gold Limited',
'FMS': 'Fresenius Medical Care Corporation',
'VMET': 'Viamet Pharmaceuticals Corp.',
'CBMG': 'Cellular Biomedicine Group, Inc.',
'PCQ': 'PIMCO California Municipal Income Fund',
'ALIM': 'Alimera Sciences, Inc.',
'TIF': 'Tiffany & Co.',
'CDTX': 'Cidara Therapeutics, Inc.',
'ADM': 'Archer-Daniels-Midland Company',
'LORL': 'Loral Space and Communications, Inc.',
'CSQ': 'Calamos Strategic Total Return Fund',
'MCC': 'Medley Capital Corporation',
'UGP': 'Ultrapar Participacoes S.A.',
'FAAR': 'First Trust Alternative Absolute Return Strategy ETF',
'LABL': 'Multi-Color Corporation',
'TIK': 'Tel-Instrument Electronics Corp.',
'BRKR': 'Bruker Corporation',
'CSOD': 'Cornerstone OnDemand, Inc.',
'APPF': 'AppFolio, Inc.',
'GBLIZ': 'Global Indemnity plc',
'HLS': 'HealthSouth Corporation',
'DM': 'Dominion Midstream Partners, LP',
'TRNO': 'Terreno Realty Corporation',
'ARR-B': 'ARMOUR Residential REIT, Inc.',
'OTIC': 'Otonomy, Inc.',
'CFO': 'Victory CEMP US 500 Enhanced Volatility Wtd Index ETF',
'FBR': 'Fibria Celulose S.A.',
'AEMD': 'Aethlon Medical, Inc.',
'VALU': 'Value Line, Inc.',
'GUT-A': 'Gabelli Utility Trust (The)',
'PNFP': 'Pinnacle Financial Partners, Inc.',
'AVH': 'Avianca Holdings S.A.',
'BIIB': 'Biogen Inc.',
'TRXC': 'TransEnterix, Inc.',
'KMI.WS': 'Kinder Morgan, Inc.',
'MJCO': 'Majesco',
'TGC': 'Tengasco, Inc.',
'LTBR': 'Lightbridge Corporation',
'CXDC': 'China XD Plastics Company Limited',
'CVB': 'Lehman ABS Corporation',
'SCE-G': 'Southern California Edison Company',
'AUMN': 'Golden Minerals Company',
'GGP': 'General Growth Properties, Inc.',
'KEF': 'Korea Equity Fund, Inc.',
'GDEN': 'Golden Entertainment, Inc.',
'CTG': 'Computer Task Group, Incorporated',
'EIX': 'Edison International',
'CLI': 'Mack-Cali Realty Corporation',
'TPUB': 'Tribune Publishing Company',
'MCEP': 'Mid-Con Energy Partners, LP',
'CEZ': 'Victory CEMP Emerging Market Volatility Wtd Index ETF',
'ENJ': 'Entergy New Orleans, Inc.',
'VSTM': 'Verastem, Inc.',
'MFSF': 'MutualFirst Financial Inc.',
'VASC': 'Vascular Solutions, Inc.',
'FNSR': 'Finisar Corporation',
'MBFIP': 'MB Financial Inc.',
'SNA': 'Snap-On Incorporated',
'CPHI': 'China Pharma Holdings, Inc.',
'LQ': 'La Quinta Holdings Inc.',
'LSBG': 'Lake Sunapee Bank Group',
'CATO': 'Cato Corporation (The)',
'PFLT': 'PennantPark Floating Rate Capital Ltd.',
'MBT': 'Mobile TeleSystems OJSC',
'FUN': 'Cedar Fair, L.P.',
'MYF': 'Blackrock MuniYield Investment Fund',
'MCBC': 'Macatawa Bank Corporation',
'HUBG': 'Hub Group, Inc.',
'SNMX': 'Senomyx, Inc.',
'GNK': 'Genco Shipping & Trading Limited Warrants Expiring 12/31/2021 ',
'PBFX': 'PBF Logistics LP',
'CCIH': 'ChinaCache International Holdings Ltd.',
'UBNT': 'Ubiquiti Networks, Inc.',
'NHLD': 'National Holdings Corporation',
'REG': 'Regency Centers Corporation',
'VRTV': 'Veritiv Corporation',
'MTR': 'Mesa Royalty Trust',
'MILN': 'Global X Millennials Thematic ETF',
'UTMD': 'Utah Medical Products, Inc.',
'WF': 'Woori Bank',
'COTY': 'Coty Inc.',
'HALO': 'Halozyme Therapeutics, Inc.',
'STAR ': 'iStar Financial Inc.',
'AZPN': 'Aspen Technology, Inc.',
'NSPH': 'Nanosphere, Inc.',
'NNBR': 'NN, Inc.',
'OVAS': 'Ovascience Inc.',
'OLP': 'One Liberty Properties, Inc.',
'VLY': 'Valley National Bancorp',
'CPN': 'Calpine Corporation',
'TRMB': 'Trimble Navigation Limited',
'FCB': 'FCB Financial Holdings, Inc.',
'GSV': 'Gold Standard Ventures Corporation',
'EJ': 'E-House (China) Holdings Limited',
'PHD': 'Pioneer Floating Rate Trust',
'STT-D': 'State Street Corporation',
'DFBG': 'Differential Brands Group Inc.',
'SALT': 'Scorpio Bulkers Inc.',
'INTC': 'Intel Corporation',
'GJR': 'Synthetic Fixed-Income Securities, Inc.',
'BGB': 'Blackstone / GSO Strategic Credit Fund',
'SSW-E': 'Seaspan Corporation',
'CALD': 'Callidus Software, Inc.',
'MRC': 'MRC Global Inc.',
'IRS': 'IRSA Inversiones Y Representaciones S.A.',
'GEF.B': 'Greif Bros. Corporation',
'IIM': 'Invesco Value Municipal Income Trust',
'ECR': 'Eclipse Resources Corporation',
'BAP': 'Credicorp Ltd.',
'AER': 'Aercap Holdings N.V.',
'OII': 'Oceaneering International, Inc.',
'BBY': 'Best Buy Co., Inc.',
'GHDX': 'Genomic Health, Inc.',
'BLX': 'Banco Latinoamericano de Comercio Exterior, S.A.',
'EXAR': 'Exar Corporation',
'AXS-D': 'Axis Capital Holdings Limited',
'ALL-F': 'Allstate Corporation (The)',
'RBS-T': 'Royal Bank Scotland plc (The)',
'AEIS': 'Advanced Energy Industries, Inc.',
'FRC-D': 'FIRST REPUBLIC BANK',
'AHC': 'A.H. Belo Corporation',
'LVHD': 'Legg Mason Low Volatility High Dividend ETF',
'APOG': 'Apogee Enterprises, Inc.',
'TIME': 'Time Inc.',
'ACRX': 'AcelRx Pharmaceuticals, Inc.',
'VBLT': 'Vascular Biogenics Ltd.',
'COMT': 'iShares Commodities Select Strategy ETF',
'SWH': 'Stanley Black & Decker, Inc.',
'YPRO': 'AdvisorShares YieldPro ETF',
'LDR': 'Landauer, Inc.',
'DAN': 'Dana Holding Corporation',
'TAP.A': 'Molson Coors Brewing Company',
'CRDS': 'Crossroads Systems, Inc.',
'GMO': 'General Moly, Inc',
'CBL': 'CBL & Associates Properties, Inc.',
'MTZ': 'MasTec, Inc.',
'AWRE': 'Aware, Inc.',
'MD': 'Mednax, Inc',
'SAJA': 'Sajan, Inc.',
'SC': 'Santander Consumer USA Holdings Inc.',
'JEQ': 'Aberdeen Japan Equity Fund, Inc. ',
'MG': 'Mistras Group Inc',
'RBS': 'Royal Bank Scotland plc (The)',
'DLA': 'Delta Apparel, Inc.',
'AFSI-A': 'AmTrust Financial Services, Inc.',
'BFS-C': 'Saul Centers, Inc.',
'EAA': 'Entergy Arkansas, Inc.',
'TCB-C': 'TCF Financial Corporation',
'PFL': 'PIMCO Income Strategy Fund',
'ETAK': 'Elephant Talk Communications Corp.',
'HW': 'Headwaters Incorporated',
'LITB': 'LightInTheBox Holding Co., Ltd.',
'GWB': 'Great Western Bancorp, Inc.',
'AOSL': 'Alpha and Omega Semiconductor Limited',
'NTX': 'Nuveen Texas Quality Income Municipal Fund',
'TSM': 'Taiwan Semiconductor Manufacturing Company Ltd.',
'NHI': 'National Health Investors, Inc.',
'IX': 'Orix Corp Ads',
'ISCA': 'International Speedway Corporation',
'UDF': 'United Development Funding IV',
'WDFC': 'WD-40 Company',
'PMT': 'PennyMac Mortgage Investment Trust',
'BBL': 'BHP Billiton plc',
'DBL': 'DoubleLine Opportunistic Credit Fund',
'LSXMK': 'Liberty Media Corporation',
'DEG': 'Etablissements Delhaize Freres et Cie "Le Lion" S.A.',
'NEWR': 'New Relic, Inc.',
'NNY': 'Nuveen New York Municipal Value Fund, Inc.',
'ISR': 'IsoRay, Inc.',
'CRVL': 'CorVel Corp.',
'WLK': 'Westlake Chemical Corporation',
'ELTK': 'Eltek Ltd.',
'THQ': 'Tekla Healthcare Opportunities Fund',
'CTW': 'Qwest Corporation',
'AVNW': 'Aviat Networks, Inc.',
'MAA': 'Mid-America Apartment Communities, Inc.',
'MCF': 'Contango Oil & Gas Company',
'PBB': 'Prospect Capital Corporation',
'CX': 'Cemex S.A.B. de C.V.',
'CPLP': 'Capital Product Partners L.P.',
'IMUC': 'ImmunoCellular Therapeutics, Ltd.',
'CRNT': 'Ceragon Networks Ltd.',
'DCT': 'DCT Industrial Trust Inc',
'SPXX': 'Nuveen S&P 500 Dynamic Overwrite Fund',
'VIVE': 'Viveve Medical, Inc.',
'LECO': 'Lincoln Electric Holdings, Inc.',
'FSFG': 'First Savings Financial Group, Inc.',
'GWGH': 'GWG Holdings, Inc',
'AEP': 'American Electric Power Company, Inc.',
'GPN': 'Global Payments Inc.',
'NUM': 'Nuveen Michigan Quality Income Municipal Fund',
'TRTLU': 'Terrapin 3 Acquisition Corporation',
'BRKS': 'Brooks Automation, Inc.',
'TCBI': 'Texas Capital Bancshares, Inc.',
'WVVI': 'Willamette Valley Vineyards, Inc.',
'GCP': 'GCP Applied Technologies Inc.',
'NDLS': 'Noodles & Company',
'FCN': 'FTI Consulting, Inc.',
'EVC': 'Entravision Communications Corporation',
'FITBI': 'Fifth Third Bancorp',
'MSL': 'MidSouth Bancorp',
'SIGM': 'Sigma Designs, Inc.',
'CYRXW': 'CryoPort, Inc.',
'ALL-C': 'Allstate Corporation (The)',
'SPPI': 'Spectrum Pharmaceuticals, Inc.',
'MNTX': 'Manitex International, Inc.',
'IGA': 'Voya Global Advantage and Premium Opportunity Fund',
'RQI': 'Cohen & Steers Quality Income Realty Fund Inc',
'ATTO': 'Atento S.A.',
'RCG': 'RENN Fund, Inc.',
'GLQ': 'Clough Global Equity Fund',
'TESS': 'TESSCO Technologies Incorporated',
'TGT': 'Target Corporation',
'NSIT': 'Insight Enterprises, Inc.',
'CAS': 'Castle (A.M.) & Co.',
'SHLO': 'Shiloh Industries, Inc.',
'SPAR': 'Spartan Motors, Inc.',
'BBGI': 'Beasley Broadcast Group, Inc.',
'MFLX': 'Multi-Fineline Electronix, Inc.',
'NSP': 'Insperity, Inc.',
'ASB-B': 'Associated Banc-Corp',
'ARA': 'American Renal Associates Holdings, Inc',
'ANDAW': 'Andina Acquisition Corp. II',
'FR': 'First Industrial Realty Trust, Inc.',
'BCV': 'Bancroft Fund Limited',
'SPR': 'Spirit Aerosystems Holdings, Inc.',
'MESO': 'Mesoblast Limited',
'NYH': 'Eaton Vance New York Municipal Bond Fund II',
'CRL': 'Charles River Laboratories International, Inc.',
'EP-C': 'El Paso Corporation',
'VDTH': 'Videocon d2h Limited',
'SCE-J': 'Southern California Edison Company',
'BZUN': 'Baozun Inc.',
'MZOR': 'Mazor Robotics Ltd.',
'SLCA': 'U.S. Silica Holdings, Inc.',
'ULBI': 'Ultralife Corporation',
'IRM': 'Iron Mountain Incorporated',
'KYN-G': 'Kayne Anderson MLP Investment Company',
'RGLS': 'Regulus Therapeutics Inc.',
'AETI': 'American Electric Technologies, Inc.',
'TMK-B': 'Torchmark Corporation',
'SBNYW': 'Signature Bank',
'PCF': 'Putnam High Income Bond Fund',
'GDOT': 'Green Dot Corporation',
'SMP': 'Standard Motor Products, Inc.',
'ASR': 'Grupo Aeroportuario del Sureste, S.A. de C.V.',
'LXU': 'Lsb Industries Inc.',
'NEE': 'NextEra Energy, Inc.',
'VFC': 'V.F. Corporation',
'MYN': 'Blackrock MuniYield New York Quality Fund, Inc.',
'ESP': 'Espey Mfg. & Electronics Corp.',
'AGMX': 'AutoGenomics, Inc.',
'HTLF': 'Heartland Financial USA, Inc.',
'ISSC': 'Innovative Solutions and Support, Inc.',
'JMU': 'Wowo Limited',
'TIS': 'Orchids Paper Products Company',
'CMO': 'Capstead Mortgage Corporation',
'TSLF': 'THL Credit Senior Loan Fund',
'WR': 'Westar Energy, Inc.',
'DLBS': 'region',
'CEMP': 'Cempra, Inc.',
'MBVT': 'Merchants Bancshares, Inc.',
'BFZ': 'BlackRock California Municipal Income Trust',
'ELLI': 'Ellie Mae, Inc.',
'LHO-H': 'LaSalle Hotel Properties',
'CCF': 'Chase Corporation',
'RETA': 'Reata Pharmaceuticals, Inc.',
'DCA': 'Virtus Total Return Fund',
'IMMR': 'Immersion Corporation',
'PCG-B': 'Pacific Gas & Electric Co.',
'IVZ': 'Invesco Plc',
'PRLB': 'Proto Labs, Inc.',
'WY': 'Weyerhaeuser Company',
'AIF': 'Apollo Tactical Income Fund Inc.',
'PRTK': 'Paratek Pharmaceuticals, Inc. ',
'QQQX': 'Nuveen NASDAQ 100 Dynamic Overwrite Fund',
'DPLO': 'Diplomat Pharmacy, Inc.',
'CLM': 'Cornerstone Strategic Value Fund, Inc.',
'ENRJ': 'EnerJex Resources, Inc.',
'MDVX': 'Medovex Corp.',
'OEC': 'Orion Engineered Carbons S.A',
'VBND': 'Vident Core U.S. Bond Strategy Fund',
'KLREW': 'KLR Energy Acquisition Corp.',
'MRD': 'Memorial Resource Development Corp.',
'GS-A': 'Goldman Sachs Group, Inc. (The)',
'FCCY': '1st Constitution Bancorp (NJ)',
'AGM': 'Federal Agricultural Mortgage Corporation',
'LBRDA': 'Liberty Broadband Corporation',
'PVBC': 'Provident Bancorp, Inc.',
'SLG-I': 'SL Green Realty Corporation',
'GEH': 'General Electric Capital Corporation',
'BOXL': 'Boxlight Corporation',
'AMZN': 'Amazon.com, Inc.',
'ETO': 'Eaton Vance Tax-Advantage Global Dividend Opp',
'PZE': 'Petrobras Argentina S.A.',
'CAR': 'Avis Budget Group, Inc.',
'CHY': 'Calamos Convertible and High Income Fund',
'MENT': 'Mentor Graphics Corporation',
'PAY': 'Verifone Systems, Inc.',
'ASMB': 'Assembly Biosciences, Inc.',
'WIFI': 'Boingo Wireless, Inc.',
'GIGA': 'Giga-tronics Incorporated',
'LILAK': 'Liberty Global plc',
'JUNO': 'Juno Therapeutics, Inc.',
'PEIX': 'Pacific Ethanol, Inc.',
'STRM': 'Streamline Health Solutions, Inc.',
'MGR': 'Affiliated Managers Group, Inc.',
'BIO.B': 'Bio-Rad Laboratories, Inc.',
'AGII': 'Argo Group International Holdings, Ltd.',
'GUID': 'Guidance Software, Inc.',
'DPG': 'Duff & Phelps Global Utility Income Fund Inc.',
'DPS': 'Dr Pepper Snapple Group, Inc',
'SMFG': 'Sumitomo Mitsui Financial Group Inc',
'BGSF': 'BG Staffing Inc',
'KPTI': 'Karyopharm Therapeutics Inc.',
'FEMS': 'First Trust Emerging Markets Small Cap AlphaDEX Fund',
'CTAA': 'Qwest Corporation',
'CBA ': 'ClearBridge American Energy MLP Fund Inc.',
'TQQQ': 'ProShares UltraPro QQQ',
'OXLCN': 'Oxford Lane Capital Corp.',
'MHLDO': 'Maiden Holdings, Ltd.',
'ARE-D': 'Alexandria Real Estate Equities, Inc.',
'BVSN': 'BroadVision, Inc.',
'GALTW': 'Galectin Therapeutics Inc.',
'ASBB': 'ASB Bancorp, Inc.',
'SGC': 'Superior Uniform Group, Inc.',
'EXPE': 'Expedia, Inc.',
'ARMH': 'ARM Holdings plc',
'ACSF': 'American Capital Senior Floating, Ltd.',
'CAW': 'CCA Industries, Inc.',
'NYLD.A': 'NRG Yield, Inc.',
'CORR': 'CorEnergy Infrastructure Trust, Inc.',
'CSL': 'Carlisle Companies Incorporated',
'NYRT': 'New York REIT, Inc.',
'WINA': 'Winmark Corporation',
'PMM': 'Putnam Managed Municipal Income Trust',
'HSTM': 'HealthStream, Inc.',
'KSU-': 'Kansas City Southern',
'PFIN': 'P & F Industries, Inc.',
'SDRL': 'Seadrill Limited',
'YLCO': 'Global X Yieldco Index ETF',
'PNBK': 'Patriot National Bancorp Inc.',
'PMF': 'PIMCO Municipal Income Fund',
'GLOB': 'Globant S.A.',
'EBIX': 'Ebix, Inc.',
'SNFCA': 'Security National Financial Corporation',
'TSNU': 'Tyson Foods, Inc.',
'KST': 'Scudder Strategic Income Trust',
'STBA': 'S&T Bancorp, Inc.',
'AMNB': 'American National Bankshares, Inc.',
'FCSC': 'Fibrocell Science Inc',
'MIND': 'Mitcham Industries, Inc.',
'NEE-K': 'NextEra Energy, Inc.',
'T': 'AT&T Inc.',
'FRAN': 'Francesca\'s Holdings Corporation',
'ATU': 'Actuant Corporation',
'TUTT': 'Tuttle Tactical Management U.S. Core ETF',
'OCFC': 'OceanFirst Financial Corp.',
'CSU': 'Capital Senior Living Corporation',
'BHACR': 'Barington/Hilco Acquisition Corp.',
'DDC': 'Dominion Diamond Corporation',
'ACTX': 'Global X Guru Activist ETF',
'EOG': 'EOG Resources, Inc.',
'SLMBP': 'SLM Corporation',
'NXN': 'Nuveen Insured New York Select Tax-Free Income Portfolio',
'ESSF': 'ETRE REIT, LLC',
'PFIS': 'Peoples Financial Services Corp. ',
'HSEB': 'HSBC Holdings plc',
'GLDI': 'Credit Suisse AG',
'NNVC': 'NanoViricides, Inc.',
'GALT': 'Galectin Therapeutics Inc.',
'C-L': 'Citigroup Inc.',
'PIRS': 'Pieris Pharmaceuticals, Inc.',
'AAT': 'American Assets Trust, Inc.',
'SQI': 'SciQuest, Inc.',
'UDR': 'United Dominion Realty Trust, Inc.',
'ORI': 'Old Republic International Corporation',
'OSBC': 'Old Second Bancorp, Inc.',
'BBT-D': 'BB&T Corporation',
'OLLI': 'Ollie\'s Bargain Outlet Holdings, Inc.',
'OC': 'Owens Corning Inc',
'RCON': 'Recon Technology, Ltd.',
'SCOR': 'comScore, Inc.',
'BATRA': 'Liberty Media Corporation',
'CRR': 'Carbo Ceramics, Inc.',
'DSWL': 'Deswell Industries, Inc.',
'AVGO': 'Broadcom Limited',
'VNET': '21Vianet Group, Inc.',
'DXI': 'DXI Energy Inc.',
'CVTI': 'Covenant Transportation Group, Inc.',
'NVDA': 'NVIDIA Corporation',
'ROSG': 'Rosetta Genomics Ltd.',
'MRUS': 'Merus N.V.',
'SCS': 'Steelcase Inc.',
'TEGP': 'Tallgrass Energy GP, LP',
'EEMA': 'iShares MSCI Emerging Markets Asia Index Fund',
'GMED': 'Globus Medical, Inc.',
'PJC': 'Piper Jaffray Companies',
'LAKE': 'Lakeland Industries, Inc.',
'ZFGN': 'Zafgen, Inc.',
'OMEX': 'Odyssey Marine Exploration, Inc.',
'SFNC': 'Simmons First National Corporation',
'UN': 'Unilever NV',
'BBP': 'BioShares Biotechnology Products Fund',
'ABY': 'Atlantica Yield plc',
'STLY': 'Stanley Furniture Company, Inc.',
'ANDA': 'Andina Acquisition Corp. II',
'SAIA': 'Saia, Inc.',
'DQ': 'DAQO New Energy Corp.',
'NRF-E': 'Northstar Realty Finance Corp.',
'PHM': 'PulteGroup, Inc.',
'MEOH': 'Methanex Corporation',
'BGS': 'B&G Foods, Inc.',
'NGHCZ': 'National General Holdings Corp',
'NVMI': 'Nova Measuring Instruments Ltd.',
'ELRC': 'Electro Rent Corporation',
'ONEQ': 'Fidelity Nasdaq Composite Index Tracking Stock',
'AAV': 'Advantage Oil & Gas Ltd',
'SOHU': 'Sohu.com Inc.',
'DBD': 'Diebold, Incorporated',
'CVO': 'Cenveo Inc',
'AA-': 'Alcoa Inc.',
'ITUS': 'ITUS Corporation',
'SLTB': 'Scorpio Bulkers Inc.',
'CIDM': 'Cinedigm Corp',
'USB-M': 'U.S. Bancorp',
'VVC': 'Vectren Corporation',
'INO': 'Inovio Pharmaceuticals, Inc.',
'ADAP': 'Adaptimmune Therapeutics plc',
'OHGI': 'One Horizon Group, Inc.',
'VAR': 'Varian Medical Systems, Inc.',
'CNX': 'CONSOL Energy Inc.',
'INFN': 'Infinera Corporation',
'MGCD': 'MGC Diagnostics Corporation',
'PACEW': 'Pace Holdings Corp.',
'KOS': 'Kosmos Energy Ltd.',
'NKG': 'Nuveen Georgia Dividend Advantage Municipal Fund 2',
'SSYS': 'Stratasys, Ltd.',
'SVVC': 'Firsthand Technology Value Fund, Inc.',
'FMK': 'First Trust Mega Cap AlphaDEX Fund',
'CUBI-D': 'Customers Bancorp, Inc',
'CBOE': 'CBOE Holdings, Inc.',
'BTX': 'BioTime, Inc.',
'NAV': 'Navistar International Corporation',
'CALM': 'Cal-Maine Foods, Inc.',
'SHW': 'Sherwin-Williams Company (The)',
'TLYS': 'Tilly\'s, Inc.',
'CIEN': 'Ciena Corporation',
'ORBC': 'ORBCOMM Inc.',
'APD': 'Air Products and Chemicals, Inc.',
'BLVDU': 'Boulevard Acquisition Corp. II',
'CFC-A': 'Countrywide Financial Corporation',
'CF': 'CF Industries Holdings, Inc.',
'OFED': 'Oconee Federal Financial Corp.',
'BPMC': 'Blueprint Medicines Corporation',
'MTRN': 'Materion Corporation',
'AAU': 'Almaden Minerals, Ltd.',
'GNCA': 'Genocea Biosciences, Inc.',
'CVRR': 'CVR Refining, LP',
'ENBL': 'Enable Midstream Partners, LP',
'FANG': 'Diamondback Energy, Inc.',
'SXE': 'Southcross Energy Partners, L.P.',
'RMD': 'ResMed Inc.',
'KE': 'Kimball Electronics, Inc.',
'MCK': 'McKesson Corporation',
'MY': 'China Ming Yang Wind Power Group Limited',
'AU': 'AngloGold Ashanti Limited',
'ASUR': 'Asure Software Inc',
'HNNA': 'Hennessy Advisors, Inc.',
'MFM': 'MFS Municipal Income Trust',
'GLOW': 'Glowpoint, Inc.',
'HTR': 'Brookfield Total Return Fund Inc.',
'CVEO': 'Civeo Corporation',
'CSA': 'Victory CEMP US Small Cap Volatility Wtd Index ETF',
'MBTF': 'M B T Financial Corp',
'SOCL': 'Global X Social Media Index ETF',
'BIS': 'ProShares UltraShort Nasdaq Biotechnology',
'SNV': 'Synovus Financial Corp.',
'FMC': 'FMC Corporation',
'OXBRW': 'Oxbridge Re Holdings Limited',
'AAMC': 'Altisource Asset Management Corp',
'FFBC': 'First Financial Bancorp.',
'BOCH': 'Bank of Commerce Holdings (CA)',
'NEOG': 'Neogen Corporation',
'ABDC': 'Alcentra Capital Corp.',
'OKSB': 'Southwest Bancorp, Inc.',
'SANM': 'Sanmina Corporation',
'RFTA': 'RAIT Financial Trust',
'FORK': 'Fuling Global Inc.',
'FRT': 'Federal Realty Investment Trust',
'TVE': 'Tennessee Valley Authority',
'CHSP-A': 'Chesapeake Lodging Trust',
'HAYN': 'Haynes International, Inc.',
'SCYX': 'SCYNEXIS, Inc.',
'HCKT': 'The Hackett Group, Inc.',
'ARE-E': 'Alexandria Real Estate Equities, Inc.',
'EIGI': 'Endurance International Group Holdings, Inc.',
'EGOV': 'NIC Inc.',
'CEQP': 'Crestwood Equity Partners LP',
'CYNO': 'Cynosure, Inc.',
'SLGN': 'Silgan Holdings Inc.',
'CSB': 'Victory CEMP US Small Cap High Div Volatility Wtd Index ETF',
'GTXI': 'GTx, Inc.',
'NILE': 'Blue Nile, Inc.',
'CINR': 'Ciner Resources LP',
'OPOF': 'Old Point Financial Corporation',
'SSNI': 'Silver Spring Networks, Inc.',
'CARA': 'Cara Therapeutics, Inc.',
'OTG': 'OTG EXP, Inc.',
'SPOK': 'Spok Holdings, Inc.',
'NUAN': 'Nuance Communications, Inc.',
'ALL-A': 'Allstate Corporation (The)',
'BFAM': 'Bright Horizons Family Solutions Inc.',
'TAHO': 'Tahoe Resources, Inc.',
'INGR': 'Ingredion Incorporated',
'UTL': 'UNITIL Corporation',
'ZIONZ': 'Zions Bancorporation',
'GST': 'Gastar Exploration Inc.',
'RDEN': 'Elizabeth Arden, Inc.',
'NSU': 'Nevsun Resources Ltd',
'CCV': 'Comcast Corporation',
'CPA': 'Copa Holdings, S.A.',
'SLF': 'Sun Life Financial Inc.',
'CUBN': 'Commerce Union Bancshares, Inc.',
'CQP': 'Cheniere Energy Partners, LP',
'GB': 'Greatbatch, Inc.',
'SNH': 'Senior Housing Properties Trust',
'CVM': 'Cel-Sci Corporation',
'SENS': 'Senseonics Holdings, Inc.',
'EVEP': 'EV Energy Partners, L.P.',
'HWCC': 'Houston Wire & Cable Company',
'CXRX': 'Concordia Healthcare Corp.',
'PLOW': 'Douglas Dynamics, Inc.',
'JPM-E': 'J P Morgan Chase & Co',
'OHRP': 'Ohr Pharmaceuticals, Inc.',
'EMR': 'Emerson Electric Company',
'PBI-B': 'Pitney Bowes Inc.',
'UPS': 'United Parcel Service, Inc.',
'ON': 'ON Semiconductor Corporation',
'ETJ': 'Eaton Vance Risk-Managed Diversified Equity Income Fund',
'FONR': 'Fonar Corporation',
'URG': 'Ur Energy Inc',
'SSW-D': 'Seaspan Corporation',
'JBL': 'Jabil Circuit, Inc.',
'DDS': 'Dillard\'s, Inc.',
'AFH': 'Atlas Financial Holdings, Inc.',
'EVH': 'Evolent Health, Inc',
'OXM': 'Oxford Industries, Inc.',
'AJRD': 'Aerojet Rocketdyne Holdings, Inc. ',
'GPT': 'Gramercy Property Trust',
'JASNW': 'Jason Industries, Inc.',
'CMRE-B': 'Costamare Inc.',
'GGACR': 'Garnero Group Acquisition Company',
'ANH-C': 'Anworth Mortgage Asset Corporation',
'FES': 'Forbes Energy Services Ltd',
'SPTN': 'SpartanNash Company',
'LEO': 'Dreyfus Strategic Municipals, Inc.',
'EVO': 'Eaton Vance Ohio Municipal Income Trust',
'PRE-F': 'PartnerRe Ltd.',
'PNW': 'Pinnacle West Capital Corporation',
'VIRC': 'Virco Manufacturing Corporation',
'ACAT': 'Arctic Cat Inc.',
'TREE': 'LendingTree, Inc.',
'BDN': 'Brandywine Realty Trust',
'BAF': 'BlackRock Income Investment Quality Trust',
'MSP': 'Madison Strategic Sector Premium Fund',
'CTS': 'CTS Corporation',
'MYJ': 'Blackrock MuniYield New Jersey Fund, Inc.',
'JRJC': 'China Finance Online Co. Limited',
'KOF': 'Coca Cola Femsa S.A.B. de C.V.',
'CGNX': 'Cognex Corporation',
'AFGE': 'American Financial Group, Inc.',
'SBLK': 'Star Bulk Carriers Corp.',
'MBSD': 'FlexShares Disciplined Duration MBS Index Fund',
'BRG': 'Bluerock Residential Growth REIT, Inc.',
'LITE': 'Lumentum Holdings Inc.',
'LOGM': 'LogMein, Inc.',
'JSMD': 'Janus Small/Mid Cap Growth Alpha ETF',
'EIO': 'Eaton Vance Ohio Municipal Bond Fund',
'NMO': 'Nuveen Municipal Market Opportunity Fund, Inc.',
'KND': 'Kindred Healthcare, Inc.',
'ERIC': 'Ericsson',
'IAC': 'IAC/InterActiveCorp',
'TARO': 'Taro Pharmaceutical Industries Ltd.',
'SRC': 'Spirit Realty Capital, Inc.',
'SRAQ': 'Silver Run Acquisition Corporation',
'IGT': 'International Game Technology',
'IEX': 'IDEX Corporation',
'AGIO': 'Agios Pharmaceuticals, Inc.',
'WCC': 'WESCO International, Inc.',
'BWLD': 'Buffalo Wild Wings, Inc.',
'MCRI': 'Monarch Casino & Resort, Inc.',
'EYEG': 'Eyegate Pharmaceuticals, Inc.',
'TTM': 'Tata Motors Ltd',
'BLCM': 'Bellicum Pharmaceuticals, Inc.',
'CPHR': 'Cipher Pharmaceuticals Inc.',
'VMM': 'Delaware Investments Minnesota Municipal Income Fund II, Inc.',
'FSM': 'Fortuna Silver Mines Inc.',
'LCUT': 'Lifetime Brands, Inc.',
'DRH': 'Diamondrock Hospitality Company',
'GBT': 'GLOBAL BLOOD THERAPEUTICS, INC.',
'MCY': 'Mercury General Corporation',
'MYC': 'Blackrock MuniYield California Fund, Inc.',
'INN-A': 'Summit Hotel Properties, Inc.',
'TVIZ': 'region',
'CADC': 'China Advanced Construction Materials Group, Inc.',
'BCEI': 'Bonanza Creek Energy, Inc.',
'EMIF': 'iShares S&P Emerging Markets Infrastructure Index Fund',
'SNHN': 'Senior Housing Properties Trust',
'HOV': 'Hovnanian Enterprises Inc',
'PERY': 'Perry Ellis International Inc.',
'GSK': 'GlaxoSmithKline PLC',
'MRTN': 'Marten Transport, Ltd.',
'BBSI': 'Barrett Business Services, Inc.',
'CSII': 'Cardiovascular Systems, Inc.',
'ARCW': 'ARC Group Worldwide, Inc.',
'TXMD': 'TherapeuticsMD, Inc.',
'GRPN': 'Groupon, Inc.',
'XCO': 'EXCO Resources NL',
'SRDX': 'SurModics, Inc.',
'CRD.B': 'Crawford & Company',
'ARDM': 'Aradigm Corporation',
'GES': 'Guess?, Inc.',
'HURN': 'Huron Consulting Group Inc.',
'COKE': 'Coca-Cola Bottling Co. Consolidated',
'ETN': 'Eaton Corporation, PLC',
'BLFS': 'BioLife Solutions, Inc.',
'SNR': 'New Senior Investment Group Inc.',
'AFI': 'Armstrong Flooring, Inc.',
'LOV': 'Spark Networks, Inc.',
'JPM-B': 'J P Morgan Chase & Co',
'GUT': 'Gabelli Utility Trust (The)',
'DFS': 'Discover Financial Services',
'BOE': 'Blackrock Global',
'TISA': 'Top Image Systems, Ltd.',
'QCLN': 'First Trust NASDAQ Clean Edge Green Energy Index Fund',
'CHKP': 'Check Point Software Technologies Ltd.',
'SFN': 'Stifel Financial Corporation',
'AIRR': 'First Trust RBA American Industrial Renaissance ETF',
'ALXN': 'Alexion Pharmaceuticals, Inc.',
'BSBR': 'Banco Santander Brasil SA',
'IRWD': 'Ironwood Pharmaceuticals, Inc.',
'SSW-C.CL': 'Seaspan Corporation',
'YCB': 'Your Community Bankshares, Inc.',
'GGACU': 'Garnero Group Acquisition Company',
'SWN': 'Southwestern Energy Company',
'RBS-R': 'Royal Bank Scotland plc (The)',
'GBL': 'Gamco Investors, Inc.',
'PEN': 'Penumbra, Inc.',
'EYES': 'Second Sight Medical Products, Inc.',
'MCI': 'Babson Capital Corporate Investors',
'WGP': 'Western Gas Equity Partners, LP',
'CIT': 'CIT Group Inc (DEL)',
'BITE': 'The Restaurant ETF',
'ACGL': 'Arch Capital Group Ltd.',
'CBYL': 'Carbylan Therapeutics, Inc.',
'CERN': 'Cerner Corporation',
'DV': 'DeVry Education Group Inc.',
'CYAN': 'Cyanotech Corporation',
'BCE': 'BCE, Inc.',
'HQY': 'HealthEquity, Inc.',
'HNH': 'Handy & Harman Ltd.',
'AGNCP': 'American Capital Agency Corp.',
'WLH': 'Lyon William Homes',
'GRAM': 'Grana y Montero S.A.A.',
'PSCH': 'PowerShares S&P SmallCap Health Care Portfolio',
'NVEC': 'NVE Corporation',
'OPGN': 'OpGen, Inc.',
'FAM': 'First Trust/Aberdeen Global Opportunity Income Fund',
'SHO-F': 'Sunstone Hotel Investors, Inc.',
'NAZ': 'Nuveen Arizona Premium Income Municipal Fund',
'SOCB': 'Southcoast Financial Corporation',
'CHKR': 'Chesapeake Granite Wash Trust',
'MSCI': 'MSCI Inc',
'FBNC': 'First Bancorp',
'VMC': 'Vulcan Materials Company',
'SNAK': 'Inventure Foods, Inc.',
'I': 'Intelsat S.A.',
'NPN': 'Nuveen Pennsylvania Municipal Value Fund',
'VXDN': 'AccuShares Spot CBOE VIX Down Shares',
'MNK': 'Mallinckrodt plc',
'JSML': 'Janus Small Cap Growth Alpha ETF',
'FULT': 'Fulton Financial Corporation',
'MLHR': 'Herman Miller, Inc.',
'CNFR': 'Conifer Holdings, Inc.',
'MYOS': 'MYOS RENS Technology Inc.',
'KRA': 'Kraton Performance Polymers, Inc',
'SEDG': 'SolarEdge Technologies, Inc.',
'TBNK': 'Territorial Bancorp Inc.',
'PCI': 'PIMCO Dynamic Credit Income Fund',
'DHX': 'DHI Group, Inc.',
'LCNB': 'LCNB Corporation',
'AVXL': 'Anavex Life Sciences Corp.',
'ABAX': 'ABAXIS, Inc.',
'BELFA': 'Bel Fuse Inc.',
'ASH': 'Ashland Inc.',
'CFGE': 'Calamos Focus Growth ETF',
'ARQL': 'ArQule, Inc.',
'ECACU': 'E-compass Acquisition Corp.',
'AAON': 'AAON, Inc.',
'BRC': 'Brady Corporation',
'MHH': 'Mastech Holdings, Inc',
'PFSI': 'PennyMac Financial Services, Inc.',
'SSY': 'SunLink Health Systems, Inc.',
'DLHC': 'DLH Holdings Corp.',
'CARB': 'Carbonite, Inc.',
'VSR': 'Versar, Inc.',
'XGTI': 'XG Technology, Inc',
'CRM': 'Salesforce.com Inc',
'RVT': 'Royce Value Trust, Inc.',
'IDN': 'Intellicheck Mobilisa, Inc.',
'LPCN': 'Lipocine Inc.',
'NQS': 'Nuveen Select Quality Municipal Fund, Inc.',
'PSA-R': 'Public Storage',
'ISIG': 'Insignia Systems, Inc.',
'EMMS': 'Emmis Communications Corporation',
'BLBD': 'Blue Bird Corporation',
'BICK': 'First Trust BICK Index Fund',
'MFT': 'Blackrock MuniYield Investment Quality Fund',
'CCC': 'Calgon Carbon Corporation',
'SJM': 'J.M. Smucker Company (The)',
'RAVE': 'Rave Restaurant Group, Inc.',
'DMTX': 'Dimension Therapeutics, Inc.',
'WMLP': 'Westmoreland Resource Partners, LP',
'EME': 'EMCOR Group, Inc.',
'FPP.WS': 'FieldPoint Petroleum Corporation',
'MDR': 'McDermott International, Inc.',
'GGZ-A': 'Gabelli Global Small and Mid Cap Value Trust (The)',
'BT': 'BT Group plc',
'NEE-P': 'NextEra Energy, Inc.',
'TRTLW': 'Terrapin 3 Acquisition Corporation',
'RRTS': 'Roadrunner Transportation Systems, Inc',
'IMMU': 'Immunomedics, Inc.',
'STAA': 'STAAR Surgical Company',
'TEVA': 'Teva Pharmaceutical Industries Limited',
'HP': 'Helmerich & Payne, Inc.',
'GST-B': 'Gastar Exploration Inc.',
'VTNR': 'Vertex Energy, Inc',
'CHNR': 'China Natural Resources, Inc.',
'MPB': 'Mid Penn Bancorp',
'AFSI-C': 'AmTrust Financial Services, Inc.',
'KYN-F': 'Kayne Anderson MLP Investment Company',
'SP': 'SP Plus Corporation',
'AEUA': 'Anadarko Petroleum Corporation',
'STBZ': 'State Bank Financial Corporation.',
'JHS': 'John Hancock Income Securities Trust',
'AES': 'The AES Corporation',
'AIY': 'Apollo Investment Corporation',
'POL': 'PolyOne Corporation',
'LDRI': 'PowerShares LadderRite 0-5 Year Corporate Bond Portfolio',
'HOLX': 'Hologic, Inc.',
'SAAS': 'inContact, Inc.',
'RDWR': 'Radware Ltd.',
'GRX-B': 'The Gabelli Healthcare & Wellness Trust',
'COF-P': 'Capital One Financial Corporation',
'GGE': 'Guggenheim Enhanced Equity Strategy Fund',
'HMNF': 'HMN Financial, Inc.',
'WMB': 'Williams Companies, Inc. (The)',
'SHIP': 'Seanergy Maritime Holdings Corp',
'GPE-A': 'Georgia Power Company',
'GFNSL': 'General Finance Corporation',
'NEOS': 'Neos Therapeutics, Inc.',
'PRAA': 'PRA Group, Inc.',
'GNRC': 'Generac Holdings Inc.',
'DOX': 'Amdocs Limited',
'VTRB': 'Ventas Realty, Limited Partnership // Ventas Capital Corporation',
'HCI': 'HCI Group, Inc.',
'IPKW': 'PowerShares International BuyBack Achievers Portfolio',
'BSX': 'Boston Scientific Corporation',
'COO': 'Cooper Companies, Inc. (The)',
'HRTG': 'Heritage Insurance Holdings, Inc.',
'WAB': 'Westinghouse Air Brake Technologies Corporation',
'CNYD': 'China Yida Holding, Co.',
'AXS': 'Axis Capital Holdings Limited',
'GOOGL': 'Alphabet Inc.',
'POWI': 'Power Integrations, Inc.',
'AMCC': 'Applied Micro Circuits Corporation',
'ARDC': 'Ares Dynamic Credit Allocation Fund, Inc.',
'UA.C': 'Under Armour, Inc.',
'CRY': 'CryoLife, Inc.',
'NCTY': 'The9 Limited',
'AES-C': 'The AES Corporation',
'LOCO': 'El Pollo Loco Holdings, Inc.',
'INF': 'Brookfield Global Listed Infrastructure Income Fund',
'GT': 'The Goodyear Tire & Rubber Company',
'VONE': 'Vanguard Russell 1000 ETF',
'TWOU': '2U, Inc.',
'ENPH': 'Enphase Energy, Inc.',
'CMN': 'Cantel Medical Corp.',
'CWAY': 'Coastway Bancorp, Inc.',
'TESO': 'Tesco Corporation',
'AIV': 'Apartment Investment and Management Company',
'VCIT': 'Vanguard Intermediate-Term Corporate Bond ETF',
'TAOM': 'Taomee Holdings Limited',
'DLR-F': 'Digital Realty Trust, Inc.',
'O': 'Realty Income Corporation',
'MNE': 'Blackrock Muni New York Intermediate Duration Fund Inc',
'RGEN': 'Repligen Corporation',
'XRS': 'TAL Education Group',
'UCTT': 'Ultra Clean Holdings, Inc.',
'EPZM': 'Epizyme, Inc.',
'CLW': 'Clearwater Paper Corporation',
'BTG': 'B2Gold Corp',
'ALNY': 'Alnylam Pharmaceuticals, Inc.',
'AMOV': 'America Movil, S.A.B. de C.V.',
'SNTA': 'Synta Pharmaceuticals Corp.',
'SIVB': 'SVB Financial Group',
'BKS': 'Barnes & Noble, Inc.',
'MXE': 'Mexico Equity and Income Fund, Inc. (The)',
'TEAR': 'TearLab Corporation',
'ONCE': 'Spark Therapeutics, Inc.',
'PFD': 'Flaherty & Crumrine Preferred Income Fund Incorporated',
'MIK': 'The Michaels Companies, Inc.',
'LMCA': 'Liberty Media Corporation',
'AKBA': 'Akebia Therapeutics, Inc.',
'PDS': 'Precision Drilling Corporation',
'GULF': 'WisdomTree Middle East Dividend Fund',
'UNTD': 'United Online, Inc.',
'NM-H': 'Navios Maritime Holdings Inc.',
'MVCB': 'MVC Capital, Inc.',
'ESL': 'Esterline Technologies Corporation',
'ELMD': 'Electromed, Inc.',
'EDI': 'Stone Harbor Emerging Markets Total Income Fund',
'XHR': 'Xenia Hotels & Resorts, Inc.',
'HTCH': 'Hutchinson Technology Incorporated',
'CNNX': 'Cone Midstream Partners LP',
'EXLS': 'ExlService Holdings, Inc.',
'WEA': 'Western Asset Bond Fund',
'AGC': 'Advent Claymore Convertible Securities and Income Fund II',
'NEWS': 'NewStar Financial, Inc.',
'TLMR': 'Talmer Bancorp, Inc.',
'HYGS': 'Hydrogenics Corporation',
'ICPT': 'Intercept Pharmaceuticals, Inc.',
'KEN': 'Kenon Holdings Ltd.',
'PJS': 'Preferred Plus Trust Ser QWS 2 Tr Ctf',
'RBS-H': 'Royal Bank Scotland plc (The)',
'OCN': 'Ocwen Financial Corporation',
'BAC-A': 'Bank of America Corporation',
'CBMXW': 'CombiMatrix Corporation',
'TCPI': 'TCP International Holdings Ltd.',
'LHO': 'LaSalle Hotel Properties',
'CLRB': 'Cellectar Biosciences, Inc.',
'TRX': 'Tanzanian Royalty Exploration Corporation',
'TWTR': 'Twitter, Inc.',
'GGN': 'GAMCO Global Gold, Natural Resources & Income Trust',
'AROW': 'Arrow Financial Corporation',
'ANY': 'Sphere 3D Corp.',
'CYBE': 'CyberOptics Corporation',
'LMOS': 'Lumos Networks Corp.',
'SIVBO': 'SVB Financial Group',
'PKBK': 'Parke Bancorp, Inc.',
'CNTF': 'China TechFaith Wireless Communication Technology Limited',
'TLT': 'iShares 20+ Year Treasury Bond ETF',
'MEET': 'MeetMe, Inc.',
'SBW': 'Western Asset Worldwide Income Fund Inc.',
'NTRSP': 'Northern Trust Corporation',
'HR': 'Healthcare Realty Trust Incorporated',
'USB-N': 'U.S. Bancorp',
'WFC-R': 'Wells Fargo & Company',
'PNQI': 'PowerShares Nasdaq Internet Portfolio',
'NATL': 'National Interstate Corporation',
'STPP': 'region',
'AAAP': 'Advanced Accelerator Applications S.A.',
'PN': 'Patriot National, Inc.',
'TRMK': 'Trustmark Corporation',
'ADHD': 'Alcobra Ltd.',
'PSA-A': 'Public Storage',
'CFCOU': 'CF Corporation',
'SASR': 'Sandy Spring Bancorp, Inc.',
'CFRXW': 'ContraFect Corporation',
'SGU': 'Star Gas Partners, L.P.',
'GTIM': 'Good Times Restaurants Inc.',
'MSG': 'MSG Networks Inc.',
'EIA': 'Eaton Vance California Municipal Bond Fund II',
'RDI': 'Reading International Inc',
'ZSAN': 'Zosano Pharma Corporation',
'GCH': 'Aberdeen Greater China Fund, Inc.',
'MYRG': 'MYR Group, Inc.',
'PRE-H': 'PartnerRe Ltd.',
'FCEL': 'FuelCell Energy, Inc.',
'GPL': 'Great Panther Silver Limited',
'AEB': 'Aegon NV',
'BLJ': 'Blackrock New Jersey Municipal Bond Trust',
'CHSCO': 'CHS Inc',
'JYNT': 'The Joint Corp.',
'PTN': 'Palatin Technologies, Inc.',
'WPPGY': 'WPP plc',
'VICL': 'Vical Incorporated',
'BSL': 'Blackstone GSO Senior Floating Rate Term Fund',
'CPIX': 'Cumberland Pharmaceuticals Inc.',
'LGF': 'Lions Gate Entertainment Corporation',
'BAC-C': 'Bank of America Corporation',
'FLTX': 'Fleetmatics Group PLC',
'CJJD': 'China Jo-Jo Drugstores, Inc.',
'RBS-F': 'Royal Bank Scotland plc (The)',
'AIR': 'AAR Corp.',
'FINL': 'The Finish Line, Inc.',
'WSTG': 'Wayside Technology Group, Inc.',
'MMLP': 'Martin Midstream Partners L.P.',
'IPAR': 'Inter Parfums, Inc.',
'CYOU': 'Changyou.com Limited',
'CATY': 'Cathay General Bancorp',
'EBAYL': 'eBay Inc.',
'MDLZ': 'Mondelez International, Inc.',
'MNST': 'Monster Beverage Corporation',
'COH': 'Coach, Inc.',
'NCI': 'Navigant Consulting, Inc.',
'PRI': 'Primerica, Inc.',
'FOSL': 'Fossil Group, Inc.',
'RRR': 'Red Rock Resorts, Inc.',
'MRTX': 'Mirati Therapeutics, Inc.',
'PSA-C': 'Public Storage',
'VLP': 'Valero Energy Partners LP',
'TREX': 'Trex Company, Inc.',
'CFG': 'Citizens Financial Group, Inc.',
'NFLX': 'Netflix, Inc.',
'XNY': 'China Xiniya Fashion Limited',
'TRC': 'Tejon Ranch Co',
'RAS-B': 'RAIT Financial Trust',
'TDW': 'Tidewater Inc.',
'GPK': 'Graphic Packaging Holding Company',
'LVS': 'Las Vegas Sands Corp.',
'STI.WS.A': 'SunTrust Banks, Inc.',
'NXEOU': 'Nexeo Solutions, Inc.',
'BKT': 'BlackRock Income Trust Inc. (The)',
'BGX': 'Blackstone GSO Long Short Credit Income Fund',
'ICLN': 'iShares S&P Global Clean Energy Index Fund',
'MAS': 'Masco Corporation',
'INAP': 'Internap Corporation',
'DCUC': 'Dominion Resources, Inc.',
'PYPL': 'PayPal Holdings, Inc.',
'IGD': 'Voya Global Equity Dividend and Premium Opportunity Fund',
'INN-C': 'Summit Hotel Properties, Inc.',
'CPSS': 'Consumer Portfolio Services, Inc.',
'HYH': 'Halyard Health, Inc.',
'DVD': 'Dover Motorsports, Inc.',
'UTES': 'Reaves Utilities ETF',
'SPHS': 'Sophiris Bio, Inc.',
'TCCA': 'Triangle Capital Corporation',
'AFW': 'American Financial Group, Inc.',
'GFN': 'General Finance Corporation',
'GDDY': 'GoDaddy Inc.',
'WKHS': 'Workhorse Group, Inc.',
'CTIC': 'CTI BioPharma Corp.',
'ARC': 'ARC Document Solutions, Inc.',
'DMRC': 'Digimarc Corporation',
'GRMN': 'Garmin Ltd.',
'CZNC': 'Citizens & Northern Corp',
'FSCFL': 'Fifth Street Finance Corp.',
'CHSCM': 'CHS Inc',
'ONTX': 'Onconova Therapeutics, Inc.',
'SWZ': 'Swiss Helvetia Fund, Inc. (The)',
'ARAY': 'Accuray Incorporated',
'LOPE': 'Grand Canyon Education, Inc.',
'LBIX': 'Leading Brands Inc',
'CRIS': 'Curis, Inc.',
'DGICB': 'Donegal Group, Inc.',
'ACIA': 'Acacia Communications, Inc.',
'HAWK': 'Blackhawk Network Holdings, Inc.',
'VR': 'Validus Holdings, Ltd.',
'NTRS': 'Northern Trust Corporation',
'BRO': 'Brown & Brown, Inc.',
'AHT-D': 'Ashford Hospitality Trust Inc',
'WRI': 'Weingarten Realty Investors',
'PAYC': 'Paycom Software, Inc.',
'WCIC': 'WCI Communities, Inc.',
'EE': 'El Paso Electric Company',
'FPP': 'FieldPoint Petroleum Corporation',
'GLAD': 'Gladstone Capital Corporation',
'OESX': 'Orion Energy Systems, Inc.',
'MNKD': 'MannKind Corporation',
'IGF': 'iShares Global Infrastructure ETF',
'LNCE': 'Snyder\'s-Lance, Inc.',
'CRMT': 'America\'s Car-Mart, Inc.',
'UBP': 'Urstadt Biddle Properties Inc.',
'SMBK': 'SmartFinancial, Inc.',
'VOD': 'Vodafone Group Plc',
'RDIB': 'Reading International Inc',
'CRT': 'Cross Timbers Royalty Trust',
'CCZ': 'Comcast Corporation',
'AVXS': 'AveXis, Inc.',
'OMER': 'Omeros Corporation',
'PRO': 'PROS Holdings, Inc.',
'RLGT-A': 'Radiant Logistics, Inc.',
'FRSH': 'Papa Murphy\'s Holdings, Inc.',
'OHI': 'Omega Healthcare Investors, Inc.',
'GBX': 'Greenbrier Companies, Inc. (The)',
'CIGI': 'Colliers International Group Inc. ',
'FNTC': 'FinTech Acquisition Corp.',
'OPB': 'Opus Bank',
'BSTG': 'Biostage, Inc.',
'CYNA': 'Cynapsus Therapeutics Inc.',
'BK': 'Bank Of New York Mellon Corporation (The)',
'AMKR': 'Amkor Technology, Inc.',
'NICK': 'Nicholas Financial, Inc.',
'FORD': 'Forward Industries, Inc.',
'SNY': 'Sanofi',
'CCLP': 'CSI Compressco LP',
'CNLM': 'CB Pharma Acquisition Corp.',
'PER': 'SandRidge Permian Trust',
'V': 'Visa Inc.',
'EOCA': 'Endesa Americas S.A.',
'SAFM': 'Sanderson Farms, Inc.',
'ACST': 'Acasti Pharma, Inc.',
'ABTL': 'Autobytel Inc.',
'GLPI': 'Gaming and Leisure Properties, Inc.',
'ICD': 'Independence Contract Drilling, Inc.',
'BAC-Z': 'Bank of America Corporation',
'ELSE': 'Electro-Sensors, Inc.',
'ORAN': 'Orange',
'SIFY': 'Sify Technologies Limited',
'CTB': 'Cooper Tire & Rubber Company',
'MAYS': 'J. W. Mays, Inc.',
'KEYS': 'Keysight Technologies Inc.',
'APEI': 'American Public Education, Inc.',
'EXFO': 'EXFO Inc',
'ROKA': 'Roka Bioscience, Inc.',
'LC': 'LendingClub Corporation',
'NOG': 'Northern Oil and Gas, Inc.',
'NBRV': 'Nabriva Therapeutics AG',
'GRSHW': 'Gores Holdings, Inc.',
'ENZ': 'Enzo Biochem, Inc.',
'CMA': 'Comerica Incorporated',
'PAAC': 'Pacific Special Acquisition Corp.',
'IRR': 'Voya Natural Resources Equity Income Fund',
'GLBS': 'Globus Maritime Limited',
'UBFO': 'United Security Bancshares',
'FC': 'Franklin Covey Company',
'FSI': 'Flexible Solutions International Inc.',
'FCAN': 'First Trust Canada AlphaDEX Fund',
'EMO': 'ClearBridge Energy MLP Opportunity Fund Inc.',
'PETS': 'PetMed Express, Inc.',
'TCRX': 'THL Credit, Inc.',
'GDV-A': 'Gabelli Dividend',
'UNF': 'Unifirst Corporation',
'SSN': 'Samson Oil & Gas Limited',
'SZC': 'Cushing Renaissance Fund (The)',
'FONE': 'First Trust NASDAQ Smartphone Index Fund',
'RMCF': 'Rocky Mountain Chocolate Factory, Inc.',
'ORBK': 'Orbotech Ltd.',
'SQM': 'Sociedad Quimica y Minera S.A.',
'FRC-B': 'FIRST REPUBLIC BANK',
'TCON': 'TRACON Pharmaceuticals, Inc.',
'FWRD': 'Forward Air Corporation',
'CNCE': 'Concert Pharmaceuticals, Inc.',
'PDI': 'PIMCO Dynamic Income Fund',
'CVGW': 'Calavo Growers, Inc.',
'FIX': 'Comfort Systems USA, Inc.',
'DUC': 'Duff & Phelps Utility & Corporate Bond Trust, Inc.',
'WPG': 'WP Glimcher Inc.',
'VUZI': 'Vuzix Corporation',
'RFIL': 'RF Industries, Ltd.',
'TDI': 'Telephone and Data Systems, Inc.',
'MESG': 'Xura, Inc.',
'OCRX': 'Ocera Therapeutics, Inc.',
'ZB-A': 'Zions Bancorporation',
'HUM': 'Humana Inc.',
'ERB': 'ERBA Diagnostics, Inc.',
'SRLP': 'Sprague Resources LP',
'CACB': 'Cascade Bancorp',
'LTXB': 'LegacyTexas Financial Group, Inc.',
'CR': 'Crane Company',
'ROVI': 'Rovi Corporation',
'NBL': 'Noble Energy Inc.',
'SHLD': 'Sears Holdings Corporation',
'ORLY': 'O\'Reilly Automotive, Inc.',
'HSGX': 'Histogenics Corporation',
'JWN': 'Nordstrom, Inc.',
'PDCE': 'PDC Energy, Inc.',
'EDD': 'Morgan Stanley Emerging Markets Domestic Debt Fund, Inc.',
'PF': 'Pinnacle Foods, Inc.',
'EMN': 'Eastman Chemical Company',
'CVU': 'CPI Aerostructures, Inc.',
'WRB-D': 'W.R. Berkley Corporation',
'MXIM': 'Maxim Integrated Products, Inc.',
'LNN': 'Lindsay Corporation',
'WRLD': 'World Acceptance Corporation',
'VCO': 'Vina Concha Y Toro',
'CTZ': 'Qwest Corporation',
'CIB': 'BanColombia S.A.',
'NEON': 'Neonode Inc.',
'KKR': 'KKR & Co. L.P.',
'IVR': 'INVESCO MORTGAGE CAPITAL INC',
'TNK': 'Teekay Tankers Ltd.',
'ZEUS': 'Olympic Steel, Inc.',
'HII': 'Huntington Ingalls Industries, Inc.',
'BANX': 'StoneCastle Financial Corp',
'ZNH': 'China Southern Airlines Company Limited',
'CATH': 'Global X S&P 500 Catholic Values ETF',
'PIR': 'Pier 1 Imports, Inc.',
'MDIV': 'First Trust Multi-Asset Diversified Income Index Fund',
'TCP': 'TC PipeLines, LP',
'CKEC': 'Carmike Cinemas, Inc.',
'MMI': 'Marcus & Millichap, Inc.',
'NTLA': 'Intellia Therapeutics, Inc.',
'BUD': 'Anheuser-Busch Inbev SA',
'SDPI': 'Superior Drilling Products, Inc.',
'MED': 'MEDIFAST INC',
'PEBK': 'Peoples Bancorp of North Carolina, Inc.',
'WFC-O': 'Wells Fargo & Company',
'GZT': 'Gazit-Globe Ltd.',
'FRPH': 'FRP Holdings, Inc.',
'AC': 'Associated Capital Group, Inc.',
'GSJ': 'Goldman Sachs Group, Inc. (The)',
'MNRO': 'Monro Muffler Brake, Inc.',
'LE': 'Lands\' End, Inc.',
'AUO': 'AU Optronics Corp',
'HK': 'Halcon Resources Corporation',
'IRET-B': 'Investors Real Estate Trust',
'CHS': 'Chico\'s FAS, Inc.',
'TCCO': 'Technical Communications Corporation',
'AG': 'First Majestic Silver Corp.',
'BPFHW': 'Boston Private Financial Holdings, Inc.',
'SCHN': 'Schnitzer Steel Industries, Inc.',
'CYTK': 'Cytokinetics, Incorporated',
'EDIT': 'Editas Medicine, Inc.',
'UCBI': 'United Community Banks, Inc.',
'CNP': 'CenterPoint Energy, Inc.',
'CCNE': 'CNB Financial Corporation',
'ILMN': 'Illumina, Inc.',
'SPA': 'Sparton Corporation',
'OPTT': 'Ocean Power Technologies, Inc.',
'MSN': 'Emerson Radio Corporation',
'PIP': 'PharmAthene, Inc',
'KAI': 'Kadant Inc',
'JMEI': 'Jumei International Holding Limited',
'DISCA': 'Discovery Communications, Inc.',
'NRCIB': 'National Research Corporation',
'MSD': 'Morgan Stanley Emerging Markets Debt Fund, Inc.',
'UBIC': 'UBIC, Inc.',
'CALA': 'Calithera Biosciences, Inc.',
'TDC': 'Teradata Corporation',
'AGN-A': 'Allergan plc.',
'BCO': 'Brink\'s Company (The)',
'NXC': 'Nuveen Insured California Select Tax-Free Income Portfolio',
'CLGX': 'CoreLogic, Inc.',
'FSNN': 'Fusion Telecommunications International, Inc.',
'FSLR': 'First Solar, Inc.',
'HAFC': 'Hanmi Financial Corporation',
'AHL-C': 'Aspen Insurance Holdings Limited',
'SAGE': 'Sage Therapeutics, Inc.',
'ZBIO': 'ProShares UltraPro Short NASDAQ Biotechnology',
'LRAD': 'LRAD Corporation',
'IIN': 'IntriCon Corporation',
'CFNL': 'Cardinal Financial Corporation',
'NG': 'Novagold Resources Inc.',
'SSH': 'Sunshine Heart Inc',
'SCE-E': 'Southern California Edison Company',
'SSWN': 'Seaspan Corporation',
'ORIG': 'Ocean Rig UDW Inc.',
'ATLO': 'Ames National Corporation',
'TPZ': 'Tortoise Power and Energy Infrastructure Fund, Inc',
'PLPM': 'Planet Payment, Inc.',
'SSFN': 'Stewardship Financial Corp',
'HCACW': 'Hennessy Capital Acquisition Corp. II',
'BTI': 'British American Tobacco p.l.c.',
'PGTI': 'PGT, Inc.',
'TACT': 'TransAct Technologies Incorporated',
'MUR': 'Murphy Oil Corporation',
'ENG': 'ENGlobal Corporation',
'CREG': 'China Recycling Energy Corporation',
'ANH': 'Anworth Mortgage Asset Corporation',
'EVV': 'Eaton Vance Limited Duration Income Fund',
'SUN': 'Sunoco LP',
'FPO-A.CL': 'First Potomac Realty Trust',
'TST': 'TheStreet, Inc.',
'ANCB': 'Anchor Bancorp',
'C-N': 'Citigroup Inc.',
'MVO': 'MV Oil Trust',
'FMO': 'Fiduciary/Claymore MLP Opportunity Fund',
'FIS': 'Fidelity National Information Services, Inc.',
'VYMI': 'Vanguard International High Dividend Yield ETF',
'EVBS': 'Eastern Virginia Bankshares, Inc.',
'ASNA': 'Ascena Retail Group, Inc.',
'ARP': 'Atlas Resource Partners, L.P.',
'GEQ': 'Guggenheim Equal Weight Enhanced Equity Income Fund',
'SOHO': 'Sotherly Hotels Inc.',
'APTO': 'Aptose Biosciences, Inc.',
'PEBO': 'Peoples Bancorp Inc.',
'CBFV': 'CB Financial Services, Inc.',
'HSII': 'Heidrick & Struggles International, Inc.',
'RHP': 'Ryman Hospitality Properties, Inc.',
'F': 'Ford Motor Company',
'IDXX': 'IDEXX Laboratories, Inc.',
'TPVZ': 'TriplePoint Venture Growth BDC Corp.',
'DNP': 'Duff & Phelps Utilities Income, Inc.',
'RCMT': 'RCM Technologies, Inc.',
'FMBI': 'First Midwest Bancorp, Inc.',
'FCF': 'First Commonwealth Financial Corporation',
'BID': 'Sotheby\'s',
'PPT': 'Putnam Premier Income Trust',
'AGYS': 'Agilysys, Inc.',
'GBDC': 'Golub Capital BDC, Inc.',
'VGZ': 'Vista Gold Corporation',
'CSLT': 'Castlight Health, inc.',
'HCN': 'Welltower Inc.',
'TMUS': 'T-Mobile US, Inc.',
'CDNS': 'Cadence Design Systems, Inc.',
'CMTL': 'Comtech Telecommunications Corp.',
'PSA-W': 'Public Storage',
'FCE.A': 'Forest City Realty Trust, Inc.',
'PFIE': 'Profire Energy, Inc.',
'ZIONW': 'Zions Bancorporation',
'DD': 'E.I. du Pont de Nemours and Company',
'WRN': 'Western Copper and Gold Corporation',
'WYIGW': 'JM Global Holding Company',
'VCV': 'Invesco California Value Municipal Income Trust',
'FFWM': 'First Foundation Inc.',
'VIAV': 'Viavi Solutions Inc.',
'JNPR': 'Juniper Networks, Inc.',
'UBND': 'WisdomTree Western Asset Unconstrained Bond Fund',
'AIT': 'Applied Industrial Technologies, Inc.',
'GNBC': 'Green Bancorp, Inc.',
'CRH': 'CRH PLC',
'HZNP': 'Horizon Pharma plc',
'RNG': 'Ringcentral, Inc.',
'STKS': 'The ONE Group Hospitality, Inc.',
'FNF': 'Fidelity National Financial, Inc.',
'MEI': 'Methode Electronics, Inc.',
'TINY': 'Harris & Harris Group, Inc.',
'MTX': 'Minerals Technologies Inc.',
'VIRT': 'Virtu Financial, Inc.',
'WAFDW': 'Washington Federal, Inc.',
'CETX': 'Cemtrex Inc.',
'ELOS': 'Syneron Medical Ltd.',
'IEUS': 'iShares MSCI Europe Small-Cap ETF',
'PMD': 'Psychemedics Corporation',
'WASH': 'Washington Trust Bancorp, Inc.',
'MSCC': 'Microsemi Corporation',
'EQFN': 'Equitable Financial Corp.',
'CNTY': 'Century Casinos, Inc.',
'WIRE': 'Encore Wire Corporation',
'EVBN': 'Evans Bancorp, Inc.',
'IRET-': 'Investors Real Estate Trust',
'MATX': 'Matson, Inc.',
'REXI': 'Resource America, Inc.',
'TPVG': 'TriplePoint Venture Growth BDC Corp.',
'PSMT': 'PriceSmart, Inc.',
'ENFC': 'Entegra Financial Corp.',
'CPK': 'Chesapeake Utilities Corporation',
'CZFC': 'Citizens First Corporation',
'ISG': 'ING Group, N.V.',
'KRG': 'Kite Realty Group Trust',
'WHFBL': 'WhiteHorse Finance, Inc.',
'TECD': 'Tech Data Corporation',
'OSIR': 'Osiris Therapeutics, Inc.',
'PRKR': 'ParkerVision, Inc.',
'EMCB': 'WisdomTree Emerging Markets Corporate Bond Fund',
'AHP-B': 'Ashford Hospitality Prime, Inc.',
'PPG': 'PPG Industries, Inc.',
'NOM': 'Nuveen Missouri Premium Income Municipal Fund',
'LILA': 'Liberty Global plc',
'UUU': 'Universal Security Instruments, Inc.',
'AVHI': 'A V Homes, Inc.',
'GJV': 'Synthetic Fixed-Income Securities, Inc.',
'CATYW': 'Cathay General Bancorp',
'NH': 'NantHealth, Inc.',
'DRQ': 'Dril-Quip, Inc.',
'LDRH': 'LDR Holding Corporation',
'WAFD': 'Washington Federal, Inc.',
'MWA': 'MUELLER WATER PRODUCTS',
'IAE': 'Voya Asia Pacific High Dividend Equity Income Fund',
'TSN': 'Tyson Foods, Inc.',
'LDL': 'Lydall, Inc.',
'CNLMU': 'CB Pharma Acquisition Corp.',
'AMDA': 'Amedica Corporation',
'MFIN': 'Medallion Financial Corp.',
'AGNCB': 'American Capital Agency Corp.',
'JCTCF': 'Jewett-Cameron Trading Company',
'EARN': 'Ellington Residential Mortgage REIT',
'COVS': 'Covisint Corporation',
'MUC': 'Blackrock MuniHoldings California Quality Fund, Inc.',
'PKX': 'POSCO',
'BOBE': 'Bob Evans Farms, Inc.',
'VIIZ': 'region',
'DMB': 'Dreyfus Municipal Bond Infrastructure Fund, Inc.',
'CRHM': 'CRH Medical Corporation',
'GM': 'General Motors Company',
'WEN': 'Wendy\'s Company (The)',
'CVBF': 'CVB Financial Corporation',
'FDX': 'FedEx Corporation',
'AMTD': 'TD Ameritrade Holding Corporation',
'AFGH': 'American Financial Group, Inc.',
'CXO': 'Concho Resources Inc.',
'VIGI': 'Vanguard International Dividend Appreciation ETF',
'CREE': 'Cree, Inc.',
'MANT': 'ManTech International Corporation',
'AGFS': 'AgroFresh Solutions, Inc.',
'RUTH': 'Ruth\'s Hospitality Group, Inc.',
'EQCO': 'Equity Commonwealth',
'CFI': 'Culp, Inc.',
'HMC': 'Honda Motor Company, Ltd.',
'WFE-A': 'Wells Fargo & Company',
'MACK': 'Merrimack Pharmaceuticals, Inc.',
'H': 'Hyatt Hotels Corporation',
'VGIT': 'Vanguard Intermediate-Term Government Bond ETF',
'TUMI': 'Tumi Holdings, Inc.',
'ABIO': 'ARCA biopharma, Inc.',
'EXTN': 'Exterran Corporation',
'OBCI': 'Ocean Bio-Chem, Inc.',
'CFR': 'Cullen/Frost Bankers, Inc.',
'GCBC': 'Greene County Bancorp, Inc.',
'DKL': 'Delek Logistics Partners, L.P.',
'GLYC': 'GlycoMimetics, Inc.',
'BYFC': 'Broadway Financial Corporation',
'CORE': 'Core-Mark Holding Company, Inc.',
'SHSP': 'SharpSpring, Inc.',
'PSF': 'Cohen & Steers Select Preferred and Income Fund, Inc.',
'LALT': 'PowerShares Multi-Strategy Alternative Portfolio',
'ADTN': 'ADTRAN, Inc.',
'TSL': 'Trina Solar Limited',
'CNOB': 'ConnectOne Bancorp, Inc.',
'BIOC': 'Biocept, Inc.',
'XRA': 'Exeter Resource Corporation',
'MSM': 'MSC Industrial Direct Company, Inc.',
'STRS': 'Stratus Properties, Inc.',
'CYS-B': 'CYS Investments, Inc.',
'EEP': 'Enbridge Energy, L.P.',
'EVA': 'Enviva Partners, LP',
'ECC ': 'Eagle Point Credit Company Inc.',
'TKF': 'Turkish Investment Fund, Inc. (The)',
'GIII': 'G-III Apparel Group, LTD.',
'CBIO': 'Catalyst Biosciences, Inc. ',
'CHN': 'China Fund, Inc. (The)',
'STOR': 'STORE Capital Corporation',
'ACBI': 'Atlantic Capital Bancshares, Inc.',
'TX': 'Ternium S.A.',
'DMF': 'Dreyfus Municipal Income, Inc.',
'BMO': 'Bank Of Montreal',
'KWR': 'Quaker Chemical Corporation',
'PYDS': 'Payment Data Systems, Inc.',
'NTZ': 'Natuzzi, S.p.A.',
'SITE': 'SiteOne Landscape Supply, Inc.',
'IGLD': 'Internet Gold Golden Lines Ltd.',
'NSR': 'Neustar, Inc.',
'SGNL': 'Signal Genetics, Inc.',
'KTOVW': 'Kitov Pharmaceuticals Holdings Ltd.',
'CW': 'Curtiss-Wright Corporation',
'SGM': 'Stonegate Mortgage Corporation',
'EDF': 'Stone Harbor Emerging Markets Income Fund',
'FOGO': 'Fogo de Chao, Inc.',
'NBD': 'Nuveen Build America Bond Opportunity Fund',
'TIPT': 'Tiptree Financial Inc.',
'ARIA': 'ARIAD Pharmaceuticals, Inc.',
'BCRX': 'BioCryst Pharmaceuticals, Inc.',
'GSIT': 'GSI Technology, Inc.',
'GRO': 'Agria Corporation',
'MINDP': 'Mitcham Industries, Inc.',
'JFC': 'JPMorgan China Region Fund, Inc.',
'EGP': 'EastGroup Properties, Inc.',
'SB': 'Safe Bulkers, Inc',
'BHACU': 'Barington/Hilco Acquisition Corp.',
'AST': 'Asterias Biotherapeutics, Inc.',
'PCYO': 'Pure Cycle Corporation',
'QSII': 'Quality Systems, Inc.',
'ETE': 'Energy Transfer Equity, L.P.',
'RBS-S': 'Royal Bank Scotland plc (The)',
'TOF': 'Tofutti Brands Inc.',
'GTY': 'Getty Realty Corporation',
'BAX': 'Baxter International Inc.',
'GLF': 'GulfMark Offshore, Inc.',
'VALE': 'VALE S.A.',
'CSRA': 'CSRA Inc.',
'SALM': 'Salem Media Group, Inc.',
'KYN': 'Kayne Anderson MLP Investment Company',
'TCRD': 'THL Credit, Inc.',
'HRB': 'H&R Block, Inc.',
'NTI': 'Northern Tier Energy LP',
'HLF': 'Herbalife LTD.',
'MER-M': 'Merrill Lynch & Co., Inc.',
'LBIO': 'Lion Biotechnologies, Inc.',
'ISTR': 'Investar Holding Corporation',
'USB-A': 'U.S. Bancorp',
'REFR': 'Research Frontiers Incorporated',
'HTLD': 'Heartland Express, Inc.',
'PSAU': 'PowerShares Global Gold & Precious Metals Portfolio',
'SLRA': 'Solar Capital Ltd.',
'WTT': 'Wireless Telecom Group, Inc.',
'HIVE': 'Aerohive Networks, Inc.',
'BPTH': 'Bio-Path Holdings, Inc.',
'LMHA': 'Legg Mason, Inc.',
'RARE': 'Ultragenyx Pharmaceutical Inc.',
'MCHP': 'Microchip Technology Incorporated',
'SMTX': 'SMTC Corporation',
'SIF': 'SIFCO Industries, Inc.',
'GAB-H': 'Gabelli Equity Trust, Inc. (The)',
'MVIS': 'Microvision, Inc.',
'KLRE': 'KLR Energy Acquisition Corp.',
'NMBL': 'Nimble Storage, Inc.',
'AKAO': 'Achaogen, Inc.',
'HPT-D': 'Hospitality Properties Trust',
'AWF': 'Alliance World Dollar Government Fund II',
'TVC': 'Tennessee Valley Authority',
'LDOS': 'Leidos Holdings, Inc.',
'FCLF': 'First Clover Leaf Financial Corp.',
'BNY': 'BlackRock New York Investment Quality Municipal Trust Inc. (The)',
'ICLDW': 'InterCloud Systems, Inc',
'EOS': 'Eaton Vance Enhanced Equity Income Fund II',
'ESRX': 'Express Scripts Holding Company',
'VLT': 'Invesco High Income Trust II',
'GJS': 'STRATS Trust',
'WD': 'Walker & Dunlop, Inc.',
'AFG': 'American Financial Group, Inc.',
'MNGA': 'MagneGas Corporation',
'QEP': 'QEP Resources, Inc.',
'BBK': 'Blackrock Municipal Bond Trust',
'ATHN': 'athenahealth, Inc.',
'TTI': 'Tetra Technologies, Inc.',
'GPIAW': 'GP Investments Acquisition Corp.',
'BBBY': 'Bed Bath & Beyond Inc.',
'ACC': 'American Campus Communities Inc',
'JPM-A': 'J P Morgan Chase & Co',
'RILY': 'B. Riley Financial, Inc.',
'OMI': 'Owens & Minor, Inc.',
'PGP': 'Pimco Global Stocksplus & Income Fund',
'MAN': 'ManpowerGroup',
'RBCAA': 'Republic Bancorp, Inc.',
'IHT': 'InnSuites Hospitality Trust',
'SCE-D': 'Southern California Edison Company',
'TK': 'Teekay Corporation',
'TRN': 'Trinity Industries, Inc.',
'TRT': 'Trio-Tech International',
'FAF': 'First American Corporation (The)',
'VVR': 'Invesco Senior Income Trust',
'MBRG': 'Middleburg Financial Corporation',
'PKG': 'Packaging Corporation of America',
'NIE': 'AllianzGI Equity & Convertible Income Fund',
'MLVF': 'Malvern Bancorp, Inc.',
'CBL-E': 'CBL & Associates Properties, Inc.',
'NBN': 'Northeast Bancorp',
'NL': 'NL Industries, Inc.',
'TDOC': 'Teladoc, Inc.',
'APTS': 'Preferred Apartment Communities, Inc.',
'GTE': 'Gran Tierra Energy Inc.',
'ITRI': 'Itron, Inc.',
'POR': 'Portland General Electric Company',
'ALK': 'Alaska Air Group, Inc.',
'FEX': 'First Trust Large Cap Core AlphaDEX Fund',
'TGD': 'Timmins Gold Corp',
'PCG-E': 'Pacific Gas & Electric Co.',
'CTU': 'Qwest Corporation',
'CAAS': 'China Automotive Systems, Inc.',
'SR': 'Spire Inc.',
'THGA': 'The Hanover Insurance Group, Inc.',
'MDP': 'Meredith Corporation',
'AMGN': 'Amgen Inc.',
'JKHY': 'Jack Henry & Associates, Inc.',
'CUI': 'CUI Global, Inc.',
'MTCH': 'Match Group, Inc.',
'JPI': 'Nuveen Preferred and Income Term Fund',
'NGHCO': 'National General Holdings Corp',
'GTS': 'Triple-S Management Corporation',
'QUNR': 'Qunar Cayman Islands Limited',
'PMC': 'Pharmerica Corporation',
'TTMI': 'TTM Technologies, Inc.',
'BJZ': 'Blackrock California Municipal 2018 Term Trust',
'BRT': 'BRT Realty Trust',
'DHT': 'DHT Holdings, Inc.',
'AVY': 'Avery Dennison Corporation',
'VNO': 'Vornado Realty Trust',
'VTR': 'Ventas, Inc.',
'CMCO': 'Columbus McKinnon Corporation',
'ININ': 'Interactive Intelligence Group, Inc.',
'ACM': 'AECOM',
'SSRI': 'Silver Standard Resources Inc.',
'MC': 'Moelis & Company',
'ITG': 'Investment Technology Group, Inc.',
'NCT': 'Newcastle Investment Corporation',
'GMS': 'GMS Inc.',
'BPK': 'Blackrock Municipal 2018 Term Trust',
'MAR': 'Marriott International',
'ALGN': 'Align Technology, Inc.',
'PLM': 'Polymet Mining Corp.',
'ED': 'Consolidated Edison Inc',
'CHFC': 'Chemical Financial Corporation',
'HEB': 'Hemispherx BioPharma, Inc.',
'SBNA': 'Scorpio Tankers Inc.',
'EHI': 'Western Asset Global High Income Fund Inc',
'TMHC': 'Taylor Morrison Home Corporation',
'ASA': 'ASA Gold and Precious Metals Limited',
'IMMY': 'Imprimis Pharmaceuticals, Inc.',
'INCR': 'INC Research Holdings, Inc.',
'DYN': 'Dynegy Inc.',
'SITO': 'SITO Mobile, Ltd.',
'CMS': 'CMS Energy Corporation',
'EGT': 'Entertainment Gaming Asia Incorporated',
'EFX': 'Equifax, Inc.',
'JW.A': 'John Wiley & Sons, Inc.',
'ROP': 'Roper Technologies, Inc.',
'DLTR': 'Dollar Tree, Inc.',
'LRN': 'K12 Inc',
'ELECU': 'Electrum Special Acquisition Corporation',
'AKO.A': 'Embotelladora Andina S.A.',
'CAPR': 'Capricor Therapeutics, Inc.',
'CDTI': 'Clean Diesel Technologies, Inc.',
'VTA': 'Invesco Credit Opportunities Fund',
'GAB-D': 'Gabelli Equity Trust, Inc. (The)',
'ASND': 'Ascendis Pharma A/S',
'GS-K': 'Goldman Sachs Group, Inc. (The)',
'STKL': 'SunOpta, Inc.',
'CSCO': 'Cisco Systems, Inc.',
'BBRG': 'Bravo Brio Restaurant Group, Inc.',
'CEV': 'Eaton Vance California Municipal Income Trust',
'DTK': 'Deutsche Bank AG',
'TVPT': 'Travelport Worldwide Limited',
'STN': 'Stantec Inc',
'ISBC': 'Investors Bancorp, Inc.',
'PBNC': 'Paragon Commercial Corporation',
'NEPT': 'Neptune Technologies & Bioresources Inc',
'GERN': 'Geron Corporation',
'TANN': 'TravelCenters of America LLC',
'MER-K': 'Merrill Lynch & Co., Inc.',
'DXLG': 'Destination XL Group, Inc.',
'GS-N': 'Goldman Sachs Group, Inc. (The)',
'EXG': 'Eaton Vance Tax-Managed Global Diversified Equity Income Fund',
'NYCB': 'New York Community Bancorp, Inc.',
'RP': 'RealPage, Inc.',
'MTT': 'Western Asset Municipal Defined Opportunity Trust Inc',
'SIRI': 'Sirius XM Holdings Inc.',
'CTL': 'CenturyLink, Inc.',
'EDE': 'Empire District Electric Company (The)',
'CODI': 'Compass Diversified Holdings',
'SHPG': 'Shire plc',
'SQQQ': 'ProShares UltraPro Short QQQ',
'UCFC': 'United Community Financial Corp.',
'GI': 'EndoChoice Holdings, Inc.',
'FRD': 'Friedman Industries Inc.',
'FMX': 'Fomento Economico Mexicano S.A.B. de C.V.',
'NTRI': 'NutriSystem Inc',
'RH': 'Restoration Hardware Holdings Inc.',
'AMAG': 'AMAG Pharmaceuticals, Inc.',
'NS': 'Nustar Energy L.P.',
'VSAR': 'Versartis, Inc.',
'EVK': 'Ever-Glory International Group, Inc.',
'THRM': 'Gentherm Inc',
'RFEM': 'First Trust RiverFront Dynamic Emerging Markets ETF',
'ENSV': 'ENSERVCO Corporation',
'BRN': 'Barnwell Industries, Inc.',
'NJR': 'New Jersey Resources Corporation',
'LWAY': 'Lifeway Foods, Inc.',
'BASI': 'Bioanalytical Systems, Inc.',
'SKY': 'Skyline Corporation',
'FSFR': 'Fifth Street Senior Floating Rate Corp.',
'CEE': 'Central Europe, Russia and Turkey Fund, Inc. (The)',
'EMD': 'Western Asset Emerging Markets Income Fund, Inc',
'SPP': 'Sanchez Production Partners LP',
'IP': 'International Paper Company',
'SSTK': 'Shutterstock, Inc.',
'PUB': 'People\'s Utah Bancorp',
'AAP': 'Advance Auto Parts Inc',
'MARK': 'Remark Media, Inc.',
'VJET': 'voxeljet AG',
'FDEF': 'First Defiance Financial Corp.',
'NEE-G': 'NextEra Energy, Inc.',
'HIHO': 'Highway Holdings Limited',
'PBPB': 'Potbelly Corporation',
'MNTA': 'Momenta Pharmaceuticals, Inc.',
'OI': 'Owens-Illinois, Inc.',
'WTFCW': 'Wintrust Financial Corporation',
'EPIX': 'ESSA Pharma Inc.',
'SPN': 'Superior Energy Services, Inc.',
'GSBD': 'Goldman Sachs BDC, Inc.',
'MCA': 'Blackrock MuniYield California Insured Fund, Inc.',
'RDUS': 'Radius Health, Inc.',
'NRZ': 'New Residential Investment Corp.',
'CUBI-E': 'Customers Bancorp, Inc',
'BRK.B': 'Berkshire Hathaway Inc.',
'MCO': 'Moody\'s Corporation',
'ZB-H': 'Zions Bancorporation',
'TCRZ': 'THL Credit, Inc.',
'IO': 'Ion Geophysical Corporation',
'SUI': 'Sun Communities, Inc.',
'MANU': 'Manchester United Ltd.',
'BDX': 'Becton, Dickinson and Company',
'OFIX': 'Orthofix International N.V.',
'WDAY': 'Workday, Inc.',
'VNOM': 'Viper Energy Partners LP',
'FVC': 'First Trust Dorsey Wright Dynamic Focus 5 ETF',
'TNP-B': 'Tsakos Energy Navigation Ltd',
'PAG': 'Penske Automotive Group, Inc.',
'FBNK': 'First Connecticut Bancorp, Inc.',
'EMCF': 'Emclaire Financial Corp',
'SCE-F': 'Southern California Edison Company',
'UBP-F': 'Urstadt Biddle Properties Inc.',
'QYLD': 'Recon Capital NASDAQ-100 Covered Call ETF',
'IEP': 'Icahn Enterprises L.P.',
'ALXA': 'Alexza Pharmaceuticals, Inc.',
'RDS.B': 'Royal Dutch Shell PLC',
'EMF': 'Templeton Emerging Markets Fund',
'MWO': 'Morgan Stanley',
'KNL': 'Knoll, Inc.',
'OSB': 'Norbord Inc.',
'WFD': 'Westfield Financial, Inc.',
'SUPN': 'Supernus Pharmaceuticals, Inc.',
'ACWX': 'iShares MSCI ACWI ex US Index Fund',
'HCCI': 'Heritage-Crystal Clean, Inc.',
'OPK': 'Opko Health Inc',
'SIGI': 'Selective Insurance Group, Inc.',
'BDN-E': 'Brandywine Realty Tr',
'TTHI': 'Transition Therapeutics, Inc.',
'NHS': 'Neuberger Berman High Yield Strategies Fund',
'SBBX': 'Sussex Bancorp',
'EHIC': 'eHi Car Services Limited',
'HTZ': 'Hertz Global Holdings, Inc',
'BXC': 'BlueLinx Holdings Inc.',
'SPNE': 'SeaSpine Holdings Corporation',
'QVCA': 'Liberty Interactive Corporation',
'TRCB': 'Two River Bancorp',
'BCH': 'Banco De Chile',
'XYL': 'Xylem Inc.',
'SFST': 'Southern First Bancshares, Inc.',
'WILC': 'G. Willi-Food International, Ltd.',
'LH': 'Laboratory Corporation of America Holdings',
'BANC-E': 'Banc of California, Inc.',
'EGY': 'Vaalco Energy Inc',
'STMP': 'Stamps.com Inc.',
'IDXG': 'Interpace Diagnostics Group, Inc.',
'ESEA': 'Euroseas Ltd.',
'NBY': 'NovaBay Pharmaceuticals, Inc.',
'RY-S': 'Royal Bank Of Canada',
'VFL': 'Delaware Investments Florida Insured Municipal Income Fund',
'DRWIW': 'DragonWave Inc',
'TTP': 'Tortoise Pipeline & Energy Fund, Inc.',
'ARRS': 'ARRIS International plc',
'GAB': 'Gabelli Equity Trust, Inc. (The)',
'CGI': 'Celadon Group, Inc.',
'FMBH': 'First Mid-Illinois Bancshares, Inc.',
'ETM': 'Entercom Communications Corporation',
'BCPC': 'Balchem Corporation',
'QCRH': 'QCR Holdings, Inc.',
'UFCS': 'United Fire Group, Inc',
'IL': 'IntraLinks Holdings, Inc.',
'FEIM': 'Frequency Electronics, Inc.',
'STAG-C': 'Stag Industrial, Inc.',
'HBAN': 'Huntington Bancshares Incorporated',
'EVGN': 'Evogene Ltd.',
'FRC': 'FIRST REPUBLIC BANK',
'BFR': 'BBVA Banco Frances S.A.',
'MCZ': 'Mad Catz Interactive Inc',
'DORM': 'Dorman Products, Inc.',
'AUMA': 'AR Capital Acquisition Corp.',
'CIVI': 'Civitas Solutions, Inc.',
'MS-F': 'Morgan Stanley',
'KELYB': 'Kelly Services, Inc.',
'JD': 'JD.com, Inc.',
'USB-O': 'U.S. Bancorp',
'ISHG': 'iShares S&P/Citigroup 1-3 Year International Treasury Bond Fund',
'FLKS': 'Flex Pharma, Inc.',
'BWINB': 'Baldwin & Lyons, Inc.',
'BYM': 'Blackrock Municipal Income Quality Trust',
'HDRAW': 'Hydra Industries Acquisition Corp.',
'AMT-A': 'American Tower Corporation (REIT)',
'HY': 'Hyster-Yale Materials Handling, Inc.',
'SN': 'Sanchez Energy Corporation',
'FRO': 'Frontline Ltd.',
'TRS': 'TriMas Corporation',
'WGO': 'Winnebago Industries, Inc.',
'FEUZ': 'First Trust Eurozone AlphaDEX ETF',
'TTGT': 'TechTarget, Inc.',
'ETP': 'ENERGY TRANSFER PARTNERS',
'UAL': 'United Continental Holdings, Inc.',
'HHY': 'Brookfield High Income Fund Inc.',
'TECH': 'Bio-Techne Corp',
'NSA': 'National Storage Affiliates Trust',
'MPX': 'Marine Products Corporation',
'YZC': 'Yanzhou Coal Mining Company Limited',
'BAC-I': 'Bank of America Corporation',
'CAPN': 'Capnia, Inc.',
'MBWM': 'Mercantile Bank Corporation',
'ESV': 'ENSCO plc',
'AUMAU': 'AR Capital Acquisition Corp.',
'MFS': 'Manitowoc Food Service, Inc.',
'AF-C': 'Astoria Financial Corporation',
'AIV-A': 'Apartment Investment and Management Company',
'PAHC': 'Phibro Animal Health Corporation',
'SCHW-D': 'The Charles Schwab Corporation',
'ZF': 'Zweig Fund, Inc. (The)',
'NPK': 'National Presto Industries, Inc.',
'UG': 'United-Guardian, Inc.',
'GM.WS.B': 'General Motors Company',
'BAC-L': 'Bank of America Corporation',
'EDGE': 'Edge Therapeutics, Inc.',
'TS': 'Tenaris S.A.',
'GBIM': 'GlobeImmune, Inc.',
'NBHC': 'National Bank Holdings Corporation',
'DLB': 'Dolby Laboratories',
'HCSG': 'Healthcare Services Group, Inc.',
'PBMD': 'Prima BioMed Ltd',
'DHR': 'Danaher Corporation',
'HAIN': 'The Hain Celestial Group, Inc.',
'TNAV': 'TeleNav, Inc.',
'SGYPU': 'Synergy Pharmaceuticals, Inc.',
'GAB-G': 'Gabelli Equity Trust, Inc. (The)',
'MICT': 'Micronet Enertec Technologies, Inc.',
'EROS': 'Eros International PLC',
'SUM': 'Summit Materials, Inc.',
'BPT': 'BP Prudhoe Bay Royalty Trust',
'B': 'Barnes Group, Inc.',
'EHTH': 'eHealth, Inc.',
'NMI': 'Nuveen Municipal Income Fund, Inc.',
'VNTV': 'Vantiv, Inc.',
'SCE-B': 'Southern California Edison Company',
'SB-B': 'Safe Bulkers, Inc',
'EIM': 'Eaton Vance Municipal Bond Fund',
'RECN': 'Resources Connection, Inc.',
'HMSY': 'HMS Holdings Corp',
'VIP': 'VimpelCom Ltd.',
'FTSL': 'First Trust Senior Loan Fund ETF',
'IMO': 'Imperial Oil Limited',
'TACO': 'Del Taco Restaurants, Inc.',
'NEWTZ': 'Newtek Business Services Corp.',
'BNCL': 'Beneficial Bancorp, Inc.',
'COTV': 'Cotiviti Holdings, Inc.',
'DIAX': 'Nuveen Dow 30SM Dynamic Overwrite Fund',
'CARZ': 'First Trust NASDAQ Global Auto Index Fund',
'WSFSL': 'WSFS Financial Corporation',
'FDP': 'Fresh Del Monte Produce, Inc.',
'RICE': 'Rice Energy Inc.',
'KIO': 'KKR Income Opportunities Fund',
'KITE': 'Kite Pharma, Inc.',
'HPT': 'Hospitality Properties Trust',
'SFR': 'Colony Starwood Homes',
'CRUS': 'Cirrus Logic, Inc.',
'LSXMA': 'Liberty Media Corporation',
'NTCT': 'NetScout Systems, Inc.',
'DFT': 'Dupont Fabros Technology, Inc.',
'SCSC': 'ScanSource, Inc.',
'WFC-J': 'Wells Fargo & Company',
'ASCMA': 'Ascent Capital Group, Inc.',
'TEF': 'Telefonica SA',
'UHAL': 'Amerco',
'CFNB': 'California First National Bancorp',
'MFD': 'Macquarie/First Trust Global',
'BELFB': 'Bel Fuse Inc.',
'IBTX': 'Independent Bank Group, Inc',
'NSSC': 'NAPCO Security Technologies, Inc.',
'FUR': 'Winthrop Realty Trust',
'HEOP': 'Heritage Oaks Bancorp',
'GTLS': 'Chart Industries, Inc.',
'FNX': 'First Trust Mid Cap Core AlphaDEX Fund',
'RELL': 'Richardson Electronics, Ltd.',
'AGX': 'Argan, Inc.',
'IHD': 'Voya Emerging Markets High Income Dividend Equity Fund',
'ATRS': 'Antares Pharma, Inc.',
'NWHM': 'New Home Company Inc. (The)',
'VISI': 'Volt Information Sciences, Inc.',
'MFA-B': 'MFA Financial, Inc.',
'EGAS': 'Gas Natural Inc.',
'NAVB': 'Navidea Biopharmaceuticals, Inc.',
'FEO': 'First Trust/Aberdeen Emerging Opportunity Fund',
'USEG': 'U.S. Energy Corp.',
'SGB': 'Southwest Georgia Financial Corporation',
'HTGM': 'HTG Molecular Diagnostics, Inc.',
'TGA': 'Transglobe Energy Corp',
'FLXN': 'Flexion Therapeutics, Inc.',
'TGEN': 'Tecogen Inc.',
'EAB': 'Entergy Arkansas, Inc.',
'RNVA': 'Rennova Health, Inc.',
'RIO': 'Rio Tinto Plc',
'MATW': 'Matthews International Corporation',
'PINC': 'Premier, Inc.',
'CBB': 'Cincinnati Bell Inc',
'HYLS': 'First Trust High Yield Long/Short ETF',
'ISRG': 'Intuitive Surgical, Inc.',
'FULL': 'Full Circle Capital Corporation',
'CPL': 'CPFL Energia S.A.',
'ABR-C': 'Arbor Realty Trust',
'SMSI': 'Smith Micro Software, Inc.',
'QURE': 'uniQure N.V.',
'CNLMR': 'CB Pharma Acquisition Corp.',
'HCHC': 'HC2 Holdings, Inc.',
'DXJS': 'WisdomTree Japan Hedged SmallCap Equity Fund',
'HURC': 'Hurco Companies, Inc.',
'HCAP': 'Harvest Capital Credit Corporation',
'ARCB': 'ArcBest Corporation',
'CFMS': 'ConforMIS, Inc.',
'PMV': 'PMV Acquisition Corp.',
'EGAN': 'eGain Corporation',
'VONV': 'Vanguard Russell 1000 Value ETF',
'SYMX': 'Synthesis Energy Systems, Inc.',
'VNO-L': 'Vornado Realty Trust',
'UAMY': 'United States Antimony Corporation',
'DB': 'Deutsche Bank AG',
'MDSO': 'Medidata Solutions, Inc.',
'BMRC': 'Bank of Marin Bancorp',
'NVGS': 'Navigator Holdings Ltd.',
'NXTD': 'NXT-ID Inc.',
'KLIC': 'Kulicke and Soffa Industries, Inc.',
'AVG': 'AVG Technologies N.V.',
'MTB.WS': 'M&T Bank Corporation',
'FRBK': 'Republic First Bancorp, Inc.',
'NYNY': 'Empire Resorts, Inc.',
'AIXG': 'Aixtron SE',
'IBN': 'ICICI Bank Limited',
'CVCY': 'Central Valley Community Bancorp',
'ASRVP': 'AmeriServ Financial Inc.',
'MDT': 'Medtronic plc',
'TIER': 'TIER REIT, Inc.',
'MMC': 'Marsh & McLennan Companies, Inc.',
'AAOI': 'Applied Optoelectronics, Inc.',
'YDKN': 'Yadkin Financial Corporation',
'AQXP': 'Aquinox Pharmaceuticals, Inc.',
'FSAM': 'Fifth Street Asset Management Inc.',
'NSIG': 'NeuroSigma, Inc.',
'BGE-B': 'Baltimore Gas & Electric Company',
'RMR': 'The RMR Group Inc.',
'THM': 'International Tower Hill Mines Ltd',
'RYN': 'Rayonier Inc.',
'PRGX': 'PRGX Global, Inc.',
'NZF': 'Nuveen Enhanced Municipal Credit Opportunities Fun',
'FFNW': 'First Financial Northwest, Inc.',
'MMT': 'MFS Multimarket Income Trust',
'TERP': 'TerraForm Power, Inc.',
'CCU': 'Compania Cervecerias Unidas, S.A.',
'BBT-H': 'BB&T Corporation',
'PCO': 'Pendrell Corporation',
'NAVI': 'Navient Corporation',
'NXPI': 'NXP Semiconductors N.V.',
'SORL': 'SORL Auto Parts, Inc.',
'GVP': 'GSE Systems, Inc.',
'JASN': 'Jason Industries, Inc.',
'CAF': 'Morgan Stanley China A Share Fund Inc.',
'VPG': 'Vishay Precision Group, Inc.',
'ICAD': 'icad inc.',
'TE': 'TECO Energy, Inc.',
'CAPX': 'Elkhorn S&P 500 Capital Expenditures Portfolio',
'EDUC': 'Educational Development Corporation',
'CERCW': 'Cerecor Inc.',
'BSD': 'BlackRock Strategic Municipal Trust Inc. (The)',
'WATT': 'Energous Corporation',
'QUIK': 'QuickLogic Corporation',
'CBAY': 'Cymabay Therapeutics Inc.',
'XLRN': 'Acceleron Pharma Inc.',
'NXTDW': 'NXT-ID Inc.',
'MIC': 'Macquarie Infrastructure Company',
'TRGP': 'Targa Resources, Inc.',
'MS-A': 'Morgan Stanley',
'EPR-F': 'EPR Properties',
'AMD': 'Advanced Micro Devices, Inc.',
'RAS-A': 'RAIT Financial Trust',
'LFC': 'China Life Insurance Company Limited',
'PHH': 'PHH Corp',
'IFGL': 'iShares FTSE EPRA/NAREIT Global Real Estate ex-U.S. Index Fund',
'GPP': 'Green Plains Partners LP',
'RELV': 'Reliv\' International, Inc.',
'BETR': 'Amplify Snack Brands, inc.',
'CTXS': 'Citrix Systems, Inc.',
'AI': 'Arlington Asset Investment Corp',
'AHPI': 'Allied Healthcare Products, Inc.',
'BFY': 'BlackRock New York Municipal Income Trust II',
'FUEL': 'Rocket Fuel Inc.',
'GNTX': 'Gentex Corporation',
'PGNX': 'Progenics Pharmaceuticals Inc.',
'WSR': 'Whitestone REIT',
'MNOV': 'MediciNova, Inc.',
'CHE': 'Chemed Corp.',
'CVE': 'Cenovus Energy Inc',
'OIBR': 'Oi S.A.',
'AMT-B': 'American Tower Corporation (REIT)',
'WYIG': 'JM Global Holding Company',
'ALL-B': 'Allstate Corporation (The)',
'GPRO': 'GoPro, Inc.',
'ULH': 'Universal Logistics Holdings, Inc.',
'ENZY ': 'Enzymotec Ltd.',
'VIPS': 'Vipshop Holdings Limited',
'SNPS': 'Synopsys, Inc.',
'ASTE': 'Astec Industries, Inc.',
'CPSI': 'Computer Programs and Systems, Inc.',
'MOC': 'Command Security Corporation',
'GIFI': 'Gulf Island Fabrication, Inc.',
'EQC-D': 'Equity Commonwealth',
'ABCO': 'The Advisory Board Company',
'REED': 'Reeds, Inc.',
'FNV': 'Franco-Nevada Corporation',
'SBNY': 'Signature Bank',
'LKOR': 'FlexShares Credit-Scored US Long Corporate Bond Index Fund',
'AXL': 'American Axle & Manufacturing Holdings, Inc.',
'GDV-D': 'Gabelli Dividend',
'AMH-D': 'American Homes 4 Rent',
'PBH': 'Prestige Brand Holdings, Inc.',
'CLX': 'Clorox Company (The)',
'GLPG': 'Galapagos NV',
'AIRM': 'Air Methods Corporation',
'LXFT': 'Luxoft Holding, Inc.',
'AMRS': 'Amyris, Inc.',
'DDBI': 'Legg Mason Developed EX-US Diversified Core ETF',
'ORRF': 'Orrstown Financial Services Inc',
'DLNG': 'Dynagas LNG Partners LP',
'AAPC': 'Atlantic Alliance Partnership Corp.',
'G': 'Genpact Limited',
'LMIA': 'LMI Aerospace, Inc.',
'EW': 'Edwards Lifesciences Corporation',
'SSL': 'Sasol Ltd.',
'CANF': 'Can-Fite Biopharma Ltd',
'NVLS': 'Nivalis Therapeutics, Inc.',
'CASM': 'CAS Medical Systems, Inc.',
'PW': 'Power REIT',
'MGNX': 'MacroGenics, Inc.',
'PSCF': 'PowerShares S&P SmallCap Financials Portfolio',
'IPB': 'Merrill Lynch & Co., Inc.',
'BECN': 'Beacon Roofing Supply, Inc.',
'UVE': 'UNIVERSAL INSURANCE HOLDINGS INC',
'GS-J': 'Goldman Sachs Group, Inc. (The)',
'CRF': 'Cornerstone Strategic Return Fund, Inc. (The)',
'CCCL': 'China Ceramics Co., Ltd.',
'PCAR': 'PACCAR Inc.',
'QLGC': 'QLogic Corporation',
'CHSCL': 'CHS Inc',
'TCB': 'TCF Financial Corporation',
'LSBK': 'Lake Shore Bancorp, Inc.',
'GHY': 'Prudential Global Short Duration High Yield Fund, Inc.',
'GPRK': 'Geopark Ltd',
'LION': 'Fidelity Southern Corporation',
'FORM': 'FormFactor, Inc.',
'OFG-B': 'OFG Bancorp',
'OXFD': 'Oxford Immunotec Global PLC',
'ACTS': 'Actions Semiconductor Co., Ltd.',
'CDZI': 'Cadiz, Inc.',
'RGNX': 'REGENXBIO Inc.',
'MMU': 'Western Asset Managed Municipals Fund, Inc.',
'ABIL': 'Ability Inc.',
'SGEN': 'Seattle Genetics, Inc.',
'DJCO': 'Daily Journal Corp. (S.C.)',
'HRZN': 'Horizon Technology Finance Corporation',
'PSTG': 'Pure Storage, Inc. ',
'NSS': 'NuStar Logistics, L.P.',
'CYTXW': 'Cytori Therapeutics Inc',
'WDR': 'Waddell & Reed Financial, Inc.',
'ATEN': 'A10 Networks, Inc.',
'NPV': 'Nuveen Virginia Premium Income Municipal Fund',
'FXCB': 'Fox Chase Bancorp, Inc.',
'DTRM': 'Determine, Inc. ',
'FII': 'Federated Investors, Inc.',
'CACQ': 'Caesars Acquisition Company',
'NYCB-U': 'New York Community Bancorp, Inc.',
'NOA': 'North American Energy Partners, Inc.',
'LFUS': 'Littelfuse, Inc.',
'JPC': 'Nuveen Preferred Income Opportunities Fund',
'ACTG': 'Acacia Research Corporation',
'A': 'Agilent Technologies, Inc.',
'MYI': 'Blackrock MuniYield Quality Fund III, Inc.',
'CM': 'Canadian Imperial Bank of Commerce',
'DVN': 'Devon Energy Corporation',
'JSYNW': 'Jensyn Acquisition Corp.',
'CRK': 'Comstock Resources, Inc.',
'GJH': 'STRATS Trust',
'KIM-K': 'Kimco Realty Corporation',
'VNO-G': 'Vornado Realty Trust',
'KMI': 'Kinder Morgan, Inc.',
'KEYW': 'The KEYW Holding Corporation',
'CYCC': 'Cyclacel Pharmaceuticals, Inc.',
'FLAT': 'region',
'RTN': 'Raytheon Company',
'NTWK': 'NetSol Technologies Inc.',
'WHR': 'Whirlpool Corporation',
'PRTO': 'Proteon Therapeutics, Inc.',
'NVIV': 'InVivo Therapeutics Holdings Corp.',
'QGEN': 'Qiagen N.V.',
'CMT': 'Core Molding Technologies Inc',
'GFED': 'Guaranty Federal Bancshares, Inc.',
'MX': 'MagnaChip Semiconductor Corporation',
'LZB': 'La-Z-Boy Incorporated',
'ZLIG': 'Aperion Biologics, Inc.',
'MIRN': 'Mirna Therapeutics, Inc.',
'AGI': 'Alamos Gold Inc.',
'MINI': 'Mobile Mini, Inc.',
'PZN': 'Pzena Investment Management Inc',
'NMR': 'Nomura Holdings Inc ADR',
'AGO': 'Assured Guaranty Ltd.',
'BW': 'Babcock',
'KBSF': 'KBS Fashion Group Limited',
'UHS': 'Universal Health Services, Inc.',
'WMAR': 'West Marine, Inc.',
'SYUT': 'Synutra International, Inc.',
'QUMU': 'Qumu Corporation',
'KWEB': 'KraneShares CSI China Internet ETF',
'UBSH': 'Union Bankshares Corporation',
'NETE': 'Net Element, Inc.',
'BF.B': 'Brown Forman Corporation',
'GTN.A': 'Gray Television, Inc.',
'TCB.WS': 'TCF Financial Corporation',
'MSK': 'Morgan Stanley',
'ZNWAA': 'Zion Oil & Gas Inc',
'YUM': 'Yum! Brands, Inc.',
'NFBK': 'Northfield Bancorp, Inc.',
'WOOD': 'iShares S&P Global Timber & Forestry Index Fund',
'HYZD': 'WisdomTree BofA Merrill Lynch High Yield Bond Zero Duration Fu',
'AWI': 'Armstrong World Industries Inc',
'ECPG': 'Encore Capital Group Inc',
'HUSI-F.CL': 'HSBC USA, Inc.',
'TSS': 'Total System Services, Inc.',
'ADK': 'Adcare Health Systems Inc',
'KO': 'Coca-Cola Company (The)',
'CDR': 'Cedar Realty Trust, Inc.',
'PPP': 'Primero Mining Corp',
'CCCR': 'China Commercial Credit, Inc.',
'SMTC': 'Semtech Corporation',
'GPIA': 'GP Investments Acquisition Corp.',
'JKS': 'JinkoSolar Holding Company Limited',
'CONN': 'Conn\'s, Inc.',
'CLWT': 'Euro Tech Holdings Company Limited',
'TRIP': 'TripAdvisor, Inc.',
'FLWS': '1-800 FLOWERS.COM, Inc.',
'FRC-E': 'FIRST REPUBLIC BANK',
'SOXX': 'iShares PHLX SOX Semiconductor Sector Index Fund',
'DDE': 'Dover Downs Gaming & Entertainment Inc',
'EOI': 'Eaton Vance Enhance Equity Income Fund',
'NVCR': 'NovoCure Limited',
'VRTX': 'Vertex Pharmaceuticals Incorporated',
'GBCI': 'Glacier Bancorp, Inc.',
'LIOX': 'Lionbridge Technologies, Inc.',
'HDRAU': 'Hydra Industries Acquisition Corp.',
'FTRI': 'First Trust Indxx Global Natural Resources Income ETF',
'PRH': 'Prudential Financial, Inc.',
'NSC': 'Norfolk Southern Corporation',
'TYL': 'Tyler Technologies, Inc.',
'ALR-B': 'Alere Inc.',
'GURE': 'Gulf Resources, Inc.',
'MGIC': 'Magic Software Enterprises Ltd.',
'PARN': 'Parnell Pharmaceuticals Holdings Ltd',
'BBW': 'Build-A-Bear Workshop, Inc.',
'RT': 'Ruby Tuesday, Inc.',
'SIG': 'Signet Jewelers Limited',
'FNFG': 'First Niagara Financial Group Inc.',
'WFC': 'Wells Fargo & Company',
'USB': 'U.S. Bancorp',
'TSLX': 'TPG Specialty Lending, Inc.',
'MYCC': 'ClubCorp Holdings, Inc.',
'CDR-B': 'Cedar Realty Trust, Inc.',
'RDC': 'Rowan Companies plc',
'NADL': 'North Atlantic Drilling Ltd.',
'SFE': 'Safeguard Scientifics, Inc.',
'DGX': 'Quest Diagnostics Incorporated',
'ARIS': 'ARI Network Services, Inc.',
'DUK': 'Duke Energy Corporation',
'QTWO': 'Q2 Holdings, Inc.',
'AZN': 'Astrazeneca PLC',
'WSTL': 'Westell Technologies, Inc.',
'SPSC': 'SPS Commerce, Inc.',
'PRA': 'ProAssurance Corporation',
'SEED': 'Origin Agritech Limited',
'GRID': 'First Trust NASDAQ Clean Edge Smart Grid Infrastructure Index ',
'APHB': 'AmpliPhi Biosciences Corporation',
'AFCO': 'American Farmland Company',
'ESBK': 'Elmira Savings Bank NY (The)',
'HE-U': 'Hawaiian Electric Industries, Inc.',
'ATV': 'Acorn International, Inc.',
'PSA-Z': 'Public Storage',
'SMRT': 'Stein Mart, Inc.',
'PHT': 'Pioneer High Income Trust',
'CIBR': 'First Trust NASDAQ Cybersecurity ETF',
'HTF': 'Horizon Technology Finance Corporation',
'DENN': 'Denny\'s Corporation',
'TWER': 'Towerstream Corporation',
'SOR': 'Source Capital, Inc.',
'ASEI': 'American Science and Engineering, Inc.',
'SIR': 'Select Income REIT',
'DRI': 'Darden Restaurants, Inc.',
'NEE-J': 'NextEra Energy, Inc.',
'SPWR': 'SunPower Corporation',
'MDGS': 'Medigus Ltd.',
'HOTR': 'Chanticleer Holdings, Inc.',
'MB': 'MINDBODY, Inc.',
'ADK-A': 'Adcare Health Systems Inc',
'PFX': 'Phoenix Companies, Inc. (The)',
'SCHL': 'Scholastic Corporation',
'BURL': 'Burlington Stores, Inc.',
'PGLC': 'Pershing Gold Corporation',
'ATHM': 'Autohome Inc.',
'RAS': 'RAIT Financial Trust',
'CCUR': 'Concurrent Computer Corporation',
'BC': 'Brunswick Corporation',
'DELT': 'Delta Technology Holdings Limited',
'LBTYB': 'Liberty Global plc',
'ZNGA': 'Zynga Inc.',
'NSAM': 'NorthStar Asset Management Group, Inc.',
'CBG': 'CBRE Group, Inc.',
'NRG': 'NRG Energy, Inc.',
'BGR': 'BlackRock Energy and Resources Trust',
'FLR': 'Fluor Corporation',
'HTHT': 'China Lodging Group, Limited',
'SINO': 'Sino-Global Shipping America, Ltd.',
'RBIO': 'rEVO Biologics, Inc.',
'OBAS': 'Optibase Ltd.',
'SEMI': 'SunEdison Semiconductor Limited',
'CLRBW': 'Cellectar Biosciences, Inc.',
'BGH': 'Babson Capital Global Short Duration High Yield Fund',
'XCOM': 'Xtera Communications, Inc.',
'ATRO': 'Astronics Corporation',
'SCTY': 'SolarCity Corporation',
'PPH': 'VanEck Vectors Pharmaceutical ETF',
'MCFT': 'MCBC Holdings, Inc.',
'IPGP': 'IPG Photonics Corporation',
'LPTN': 'Lpath, Inc.',
'SLIM': 'The Obesity ETF',
'BOTJ': 'Bank of the James Financial Group, Inc.',
'FAV': 'First Trust Dividend and Income Fund',
'STAR-I': 'iStar Financial Inc.',
'PBA': 'Pembina Pipeline Corp.',
'MTW': 'Manitowoc Company, Inc. (The)',
'VTL': 'Vital Therapies, Inc.',
'PSDV': 'pSivida Corp.',
'FIT': 'Fitbit, Inc.',
'BBT-G': 'BB&T Corporation',
'EFM': 'Entergy Mississippi, Inc.',
'KHC': 'The Kraft Heinz Company',
'CNHI': 'CNH Industrial N.V.',
'SAEX': 'SAExploration Holdings, Inc.',
'INB': 'Cohen & Steers Global Income Builder, Inc.',
'CLNY-B': 'Colony Capital, Inc',
'AGO-F': 'Assured Guaranty Ltd.',
'QAT': 'iShares MSCI Qatar Capped ETF',
'JBSS': 'John B. Sanfilippo & Son, Inc.',
'AFSD': 'Aflac Incorporated',
'TPB': 'Turning Point Brands, Inc.',
'PH': 'Parker-Hannifin Corporation',
'RDNT': 'RadNet, Inc.',
'ORCL': 'Oracle Corporation',
'TPLM': 'Triangle Petroleum Corporation',
'GOGO': 'Gogo Inc.',
'TWX': 'Time Warner Inc.',
'KIQ': 'Kelso Technologies Inc',
'PCTY': 'Paylocity Holding Corporation',
'WSBF': 'Waterstone Financial, Inc.',
'LINDW': 'Lindblad Expeditions Holdings Inc.',
'PSA-Y': 'Public Storage',
'XEL': 'Xcel Energy Inc.',
'THR': 'Thermon Group Holdings, Inc.',
'WHLRP': 'Wheeler Real Estate Investment Trust, Inc.',
'FLIC': 'The First of Long Island Corporation',
'UFPT': 'UFP Technologies, Inc.',
'GPAC': 'Global Partner Acquisition Corp.',
'XOXO': 'XO Group, Inc.',
'SHAK': 'Shake Shack, Inc.',
'BUFF': 'Blue Buffalo Pet Products, Inc.',
'MHG': 'Marine Harvest ASA',
'UBA': 'Urstadt Biddle Properties Inc.',
'RNR-E': 'RenaissanceRe Holdings Ltd.',
'KED': 'Kayne Anderson Energy Development Company',
'GOODN': 'Gladstone Commercial Corporation',
'PGEM': 'Ply Gem Holdings, Inc.',
'TXT': 'Textron Inc.',
'CSC': 'Computer Sciences Corporation',
'SKLN': 'Skyline Medical Inc.',
'DTUL': 'region',
'WBKC': 'Wolverine Bancorp, Inc.',
'WPG-H': 'WP Glimcher Inc.',
'FJP': 'First Trust Japan AlphaDEX Fund',
'FDEU': 'First Trust Dynamic Europe Equity Income Fund',
'PRCP': 'Perceptron, Inc.',
'LDP': 'Cohen & Steers Limited Duration Preferred and Income Fund, Inc',
'NBTB': 'NBT Bancorp Inc.',
'BRSS': 'Global Brass and Copper Holdings, Inc.',
'BWINA': 'Baldwin & Lyons, Inc.',
'TPC': 'Tutor Perini Corporation',
'GPACU': 'Global Partner Acquisition Corp.',
'WTR': 'Aqua America, Inc.',
'RIBT': 'RiceBran Technologies',
'JHX': 'James Hardie Industries plc.',
'QDEL': 'Quidel Corporation',
'UCP': 'UCP, Inc.',
'C-J': 'Citigroup Inc.',
'FIBK': 'First Interstate BancSystem, Inc.',
'HSKA': 'Heska Corporation',
'WSFS': 'WSFS Financial Corporation',
'IRET': 'Investors Real Estate Trust',
'CNS': 'Cohen & Steers Inc',
'HBHC': 'Hancock Holding Company',
'THFF': 'First Financial Corporation Indiana',
'BHBK': 'Blue Hills Bancorp, Inc.',
'LAQ': 'Latin America Equity Fund, Inc. (The)',
'LSTR': 'Landstar System, Inc.',
'NUW': 'Nuveen AMT-Free Municipal Value Fund',
'CLFD': 'Clearfield, Inc.',
'LNDC': 'Landec Corporation',
'OAKS-A': 'Five Oaks Investment Corp.',
'MFL': 'Blackrock MuniHoldings Investment Quality Fund',
'OMC': 'Omnicom Group Inc.',
'SDT': 'SandRidge Mississippian Trust I',
'TNH': 'Terra Nitrogen Company, L.P.',
'MARA': 'Marathon Patent Group, Inc.',
'SJT': 'San Juan Basin Royalty Trust',
'DSM': 'Dreyfus Strategic Municipal Bond Fund, Inc.',
'KCC': 'Lehman ABS Corporation',
'CPXX': 'Celator Pharmaceuticals Inc.',
'LIQT': 'LiqTech International, Inc.',
'ESNT': 'Essent Group Ltd.',
'SEE': 'Sealed Air Corporation',
'MMYT': 'MakeMyTrip Limited',
'KONA': 'Kona Grill, Inc.',
'AF': 'Astoria Financial Corporation',
'O-F': 'Realty Income Corporation',
'BORN': 'China New Borun Corporation',
'VIAB': 'Viacom Inc.',
'GLRI': 'Glori Energy Inc',
'SLVO': 'region',
'VSTO': 'Vista Outdoor Inc.',
'ING': 'ING Group, N.V.',
'SZMK': 'Sizmek Inc.',
'HBP': 'Huttig Building Products, Inc.',
'EGIF': 'Eagle Growth and Income Opportunities Fund',
'PJT': 'PJT Partners Inc.',
'PRAH': 'PRA Health Sciences, Inc.',
'CNI': 'Canadian National Railway Company',
'GGZ': 'Gabelli Global Small and Mid Cap Value Trust (The)',
'HSY': 'Hershey Company (The)',
'IILG': 'Interval Leisure Group, Inc.',
'HOTRW': 'Chanticleer Holdings, Inc.',
'IPI': 'Intrepid Potash, Inc',
'VIA': 'Viacom Inc.',
'STAR-F': 'iStar Financial Inc.',
'SIFI': 'SI Financial Group, Inc.',
'CPLA': 'Capella Education Company',
'CIG': 'Comp En De Mn Cemig ADS',
'C': 'Citigroup Inc.',
'KURA': 'Kura Oncology, Inc.',
'MEMP': 'Memorial Production Partners LP',
'SGMO': 'Sangamo BioSciences, Inc.',
'RIGL': 'Rigel Pharmaceuticals, Inc.',
'XENT': 'Intersect ENT, Inc.',
'DSX-B': 'Diana Shipping inc.',
'SYRG': 'Synergy Resources Corporation',
'SWNC': 'Southwestern Energy Company',
'KEY-G': 'KeyCorp',
'OIA': 'Invesco Municipal Income Opportunities Trust',
'PAYX': 'Paychex, Inc.',
'XNPT': 'XenoPort, Inc.',
'NLY-D': 'Annaly Capital Management Inc',
'PML': 'Pimco Municipal Income Fund II',
'AHL-B': 'Aspen Insurance Holdings Limited',
'SAM': 'Boston Beer Company, Inc. (The)',
'SPLS': 'Staples, Inc.',
'FENX': 'Fenix Parts, Inc.',
'SBR': 'Sabine Royalty Trust',
'IGI': 'Western Asset Investment Grade Defined Opportunity Trust Inc.',
'IVH': 'Ivy High Income Opportunities Fund',
'HEI': 'Heico Corporation',
'NWY': 'New York & Company, Inc.',
'WBK': 'Westpac Banking Corporation',
'SCCI': 'Shimmick Construction Company, Inc.',
'CRVP': 'Crystal Rock Holdings, Inc.',
'STNG': 'Scorpio Tankers Inc.',
'AB': 'AllianceBernstein Holding L.P.',
'ASRV': 'AmeriServ Financial Inc.',
'SCNB': 'Suffolk Bancorp',
'LAMR': 'Lamar Advertising Company',
'IAG': 'Iamgold Corporation',
'CY': 'Cypress Semiconductor Corporation',
'ISNS': 'Image Sensing Systems, Inc.',
'TRTL': 'Terrapin 3 Acquisition Corporation',
'FRC-C': 'FIRST REPUBLIC BANK',
'BXMX': 'Nuveen S&P 500 Buy-Write Income Fund',
'CYH': 'Community Health Systems, Inc.',
'JE': 'Just Energy Group, Inc.',
'TPHS': 'Trinity Place Holdings Inc.',
'LXRX': 'Lexicon Pharmaceuticals, Inc.',
'WEX': 'WEX Inc.',
'COST': 'Costco Wholesale Corporation',
'LHO-I': 'LaSalle Hotel Properties',
'KEQU': 'Kewaunee Scientific Corporation',
'MHNB': 'Maiden Holdings, Ltd.',
'PMO': 'Putnam Municipal Opportunities Trust',
'CLD': 'Cloud Peak Energy Inc',
'DWCH': 'Datawatch Corporation',
'EHT': 'Eaton Vance High Income 2021 Target Term Trust',
'INDY': 'iShares S&P India Nifty 50 Index Fund',
'ELGX': 'Endologix, Inc.',
'TNC': 'Tennant Company',
'KTCC': 'Key Tronic Corporation',
'WETF': 'WisdomTree Investments, Inc.',
'ABTX': 'Allegiance Bancshares, Inc.',
'GNT': 'GAMCO Natural Resources, Gold & Income Trust',
'PAACW': 'Pacific Special Acquisition Corp.',
'MEIP': 'MEI Pharma, Inc.',
'WING': 'Wingstop Inc.',
'BCRH': 'Blue Capital Reinsurance Holdings Ltd.',
'RGR': 'Sturm, Ruger & Company, Inc.',
'SBUX': 'Starbucks Corporation',
'TXN': 'Texas Instruments Incorporated',
'ROL': 'Rollins, Inc.',
'MDM': 'Mountain Province Diamonds Inc.',
'PERI': 'Perion Network Ltd',
'ALL': 'Allstate Corporation (The)',
'ANDE': 'The Andersons, Inc.',
'PRQR': 'ProQR Therapeutics N.V.',
'GS-D': 'Goldman Sachs Group, Inc. (The)',
'MRO': 'Marathon Oil Corporation',
'PGR': 'Progressive Corporation (The)',
'CDW': 'CDW Corporation',
'AGM-B': 'Federal Agricultural Mortgage Corporation',
'AXN': 'Aoxing Pharmaceutical Company, Inc.',
'ERF': 'Enerplus Corporation',
'SGI': 'Silicon Graphics International Corp',
'OAK': 'Oaktree Capital Group, LLC',
'ROIAK': 'Radio One, Inc.',
'UBP-G': 'Urstadt Biddle Properties Inc.',
'GDF': 'Western Asset Global Partners Income Fund, Inc.',
'KRO': 'Kronos Worldwide Inc',
'SSI': 'Stage Stores, Inc.',
'CSTM': 'Constellium N.V.',
'PETX': 'Aratana Therapeutics, Inc.',
'SHLDW': 'Sears Holdings Corporation',
'FRP': 'FairPoint Communications, Inc.',
'TWO': 'Two Harbors Investments Corp',
'AYR': 'Aircastle Limited',
'GCO': 'Genesco Inc.',
'BCR': 'C.R. Bard, Inc.',
'DNI': 'Dividend and Income Fund',
'ERN': 'Erin Energy Corp.',
'VSAT': 'ViaSat, Inc.',
'DAVE': 'Famous Dave\'s of America, Inc.',
'TNP-D': 'Tsakos Energy Navigation Ltd',
'ARTX': 'Arotech Corporation',
'WHG': 'Westwood Holdings Group Inc',
'ECF': 'Ellsworth Growth and Income Fund Ltd.',
'PFBC': 'Preferred Bank',
'SFBC': 'Sound Financial Bancorp, Inc.',
'WFM': 'Whole Foods Market, Inc.',
'NXR': 'Nuveen Select Tax Free Income Portfolio III',
'VOXX': 'VOXX International Corporation',
'OKE': 'ONEOK, Inc.',
'SHOS': 'Sears Hometown and Outlet Stores, Inc.',
'ECT': 'ECA Marcellus Trust I',
'SAVE': 'Spirit Airlines, Inc.',
'CLNY-C': 'Colony Capital, Inc',
'CEL': 'Cellcom Israel, Ltd.',
'KFRC': 'Kforce, Inc.',
'EARS': 'Auris Medical Holding AG',
'CYRX': 'CryoPort, Inc.',
'TLF': 'Tandy Leather Factory, Inc.',
'GXP': 'Great Plains Energy Inc',
'UMC': 'United Microelectronics Corporation',
'BBX': 'BBX Capital Corporation',
'WMGI': 'Wright Medical Group N.V.',
'BPI': 'Bridgepoint Education, Inc.',
'GJP': 'Synthetic Fixed-Income Securities, Inc.',
'RLH': 'Red Lion Hotels Corporation',
'DCO': 'Ducommun Incorporated',
'SNV-C': 'Synovus Financial Corp.',
'PDT': 'John Hancock Premium Dividend Fund',
'SFM': 'Sprouts Farmers Market, Inc.',
'NCB': 'Nuveen California Municipal Value Fund 2',
'TRV': 'The Travelers Companies, Inc.',
'SGMS': 'Scientific Games Corp',
'FMN': 'Federated Premier Municipal Income Fund',
'CLF': 'Cliffs Natural Resources Inc.',
'BPOP': 'Popular, Inc.',
'MBII': 'Marrone Bio Innovations, Inc.',
'PNNT': 'PennantPark Investment Corporation',
'FITB': 'Fifth Third Bancorp',
'PARR': 'Par Pacific Holdings, Inc.',
'DTLA-': 'Brookfield DTLA Inc.',
'MTBCP': 'Medical Transcription Billing, Corp.',
'MUI': 'Blackrock Muni Intermediate Duration Fund Inc',
'FNY': 'First Trust Mid Cap Growth AlphaDEX Fund',
'QQQ': 'PowerShares QQQ Trust, Series 1',
'WFC-W': 'Wells Fargo & Company',
'ENR': 'Energizer Holdings, Inc.',
'AFSI': 'AmTrust Financial Services, Inc.',
'SEIC': 'SEI Investments Company',
'HDRA': 'Hydra Industries Acquisition Corp.',
'PPL': 'PPL Corporation',
'JSM': 'SLM Corporation',
'WSCI': 'WSI Industries Inc.',
'TRIB': 'Trinity Biotech plc',
'DEO': 'Diageo plc',
'WNFM': 'Wayne Farms, Inc.',
'EDBI': 'Legg Mason Emerging Markets Diversified Core ETF',
'VNRCP': 'Vanguard Natural Resources LLC',
'CFC-B': 'Countrywide Financial Corporation',
'BLDP': 'Ballard Power Systems, Inc.',
'ISL': 'Aberdeen Israel Fund, Inc.',
'WLKP': 'Westlake Chemical Partners LP',
'UNIS': 'Unilife Corporation',
'MS': 'Morgan Stanley',
'SLB': 'Schlumberger N.V.',
'DNKN': 'Dunkin\' Brands Group, Inc.',
'BTE': 'Baytex Energy Corp',
'MLM': 'Martin Marietta Materials, Inc.',
'PCBK': 'Pacific Continental Corporation (Ore)',
'DLX': 'Deluxe Corporation',
'PSTB': 'Park Sterling Corporation',
'BIOD': 'Biodel Inc.',
'VER-F': 'VEREIT Inc.',
'AVEO': 'AVEO Pharmaceuticals, Inc.',
'SSP': 'E.W. Scripps Company (The)',
'NWPX': 'Northwest Pipe Company',
'ACXM': 'Acxiom Corporation',
'VHC': 'VirnetX Holding Corp',
'PHMD': 'PhotoMedex, Inc.',
'JOF': 'Japan Smaller Capitalization Fund Inc',
'EVLV': 'EVINE Live Inc.',
'SLAB': 'Silicon Laboratories, Inc.',
'CCI': 'Crown Castle International Corporation',
'STZ': 'Constellation Brands Inc',
'CEO': 'CNOOC Limited',
'JCI': 'Johnson Controls, Inc.',
'CNAT': 'Conatus Pharmaceuticals Inc.',
'ATOS': 'Atossa Genetics Inc.',
'PCG-H': 'Pacific Gas & Electric Co.',
'BQH': 'Blackrock New York Municipal Bond Trust',
'AMH-A': 'American Homes 4 Rent',
'ADRU': 'BLDRS Europe 100 ADR Index Fund',
'ENLC': 'EnLink Midstream, LLC',
'HRMNU': 'Harmony Merger Corp.',
'PAI': 'Pacific American Income Shares, Inc.',
'WHLRW': 'Wheeler Real Estate Investment Trust, Inc.',
'TCI': 'Transcontinental Realty Investors, Inc.',
'TDE': 'Telephone and Data Systems, Inc.',
'SLCT': 'Select Bancorp, Inc.',
'JLS': 'Nuveen Mortgage Opportunity Term Fund',
'RADA': 'Rada Electronics Industries Limited',
'LUK': 'Leucadia National Corporation',
'PCG-D': 'Pacific Gas & Electric Co.',
'RNP': 'Cohen & Steers Reit and Preferred Income Fund Inc',
'GJO': 'STRATS Trust',
'OXLC': 'Oxford Lane Capital Corp.',
'DTJ': 'DTE Energy Company',
'SXC': 'SunCoke Energy, Inc.',
'NTN': 'NTN Buzztime, Inc.',
'WRB-B': 'W.R. Berkley Corporation',
'YORW': 'The York Water Company',
'FARM': 'Farmer Brothers Company',
'RGA': 'Reinsurance Group of America, Incorporated',
'FGB': 'First Trust Specialty Finance and Financial Opportunities Fund',
'E': 'ENI S.p.A.',
'SBS': 'Companhia de saneamento Basico Do Estado De Sao Paulo - Sabesp',
'HCA': 'HCA Holdings, Inc.',
'BJRI': 'BJ\'s Restaurants, Inc.',
'IOT': 'Income Opportunity Realty Investors, Inc.',
'HNSN': 'Hansen Medical, Inc.',
'LMBS': 'First Trust Low Duration Opportunities ETF',
'MUS': 'Blackrock MuniHoldings Quality Fund, Inc.',
'AXGN': 'AxoGen, Inc.',
'LUX': 'Luxottica Group, S.p.A.',
'PTY': 'Pimco Corporate & Income Opportunity Fund',
'AGM.A': 'Federal Agricultural Mortgage Corporation',
'NPP': 'Nuveen Performance Plus Municipal Fund, Inc.',
'CCBG': 'Capital City Bank Group',
'FHN-A': 'First Horizon National Corporation',
'CORI': 'Corium International, Inc.',
'AMRN': 'Amarin Corporation PLC',
'MS-E': 'Morgan Stanley',
'SRT': 'StarTek, Inc.',
'LPTH': 'LightPath Technologies, Inc.',
'HNRG': 'Hallador Energy Company',
'GME': 'Gamestop Corporation',
'TLOG': 'TetraLogic Pharmaceuticals Corporation',
'FVE': 'Five Star Quality Care, Inc.',
'ETX ': 'Eaton Vance Municipal Income 2028 Term Trust',
'BOFIL': 'BofI Holding, Inc.',
'MGRC': 'McGrath RentCorp',
'BWFG': 'Bankwell Financial Group, Inc.',
'PLD': 'ProLogis, Inc.',
'OTTR': 'Otter Tail Corporation',
'QQEW': 'First Trust NASDAQ-100 Equal Weighted Index Fund',
'MNP': 'Western Asset Municipal Partners Fund, Inc.',
'OPXAW': 'Opexa Therapeutics, Inc.',
'LIVN': 'LivaNova PLC',
'FTV$': 'Fortive Corporation',
'TCBIP': 'Texas Capital Bancshares, Inc.',
'CXE': 'Colonial High Income Municipal Trust',
'SODA': 'SodaStream International Ltd.',
'CHUY': 'Chuy\'s Holdings, Inc.',
'AAPL': 'Apple Inc.',
'PSTI': 'Pluristem Therapeutics, Inc.',
'HSEA': 'HSBC Holdings plc',
'SIEB': 'Siebert Financial Corp.',
'CVX': 'Chevron Corporation',
'CPSH': 'CPS Technologies Corp.',
'PRSS': 'CafePress Inc.',
'BHK': 'Blackrock Core Bond Trust',
'BOH': 'Bank of Hawaii Corporation',
'ALOT': 'AstroNova, Inc.',
'BOSC': 'B.O.S. Better Online Solutions',
'ISM': 'SLM Corporation',
'EPD': 'Enterprise Products Partners L.P.',
'SVBI': 'Severn Bancorp Inc',
'TROVU': 'TrovaGene, Inc.',
'BZH': 'Beazer Homes USA, Inc.',
'SMI': 'Semiconductor Manufacturing International Corporation',
'HRC': 'Hill-Rom Holdings Inc',
'GFA': 'Gafisa SA',
'CVLY': 'Codorus Valley Bancorp, Inc',
'SBY': 'Silver Bay Realty Trust Corp.',
'DF': 'Dean Foods Company',
'XL': 'XL Group plc',
'ANCX': 'Access National Corporation',
'CTLT': 'Catalent, Inc.',
'STDY': 'SteadyMed Ltd.',
'ALDX': 'Aldeyra Therapeutics, Inc.',
'CIL': 'Victory CEMP International Volatility Wtd Index ETF',
'ORG': 'The Organics ETF',
'CRAI': 'CRA International, Inc.',
'PFMT': 'Performant Financial Corporation',
'GS-C': 'Goldman Sachs Group, Inc. (The)',
'KEY': 'KeyCorp',
'EGN': 'Energen Corporation',
'IPWR': 'Ideal Power Inc.',
'EQR': 'Equity Residential',
'ABR-B': 'Arbor Realty Trust',
'LAYN': 'Layne Christensen Company',
'CTV': 'Qwest Corporation',
'BUSE': 'First Busey Corporation',
'GOVN': 'Government Properties Income Trust',
'RUSHA': 'Rush Enterprises, Inc.',
'NPI': 'Nuveen Premium Income Municipal Fund, Inc.',
'ETR': 'Entergy Corporation',
'NTES': 'NetEase, Inc.',
'MVF': 'MuniVest Fund, Inc.',
'JMBA': 'Jamba, Inc.',
'HYT': 'Blackrock Corporate High Yield Fund, Inc.',
'ANTM': 'Anthem, Inc.',
'CII': 'Blackrock Capital and Income Strategies Fund Inc',
'RPXC': 'RPX Corporation',
'NEO': 'NeoGenomics, Inc.',
'CIA': 'Citizens, Inc.',
'SXT': 'Sensient Technologies Corporation',
'NWS': 'News Corporation',
'RXDX': 'Ignyta, Inc.',
'FCH': 'FelCor Lodging Trust Incorporated',
'DWRE': 'DEMANDWARE, INC.',
'AVID': 'Avid Technology, Inc.',
'HYF': 'Managed High Yield Plus Fund, Inc.',
'CBNK': 'Chicopee Bancorp, Inc.',
'TWIN': 'Twin Disc, Incorporated',
'ALEX': 'Alexander & Baldwin Holdings, Inc.',
'OGE': 'OGE Energy Corporation',
'ESI': 'ITT Educational Services, Inc.',
'AJG': 'Arthur J. Gallagher & Co.',
'ALRM': 'Alarm.com Holdings, Inc.',
'BBU$': 'Brookfield Business Partners L.P.',
'CPTA': 'Capitala Finance Corp.',
'SCMP': 'Sucampo Pharmaceuticals, Inc.',
'CDE': 'Coeur Mining, Inc.',
'CHGG': 'Chegg, Inc.',
'NEN': 'New England Realty Associates Limited Partnership',
'COB': 'CommunityOne Bancorp',
'MTL-': 'Mechel OAO',
'BWP': 'Boardwalk Pipeline Partners L.P.',
'HPP': 'Hudson Pacific Properties, Inc.',
'MDCA': 'MDC Partners Inc.',
'ZIOP': 'ZIOPHARM Oncology Inc',
'URBN': 'Urban Outfitters, Inc.',
'MIY': 'Blackrock MuniYield Michigan Quality Fund, Inc.',
'CZZ': 'Cosan Limited',
'PRTS': 'U.S. Auto Parts Network, Inc.',
'ORIT': 'Oritani Financial Corp.',
'DVCR': 'Diversicare Healthcare Services Inc.',
'TI.A': 'Telecom Italia S.P.A.',
'FMER-A': 'FirstMerit Corporation',
'ZUMZ': 'Zumiez Inc.',
'BCOR': 'Blucora, Inc.',
'NWN': 'Northwest Natural Gas Company',
'QPAC': 'Quinpario Acquisition Corp. 2',
'PTX': 'Pernix Therapeutics Holdings, Inc.',
'FUL': 'H. B. Fuller Company',
'AIMT': 'Aimmune Therapeutics, Inc.',
'TPL': 'Texas Pacific Land Trust',
'ZFC': 'ZAIS Financial Corp.',
'SLMAP': 'SLM Corporation',
'HPS': 'John Hancock Preferred Income Fund III',
'STAF': 'Staffing 360 Solutions, Inc.',
'RJF': 'Raymond James Financial, Inc.',
'RGSE': 'Real Goods Solar, Inc.',
'RNET': 'RigNet, Inc.',
'ACHN': 'Achillion Pharmaceuticals, Inc.',
'MS-G': 'Morgan Stanley',
'ODP': 'Office Depot, Inc.',
'ETFC': 'E*TRADE Financial Corporation',
'MCD': 'McDonald\'s Corporation',
'UGLD': 'region',
'HTS-A': 'Hatteras Financial Corp',
'NWL': 'Newell Brands Inc.',
'PEG': 'Public Service Enterprise Group Incorporated',
'GORO': 'Gold Resource Corporation',
'CIVBP': 'Civista Bancshares, Inc. ',
'CCXI': 'ChemoCentryx, Inc.',
'LPLA': 'LPL Financial Holdings Inc.',
'BSQR': 'BSQUARE Corporation',
'AON': 'Aon plc',
'MITT-B': 'AG Mortgage Investment Trust, Inc.',
'FPA': 'First Trust Asia Pacific Ex-Japan AlphaDEX Fund',
'MSFT': 'Microsoft Corporation',
'SAL': 'Salisbury Bancorp, Inc.',
'CTT': 'CatchMark Timber Trust, Inc.',
'TCCB': 'Triangle Capital Corporation',
'WAC': 'Walter Investment Management Corp.',
'OPXA': 'Opexa Therapeutics, Inc.',
'ALR': 'Alere Inc.',
'CALX': 'Calix, Inc',
'NUVA': 'NuVasive, Inc.',
'JMM': 'Nuveen Multi-Market Income Fund',
'SWJ': 'Stanley Black & Decker, Inc.',
'CIVB': 'Civista Bancshares, Inc. ',
'BBD': 'Banco Bradesco Sa',
'AGU': 'Agrium Inc.',
'NEU': 'NewMarket Corporation',
'TREC': 'Trecora Resources',
'CXH': 'Colonial Investment Grade Municipal Trust',
'NNI': 'Nelnet, Inc.',
'ANW': 'Aegean Marine Petroleum Network Inc.',
'ACU': 'Acme United Corporation.',
'NMZ': 'Nuveen Municipal High Income Opportunity Fund',
'NMM': 'Navios Maritime Partners LP',
'ST': 'Sensata Technologies Holding N.V.',
'BRCD': 'Brocade Communications Systems, Inc.',
'ONSIZ': 'Oncobiologics, Inc.',
'INWK': 'InnerWorkings, Inc.',
'SCVL': 'Shoe Carnival, Inc.',
'KELYA': 'Kelly Services, Inc.',
'JHD': 'Nuveen High Income December 2019 Target Term Fund',
'MDWD': 'MediWound Ltd.',
'HEES': 'H&E Equipment Services, Inc.',
'ESMC': 'Escalon Medical Corp.',
'BOI': 'Brookfield Mortgage Opportunity Income Fund Inc.',
'CTWS': 'Connecticut Water Service, Inc.',
'SIX': 'Six Flags Entertainment Corporation New',
'LARK': 'Landmark Bancorp Inc.',
'ANH-A': 'Anworth Mortgage Asset Corporation',
'KCG': 'KCG Holdings, Inc.',
'NEWT': 'Newtek Business Services Corp.',
'GGT-B': 'Gabelli Multi-Media Trust Inc. (The)',
'WVFC': 'WVS Financial Corp.',
'CCM': 'Concord Medical Services Holdings Limited',
'ADP': 'Automatic Data Processing, Inc.',
'HIFS': 'Hingham Institution for Savings',
'RWC': 'RELM Wireless Corporation',
'KCAP': 'KCAP Financial, Inc.',
'PEI': 'Pennsylvania Real Estate Investment Trust',
'RACE': 'Ferrari N.V.',
'MCQ': 'Medley Capital Corporation',
'NFX': 'Newfield Exploration Company',
'ARI': 'Apollo Commercial Real Estate Finance',
'AAL': 'American Airlines Group, Inc.',
'SMCI': 'Super Micro Computer, Inc.',
'SDLP': 'Seadrill Partners LLC',
'IBB': 'iShares Nasdaq Biotechnology Index Fund',
'MFCB': 'MFC Bancorp Ltd.',
'GNC': 'GNC Holdings, Inc.',
'FTR': 'Frontier Communications Corporation',
'MA': 'Mastercard Incorporated',
'TNET': 'TriNet Group, Inc.',
'DNAI': 'ProNAi Therapeutics, Inc.',
'SEAS': 'SeaWorld Entertainment, Inc.',
'BAC-Y': 'Bank of America Corporation',
'PHK': 'Pimco High Income Fund',
'ZBH': 'Zimmer Biomet Holdings, Inc.',
'PBR': 'Petroleo Brasileiro S.A.- Petrobras',
'MSTX': 'Mast Therapeutics, Inc.',
'VNQI': 'Vanguard Global ex-U.S. Real Estate ETF',
'EFII': 'Electronics for Imaging, Inc.',
'CZR': 'Caesars Entertainment Corporation',
'EAD': 'Wells Fargo Income Opportunities Fund',
'GNMK': 'GenMark Diagnostics, Inc.',
'AV': 'Aviva plc',
'EYEGW': 'Eyegate Pharmaceuticals, Inc.',
'HOS': 'Hornbeck Offshore Services',
'BBF': 'BlackRock Municipal Income Investment Trust',
'GLOP': 'GasLog Partners LP',
'CIE': 'Cobalt International Energy, Inc.',
'CHSCP': 'CHS Inc',
'TUES': 'Tuesday Morning Corp.',
'TNP': 'Tsakos Energy Navigation Ltd',
'SMLR': 'Semler Scientific, Inc.',
'NVX': 'Nuveen California Dividend Advantage Municipal Fund 2',
'FLC': 'Flaherty & Crumrine Total Return Fund Inc',
'AAME': 'Atlantic American Corporation',
'RGCO': 'RGC Resources Inc.',
'ABB': 'ABB Ltd',
'VBTX': 'Veritex Holdings, Inc.',
'PGH': 'Pengrowth Energy Corporation',
'ENSG': 'The Ensign Group, Inc.',
'ONDK': 'On Deck Capital, Inc.',
'DECK': 'Deckers Outdoor Corporation',
'JCAP': 'Jernigan Capital, Inc.',
'OUTR': 'Outerwall Inc.',
'DX-B': 'Dynex Capital, Inc.',
'GEB': 'General Electric Company',
'BST': 'BlackRock Science and Technology Trust',
'HL': 'Hecla Mining Company',
'NMK-B': 'Niagara Mohawk Holdings, Inc.',
'AHS': 'AMN Healthcare Services Inc',
'ARCC': 'Ares Capital Corporation',
'UZC': 'United States Cellular Corporation',
'WAYN': 'Wayne Savings Bancshares Inc.',
'AWH': 'Allied World Assurance Company Holdings, AG',
'ENTA': 'Enanta Pharmaceuticals, Inc.',
'PAAS': 'Pan American Silver Corp.',
'APPS': 'Digital Turbine, Inc.',
'NWSA': 'News Corporation',
'CYS-A': 'CYS Investments, Inc.',
'DTYS': 'region',
'PRU': 'Prudential Financial, Inc.',
'CBPO': 'China Biologic Products, Inc.',
'GSH': 'Guangshen Railway Company Limited',
'SPNS': 'Sapiens International Corporation N.V.',
'CAMT': 'Camtek Ltd.',
'DGAS': 'Delta Natural Gas Company, Inc.',
'WTFCM': 'Wintrust Financial Corporation',
'HOVNP': 'Hovnanian Enterprises Inc',
'LTRX': 'Lantronix, Inc.',
'JBR': 'Select Asset Inc.',
'NI': 'NiSource, Inc',
'MVC': 'MVC Capital, Inc.',
'AMSGP': 'Amsurg Corp.',
'SLP': 'Simulations Plus, Inc.',
'SRI': 'Stoneridge, Inc.',
'HH': 'Hooper Holmes, Inc.',
'AMS': 'American Shared Hospital Services',
'FNBC': 'First NBC Bank Holding Company',
'INBK': 'First Internet Bancorp',
'CDK': 'CDK Global, Inc.',
'LII': 'Lennox International, Inc.',
'SRTSU': 'Sensus Healthcare, Inc.',
'SGYP': 'Synergy Pharmaceuticals, Inc.',
'C.WS.A': 'Citigroup Inc.',
'DBVT': 'DBV Technologies S.A.',
'BOX': 'Box, Inc.',
'ONS': 'Oncobiologics, Inc.',
'PYN': 'PIMCO New York Municipal Income Fund III',
'BBVA': 'Banco Bilbao Vizcaya Argentaria S.A.',
'ASX': 'Advanced Semiconductor Engineering, Inc.',
'CBD': 'Companhia Brasileira de Distribuicao',
'GGM': 'Guggenheim Credit Allocation Fund',
'FLY': 'Fly Leasing Limited',
'HOT': 'Starwood Hotels & Resorts Worldwide, Inc.',
'LNTH': 'Lantheus Holdings, Inc.',
'BPMX': 'BioPharmX Corporation',
'FTC': 'First Trust Large Cap Growth AlphaDEX Fund',
'SFL': 'Ship Finance International Limited',
'PRE-I': 'PartnerRe Ltd.',
'TMH': 'Team Health Holdings, Inc.',
'GHM': 'Graham Corporation',
'UFAB': 'Unique Fabricating, Inc.',
'AIN': 'Albany International Corporation',
'KBR': 'KBR, Inc.',
'ELP': 'Companhia Paranaense de Energia (COPEL)',
'CIO': 'City Office REIT, Inc.',
'RAND': 'Rand Capital Corporation',
'KIM': 'Kimco Realty Corporation',
'KOSS': 'Koss Corporation',
'TFSCU': '1347 Capital Corp.',
'NOK': 'Nokia Corporation',
'TGB': 'Taseko Mines Limited',
'CAVM': 'Cavium, Inc.',
'SID': 'National Steel Company',
'WES': 'Western Gas Partners, LP',
'HLI': 'Houlihan Lokey, Inc.',
'MZF': 'Managed Duration Investment Grade Municipal Fund',
'NRF-C': 'Northstar Realty Finance Corp.',
'APB': 'Asia Pacific Fund, Inc. (The)',
'PQ': 'Petroquest Energy Inc',
'IID': 'Voya International High Dividend Equity Income Fund',
'NPM': 'Nuveen Premium Income Municipal Fund II, Inc.',
'PEB-B': 'Pebblebrook Hotel Trust',
'IMGN': 'ImmunoGen, Inc.',
'TOO': 'Teekay Offshore Partners L.P.',
'CHRW': 'C.H. Robinson Worldwide, Inc.',
'GSVC': 'GSV Capital Corp',
'CMCM': 'Cheetah Mobile Inc.',
'SCHW': 'The Charles Schwab Corporation',
'AIG': 'American International Group, Inc.',
'WRK': 'Westrock Company',
'VNCE': 'Vince Holding Corp.',
'JNP': 'Juniper Pharmaceuticals, Inc.',
'PHG': 'Koninklijke Philips N.V.',
'TAT': 'Transatlantic Petroleum Ltd',
'STFC': 'State Auto Financial Corporation',
'USB-H': 'U.S. Bancorp',
'BH': 'Biglari Holdings Inc.',
'Y': 'Alleghany Corporation',
'OMF': 'OneMain Holdings, Inc.',
'UEC': 'Uranium Energy Corp.',
'JGH': 'Nuveen Global High Income Fund',
'NANO': 'Nanometrics Incorporated',
'TCX': 'Tucows Inc.',
'UFPI': 'Universal Forest Products, Inc.',
'QIHU': 'Qihoo 360 Technology Co. Ltd.',
'DATA': 'Tableau Software, Inc.',
'ESLT': 'Elbit Systems Ltd.',
'GAINP': 'Gladstone Investment Corporation',
'YECO': 'Yulong Eco-Materials Limited',
'PPBI': 'Pacific Premier Bancorp Inc',
'MET-A': 'MetLife, Inc.',
'SALE': 'RetailMeNot, Inc.',
'ECHO': 'Echo Global Logistics, Inc.',
'CORT': 'Corcept Therapeutics Incorporated',
'FFBCW': 'First Financial Bancorp.',
'XIN': 'Xinyuan Real Estate Co Ltd',
'GSL-B': 'Global Ship Lease, Inc.',
'OXLCO': 'Oxford Lane Capital Corp.',
'ARWAU': 'Arowana Inc.',
'STWD': 'STARWOOD PROPERTY TRUST, INC.',
'PBYI': 'Puma Biotechnology Inc',
'HDRAR': 'Hydra Industries Acquisition Corp.',
'DW': 'Drew Industries Incorporated',
'NATR': 'Nature\'s Sunshine Products, Inc.',
'ICUI': 'ICU Medical, Inc.',
'BOLT': 'BioLight Life Sciences Ltd.',
'CHSP': 'Chesapeake Lodging Trust',
'FTGC': 'First Trust Global Tactical Commodity Strategy Fund',
'TUTI': 'Tuttle Tactical Management Multi-Strategy Income ETF',
'UE': 'Urban Edge Properties',
'AVX': 'AVX Corporation',
'PPC': 'Pilgrim\'s Pride Corporation',
'ES': 'Eversource Energy',
'FSCE': 'Fifth Street Finance Corp.',
'INCY': 'Incyte Corporation',
'ATRA': 'Atara Biotherapeutics, Inc.',
'LMCK': 'Liberty Media Corporation',
'MCRN': 'Milacron Holdings Corp.',
'ALG': 'Alamo Group, Inc.',
'SYNA': 'Synaptics Incorporated',
'DELTW': 'Delta Technology Holdings Limited',
'GYRO': 'Gyrodyne, LLC',
'ITW': 'Illinois Tool Works Inc.',
'LXP-C': 'Lexington Realty Trust',
'SSD': 'Simpson Manufacturing Company, Inc.',
'RAD': 'Rite Aid Corporation',
'CALI': 'China Auto Logistics Inc.',
'CRWS': 'Crown Crafts, Inc.',
'JP': 'Jupai Holdings Limited',
'BML-J': 'Bank of America Corporation',
'EWBC': 'East West Bancorp, Inc.',
'CBZ': 'CBIZ, Inc.',
'SVU': 'SuperValu Inc.',
'VPV': 'Invesco Pennsylvania Value Municipal Income Trust',
'BKD': 'Brookdale Senior Living Inc.',
'RNWK': 'RealNetworks, Inc.',
'ECOL': 'US Ecology, Inc.',
'CDC': 'Victory CEMP US EQ Income Enhanced Volatility Wtd Index ETF',
'MHY': 'Western Asset Managed High Income Fund, Inc.',
'RTEC': 'Rudolph Technologies, Inc.',
'MUSA': 'Murphy USA Inc.',
'MAGS': 'Magal Security Systems Ltd.',
'YNDX': 'Yandex N.V.',
'DUKH': 'Duke Energy Corporation',
'NVRO': 'Nevro Corp.',
'OFC-L': 'Corporate Office Properties Trust',
'MTB-C': 'M&T Bank Corporation',
'CBAN': 'Colony Bankcorp, Inc.',
'HCAC': 'Hennessy Capital Acquisition Corp. II',
'FTRPR': 'Frontier Communications Corporation',
'LHCG': 'LHC Group',
'AGTC': 'Applied Genetic Technologies Corporation',
'GOV': 'Government Properties Income Trust',
'TYG': 'Tortoise Energy Infrastructure Corporation',
'M': 'Macy\'s Inc',
'TGNA': 'TEGNA Inc.',
'GPX': 'GP Strategies Corporation',
'ENV': 'Envestnet, Inc',
'VRSK': 'Verisk Analytics, Inc.',
'DKT': 'Deutsch Bk Contingent Cap Tr V',
'WPX': 'WPX Energy, Inc.',
'FLIR': 'FLIR Systems, Inc.',
'HQH': 'Tekla Healthcare Investors',
'IBA': 'Industrias Bachoco, S.A. de C.V.',
'PMX': 'PIMCO Municipal Income Fund III',
'NTT': 'Nippon Telegraph and Telephone Corporation',
'HXL': 'Hexcel Corporation',
'KIM-J': 'Kimco Realty Corporation',
'SKX': 'Skechers U.S.A., Inc.',
'ARH-C': 'Arch Capital Group Ltd.',
'EPE': 'EP Energy Corporation',
'DHR$': 'Danaher Corporation',
'TCK': 'Teck Resources Ltd',
'STI': 'SunTrust Banks, Inc.',
'EMITF': 'Elbit Imaging Ltd.',
'ASML': 'ASML Holding N.V.',
'BMA': 'Macro Bank Inc.',
'TIL': 'Till Capital Ltd.',
'COBZ': 'CoBiz Financial Inc.',
'DLR-G': 'Digital Realty Trust, Inc.',
'SLG': 'SL Green Realty Corporation',
'RL': 'Ralph Lauren Corporation',
'ALTY': 'Global X SuperDividend Alternatives ETF',
'DSLV': 'region',
'ATAI': 'ATA Inc.',
'QINC': 'First Trust RBA Quality Income ETF',
'BKMU': 'Bank Mutual Corporation',
'TV': 'Grupo Televisa S.A.',
'AGR': 'Avangrid, Inc.',
'PCG-C': 'Pacific Gas & Electric Co.',
'RFDI': 'First Trust RiverFront Dynamic Developed International ETF',
'NAT': 'Nordic American Tankers Limited',
'DWFI': 'SPDR Dorsey Wright Fixed Income Allocation ETF',
'EQS': 'Equus Total Return, Inc.',
'MPWR': 'Monolithic Power Systems, Inc.',
'DLR-I': 'Digital Realty Trust, Inc.',
'SENEA': 'Seneca Foods Corp.',
'SMMT': 'Summit Therapeutics plc',
'USATP': 'USA Technologies, Inc.',
'ARDX': 'Ardelyx, Inc.',
'TM': 'Toyota Motor Corp Ltd Ord',
'PCOM': 'Points International, Ltd.',
'RXII': 'RXI Pharmaceuticals Corporation',
'DSS': 'Document Security Systems, Inc.',
'FLS': 'Flowserve Corporation',
'UNT': 'Unit Corporation',
'BIND': 'BIND Therapeutics, Inc.',
'MITK': 'Mitek Systems, Inc.',
'CPB': 'Campbell Soup Company',
'SAQ': 'Saratoga Investment Corp',
'HLX': 'Helix Energy Solutions Group, Inc.',
'KODK.WS.A': 'Eastman Kodak Company',
'NAME': 'Rightside Group, Ltd.',
'HCN-I': 'Welltower Inc.',
'PDM': 'Piedmont Office Realty Trust, Inc.',
'AHT': 'Ashford Hospitality Trust Inc',
'AFAM': 'Almost Family Inc',
'BLK': 'BlackRock, Inc.',
'NWBI': 'Northwest Bancshares, Inc.',
'GSL': 'Global Ship Lease, Inc.',
'SPLP': 'Steel Partners Holdings LP',
'PPR': 'Voya Prime Rate Trust',
'FCAP': 'First Capital, Inc.',
'EWZS': 'iShares MSCI Brazil Small-Cap ETF',
'GBR': 'New Concept Energy, Inc',
'HTS': 'Hatteras Financial Corp',
'SQNM': 'Sequenom, Inc.',
'SUNW': 'Sunworks, Inc.',
'ITCB': 'Itaú CorpBanca',
'ULTR': 'Ultrapetrol (Bahamas) Limited',
'FRA': 'Blackrock Floating Rate Income Strategies Fund Inc',
'CSPI': 'CSP Inc.',
'GDL': 'The GDL Fund',
'TSC': 'TriState Capital Holdings, Inc.',
'KFI': 'KKR Financial Holdings LLC',
'MOSY': 'MoSys, Inc.',
'MDXG': 'MiMedx Group, Inc',
'VNDA': 'Vanda Pharmaceuticals Inc.',
'VBFC': 'Village Bank and Trust Financial Corp.',
'HTBX': 'Heat Biologics, Inc.',
'IIIN': 'Insteel Industries, Inc.',
'MIW': 'Eaton Vance Michigan Municipal Bond Fund',
'ESNC': 'EnSync, Inc.',
'COSI': 'Cosi, Inc.',
'KALU': 'Kaiser Aluminum Corporation',
'XNET': 'Xunlei Limited',
'DYSL': 'Dynasil Corporation of America',
'GBLI': 'Global Indemnity plc',
'HBM': 'HudBay Minerals Inc',
'COWN': 'Cowen Group, Inc.',
'NNA': 'Navios Maritime Acquisition Corporation',
'IMNP ': 'Immune Pharmaceuticals Inc.',
'NEE-I': 'NextEra Energy, Inc.',
'VTTI': 'VTTI Energy Partners LP',
'MYD': 'Blackrock MuniYield Fund, Inc.',
'SPE': 'Special Opportunities Fund Inc.',
'SABR': 'Sabre Corporation',
'CWEI': 'Clayton Williams Energy, Inc.',
'IIF': 'Morgan Stanley India Investment Fund, Inc.',
'LEA': 'Lear Corporation',
'KZ': 'KongZhong Corporation',
'ZAIS': 'ZAIS Group Holdings, Inc.',
'REX': 'REX American Resources Corporation',
'BANR': 'Banner Corporation',
'JTA': 'Nuveen Tax-Advantaged Total Return Strategy Fund',
'EIP': 'Eaton Vance Pennsylvania Municipal Bond Fund',
'ITT': 'ITT Inc.',
'IOSP': 'Innospec Inc.',
'RCL': 'Royal Caribbean Cruises Ltd.',
'WOR': 'Worthington Industries, Inc.',
'PACEU': 'Pace Holdings Corp.',
'ARWA': 'Arowana Inc.',
'UEIC': 'Universal Electronics Inc.',
'NORT': 'Nordic Realty Trust, Inc.',
'KFH': 'KKR Financial Holdings LLC',
'HRG': 'HRG Group, Inc.',
'NUROW': 'NeuroMetrix, Inc.',
'BCBP': 'BCB Bancorp, Inc. (NJ)',
'TFG': 'Goldman Sachs Group, Inc. (The)',
'DXKW': 'WisdomTree Korea Hedged Equity Fund',
'DSKY': 'iDreamSky Technology Limited',
'ABMD': 'ABIOMED, Inc.',
'MPET': 'Magellan Petroleum Corporation',
'NTIP': 'Network-1 Technologies, Inc.',
'PY': 'Principal Shareholder Yield Index ETF',
'ARL': 'American Realty Investors, Inc.',
'FTSM': 'First Trust Enhanced Short Maturity ETF',
'SWFT': 'Swift Transportation Company',
'SYN': 'Synthetic Biologics, Inc',
'BNS': 'Bank of Nova Scotia (The)',
'NMK-C': 'Niagara Mohawk Holdings, Inc.',
'IDT': 'IDT Corporation',
'NXST': 'Nexstar Broadcasting Group, Inc.',
'TER': 'Teradyne, Inc.',
'SPXC': 'SPX Corporation',
'JASO': 'JA Solar Holdings, Co., Ltd.',
'KB': 'KB Financial Group Inc',
'OKS': 'ONEOK Partners, L.P.',
'CERCZ': 'Cerecor Inc.',
'SCCO': 'Southern Copper Corporation',
'MIII': 'M III Acquisition Corp.',
'NOVT': 'Novanta Inc.',
'IPG': 'Interpublic Group of Companies, Inc. (The)',
'ABEOW': 'Abeona Therapeutics Inc.',
'TDF': 'Templeton Dragon Fund, Inc.',
'ZIXI': 'Zix Corporation',
'DIOD': 'Diodes Incorporated',
'KN': 'Knowles Corporation',
'TMUSP': 'T-Mobile US, Inc.',
'RWT': 'Redwood Trust, Inc.',
'AIC': 'Arlington Asset Investment Corp',
'PVTBP': 'PrivateBancorp, Inc.',
'IRL': 'New Ireland Fund, Inc. (The)',
'SBCP': 'Sunshine Bancorp, Inc.',
'HFWA': 'Heritage Financial Corporation',
'BF.A': 'Brown Forman Corporation',
'FB': 'Facebook, Inc.',
'CONE': 'CyrusOne Inc',
'FUNC': 'First United Corporation',
'SNP': 'China Petroleum & Chemical Corporation',
'FHK': 'First Trust Hong Kong AlphaDEX Fund',
'SVT': 'Servotronics, Inc.',
'NDP': 'Tortoise Energy Independence Fund, Inc.',
'AVIR': 'Aviragen Therapeutics, Inc.',
'EGBN': 'Eagle Bancorp, Inc.',
'WLB': 'Westmoreland Coal Company',
'AXU': 'Alexco Resource Corp',
'NCIT': 'NCI, Inc.',
'TRST': 'TrustCo Bank Corp NY',
'SYNC': 'Synacor, Inc.',
'REG-F': 'Regency Centers Corporation',
'FBIO': 'Fortress Biotech, Inc.',
'VRML': 'Vermillion, Inc.',
'IDSY': 'I.D. Systems, Inc.',
'KIM-I': 'Kimco Realty Corporation',
'EFOI': 'Energy Focus, Inc.',
'GG': 'Goldcorp Inc.',
'GGT': 'Gabelli Multi-Media Trust Inc. (The)',
'FELE': 'Franklin Electric Co., Inc.',
'NGG': 'National Grid Transco, PLC',
'CLNY-A': 'Colony Capital, Inc',
'SAR': 'Saratoga Investment Corp',
'AMRB': 'American River Bankshares',
'VSLR': 'Vivint Solar, Inc.',
'ATW': 'Atwood Oceanics, Inc.',
'IBM': 'International Business Machines Corporation',
'BGC': 'General Cable Corporation',
'MIXT': 'MiX Telematics Limited',
'DSGX': 'The Descartes Systems Group Inc.',
'CPT': 'Camden Property Trust',
'WMC': 'Western Asset Mortgage Capital Corporation',
'EGO': 'Eldorado Gold Corporation',
'LTRPA': 'Liberty TripAdvisor Holdings, Inc.',
'SCL': 'Stepan Company',
'GMTA': 'GATX Corporation',
'CATB': 'Catabasis Pharmaceuticals, Inc.',
'PWR': 'Quanta Services, Inc.',
'ADRA': 'BLDRS Asia 50 ADR Index Fund',
'PKD': 'Parker Drilling Company',
'CPHD': 'CEPHEID',
'AVV': 'Aviva plc',
'ALJJ': 'ALJ Regional Holdings, Inc.',
'ANAC': 'Anacor Pharmaceuticals, Inc.',
'EVN': 'Eaton Vance Municipal Income Trust',
'FMNB': 'Farmers National Banc Corp.',
'MCV': 'Medley Capital Corporation',
'ARR': 'ARMOUR Residential REIT, Inc.',
'TKC': 'Turkcell Iletisim Hizmetleri AS',
'CP': 'Canadian Pacific Railway Limited',
'K': 'Kellogg Company',
'CDI': 'CDI Corporation',
'SUP': 'Superior Industries International, Inc.',
'TICC': 'TICC Capital Corp.',
'BAC': 'Bank of America Corporation',
'CTRP': 'Ctrip.com International, Ltd.',
'DGSE': 'DGSE Companies, Inc.',
'ERH': 'Wells Fargo Utilities and High Income Fund',
'NUTR': 'Nutraceutical International Corporation',
'ADBE': 'Adobe Systems Incorporated',
'HEQ': 'John Hancock Hedged Equity & Income Fund',
'CVCO': 'Cavco Industries, Inc.',
'TOUR': 'Tuniu Corporation',
'ALLY': 'Ally Financial Inc.',
'MIDD': 'The Middleby Corporation',
'CTRL': 'Control4 Corporation',
'DXB': 'Deutsche Bank AG',
'ACNB': 'ACNB Corporation',
'MHK': 'Mohawk Industries, Inc.',
'ARU': 'Ares Capital Corporation',
'ASPS': 'Altisource Portfolio Solutions S.A.',
'FEM': 'First Trust Emerging Markets AlphaDEX Fund',
'AVB': 'AvalonBay Communities, Inc.',
'EPAY': 'Bottomline Technologies, Inc.',
'EMCG': 'WisdomTree Emerging Markets Consumer Growth Fund',
'ETY': 'Eaton Vance Tax-Managed Diversified Equity Income Fund',
'BKE': 'Buckle, Inc. (The)',
'SMM': 'Salient Midstream & MLP Fund',
'EXAS': 'EXACT Sciences Corporation',
'SKYW': 'SkyWest, Inc.',
'RIBTW': 'RiceBran Technologies',
'COF-F': 'Capital One Financial Corporation',
'AHT-E': 'Ashford Hospitality Trust Inc',
'CSFL': 'CenterState Banks, Inc.',
'NOC': 'Northrop Grumman Corporation',
'TGLS': 'Tecnoglass Inc.',
'JCP': 'J.C. Penney Company, Inc. Holding Company',
'HUSA': 'Houston American Energy Corporation',
'AOS': 'Smith (A.O.) Corporation',
'GOODO': 'Gladstone Commercial Corporation',
'MGLN': 'Magellan Health, Inc.',
'MPW': 'Medical Properties Trust, Inc.',
'WSO.B': 'Watsco, Inc.',
'ADRO': 'Aduro Biotech, Inc.',
'FPF': 'First Trust Intermediate Duration Preferred & Income Fund',
'FTW': 'First Trust Taiwan AlphaDEX Fund',
'RNST': 'Renasant Corporation',
'INVN': 'InvenSense, Inc.',
'HT': 'Hersha Hospitality Trust',
'BEAT': 'BioTelemetry, Inc.',
'RJD': 'Raymond James Financial, Inc.',
'VVUS': 'VIVUS, Inc.',
'UVSP': 'Univest Corporation of Pennsylvania',
'EBS': 'Emergent Biosolutions, Inc.',
'TNXP': 'Tonix Pharmaceuticals Holding Corp.',
'LCAHU': 'Landcadia Holdings, Inc.',
'WFC-N': 'Wells Fargo & Company',
'NPTN': 'NeoPhotonics Corporation',
'ONVO': 'Organovo Holdings, Inc.',
'NYLD': 'NRG Yield, Inc.',
'MMAC': 'MMA Capital Management, LLC',
'OPHT': 'Ophthotech Corporation',
'EVM': 'Eaton Vance California Municipal Bond Fund',
'TCBIW': 'Texas Capital Bancshares, Inc.',
'NPO': 'Enpro Industries',
'EEI': 'Ecology and Environment, Inc.',
'SPI': 'SPI Energy Co., Ltd.',
'XBKS': 'Xenith Bankshares, Inc.',
'MSF': 'Morgan Stanley Emerging Markets Fund, Inc.',
'WNC': 'Wabash National Corporation',
'FCAU': 'Fiat Chrysler Automobiles N.V.',
'AKER': 'Akers Biosciences Inc',
'LBRDK': 'Liberty Broadband Corporation',
'IPL-D': 'Interstate Power and Light Company',
'EMCI': 'EMC Insurance Group Inc.',
'HOFT': 'Hooker Furniture Corporation',
'TWN': 'Taiwan Fund, Inc. (The)',
'IQNT': 'Inteliquent, Inc.',
'VWOB': 'Vanguard Emerging Markets Government Bond ETF',
'LOCK': 'LifeLock, Inc.',
'FOX': 'Twenty-First Century Fox, Inc.',
'LDF': 'Latin American Discovery Fund, Inc. (The)',
'RTK': 'Rentech, Inc.',
'ALSK': 'Alaska Communications Systems Group, Inc.',
'RSAS': 'RESAAS Services Inc.',
'AMIC': 'American Independence Corp.',
'BG': 'Bunge Limited',
'GIL': 'Gildan Activewear, Inc.',
'RMTI': 'Rockwell Medical, Inc.',
'CIG.C': 'Comp En De Mn Cemig ADS',
'SRET': 'Global X SuperDividend REIT ETF',
'NKA': 'Niska Gas Storage Partners LLC',
'PXD': 'Pioneer Natural Resources Company',
'RLYP': 'Relypsa, Inc.',
'STRZB': 'Starz',
'WILN': 'Wi-Lan Inc',
'YRCW': 'YRC Worldwide, Inc.',
'III': 'Information Services Group, Inc.',
'PSCM': 'PowerShares S&P SmallCap Materials Portfolio',
'LLNW': 'Limelight Networks, Inc.',
'KBH': 'KB Home',
'ICFI': 'ICF International, Inc.',
'BOOT': 'Boot Barn Holdings, Inc.',
'UNP': 'Union Pacific Corporation',
'GST-A': 'Gastar Exploration Inc.',
'MN': 'Manning & Napier, Inc.',
'HDNG': 'Hardinge, Inc.',
'GMZ': 'Goldman Sachs MLP Income Opportunities Fund',
'KKD': 'Krispy Kreme Doughnuts, Inc.',
'WPC': 'W.P. Carey Inc.',
'PSA-U': 'Public Storage',
'SMBC': 'Southern Missouri Bancorp, Inc.',
'SCE-C': 'Southern California Edison Company',
'GPS': 'Gap, Inc. (The)',
'CBL-D': 'CBL & Associates Properties, Inc.',
'LMFAW': 'LM Funding America, Inc.',
'HPE': 'Hewlett Packard Enterprise Company',
'ICE': 'Intercontinental Exchange Inc.',
'BEBE': 'bebe stores, inc.',
'GNE': 'Genie Energy Ltd.',
'VKQ': 'Invesco Municipal Trust',
'EMKR': 'EMCORE Corporation',
'MATR': 'Mattersight Corporation',
'CASC': 'Cascadian Therapeutics, Inc.',
'SRAQU': 'Silver Run Acquisition Corporation',
'SAUC': 'Diversified Restaurant Holdings, Inc.',
'FCT': 'First Trust Senior Floating Rate Income Fund II',
'SRNE': 'Sorrento Therapeutics, Inc.',
'STRP': 'Straight Path Communications Inc.',
'CPGX': 'Columbia Pipeline Group, Inc.',
'TBRA': 'Tobira Therapeutics, Inc.',
'GPT-A': 'Gramercy Property Trust Inc.',
'S': 'Sprint Corporation',
'AVNU': 'Avenue Financial Holdings, Inc.',
'KMDA': 'Kamada Ltd.',
'AHH': 'Armada Hoffler Properties, Inc.',
'VCLT': 'Vanguard Long-Term Corporate Bond ETF',
'ASGN': 'On Assignment, Inc.',
'TD': 'Toronto Dominion Bank (The)',
'RNR': 'RenaissanceRe Holdings Ltd.',
'CEA': 'China Eastern Airlines Corporation Ltd.',
'CIZ': 'Victory CEMP Developed Enhanced Volatility Wtd Index ETF',
'NYMTO': 'New York Mortgage Trust, Inc.',
'PLAB': 'Photronics, Inc.',
'FMSA': 'Fairmount Santrol Holdings Inc.',
'FTLB': 'First Trust Low Beta Income ETF',
'INTL': 'INTL FCStone Inc.',
'DYNT': 'Dynatronics Corporation',
'GGB': 'Gerdau S.A.',
'ALE': 'Allete, Inc.',
'BHLB': 'Berkshire Hills Bancorp, Inc.',
'FBZ': 'First Trust Brazil AlphaDEX Fund',
'AGFSW': 'AgroFresh Solutions, Inc.',
'LYV': 'Live Nation Entertainment, Inc.',
'OREX': 'Orexigen Therapeutics, Inc.',
'CUBE': 'CubeSmart',
'RFI': 'Cohen & Steers Total Return Realty Fund, Inc.',
'HCM': 'Hutchison China MediTech Limited',
'XRAY': 'DENTSPLY SIRONA Inc.',
'QTM': 'Quantum Corporation',
'MCHI': 'iShares MSCI China ETF',
'ONB': 'Old National Bancorp',
'HOG': 'Harley-Davidson, Inc.',
'QADA': 'QAD Inc.',
'DHXM': 'DHX Media Ltd.',
'WG': 'Willbros Group, Inc.',
'AFMD': 'Affimed N.V.',
'GCV': 'Gabelli Convertible and Income Securities Fund, Inc. (The)',
'IPHS': 'Innophos Holdings, Inc.',
'CRESY': 'Cresud S.A.C.I.F. y A.',
'DAKT': 'Daktronics, Inc.',
'EXC': 'Exelon Corporation',
'CYCCP': 'Cyclacel Pharmaceuticals, Inc.',
'W': 'Wayfair Inc.',
'BCS-A': 'Barclays PLC',
'SYRX': 'Sysorex Global',
'MHNC': 'Maiden Holdings, Ltd.',
'CUDA': 'Barracuda Networks, Inc.',
'ADMA': 'ADMA Biologics Inc',
'LOAN': 'Manhattan Bridge Capital, Inc',
'SBNB': 'Scorpio Tankers Inc.',
'ATRI': 'ATRION Corporation',
'TROX': 'Tronox Limited',
'DCI': 'Donaldson Company, Inc.',
'ANTH': 'Anthera Pharmaceuticals, Inc.',
'AEGN': 'Aegion Corp',
'FFIN': 'First Financial Bankshares, Inc.',
'CASI': 'CASI Pharmaceuticals, Inc.',
'CPST': 'Capstone Turbine Corporation',
'DNR': 'Denbury Resources Inc.',
'GRF': 'Eagle Capital Growth Fund, Inc.',
'GRIF': 'Griffin Industrial Realty, Inc.',
'KFY': 'Korn/Ferry International',
'CBR': 'Ciber, Inc.',
'FMY': 'First Trust',
'ROLL': 'RBC Bearings Incorporated',
'IMAX': 'Imax Corporation',
'BZM': 'BlackRock Maryland Municipal Bond Trust',
'PATI': 'Patriot Transportation Holding, Inc.',
'OUT': 'OUTFRONT Media Inc.',
'AWP': 'Alpine Global Premier Properties Fund',
'CLR': 'Continental Resources, Inc.',
'UBNK': 'United Financial Bancorp, Inc.',
'PATK': 'Patrick Industries, Inc.',
'RDS.A': 'Royal Dutch Shell PLC',
'GSOL': 'Global Sources Ltd.',
'MXL': 'MaxLinear, Inc',
'BDSI': 'BioDelivery Sciences International, Inc.',
'NSH': 'Nustar GP Holdings, LLC',
'WBS': 'Webster Financial Corporation',
'CTRN': 'Citi Trends, Inc.',
'HTGY': 'Hercules Capital, Inc.',
'BFIN': 'BankFinancial Corporation',
'BKK': 'Blackrock Municipal 2020 Term Trust',
'VALE.P': 'VALE S.A.',
'ATO': 'Atmos Energy Corporation',
'ICL': 'Israel Chemicals Shs',
'HRT': 'Arrhythmia Research Technology Inc.',
'TSRI': 'TSR, Inc.',
'CLDT': 'Chatham Lodging Trust (REIT)',
'DWSN': 'Dawson Geophysical Company',
'DEST': 'Destination Maternity Corporation',
'KEG': 'Key Energy Services, Inc.',
'BIDU': 'Baidu, Inc.',
'CTF': 'Nuveen Long/Short Commodity Total Return Fund',
'IBCP': 'Independent Bank Corporation',
'BLMN': 'Bloomin\' Brands, Inc.',
'CUNB': 'CU Bancorp (CA)',
'TA': 'TravelCenters of America LLC',
'FORR': 'Forrester Research, Inc.',
'AVD': 'American Vanguard Corporation',
'CRAY': 'Cray Inc',
'CAKE': 'The Cheesecake Factory Incorporated',
'ATLC': 'Atlanticus Holdings Corporation',
'NLY': 'Annaly Capital Management Inc',
'WRE': 'Washington Real Estate Investment Trust',
'ECCZ': 'Eagle Point Credit Company Inc.',
'CVC': 'Cablevision Systems Corporation',
'PNY': 'Piedmont Natural Gas Company, Inc.',
'FTEK': 'Fuel Tech, Inc.',
'VRTS': 'Virtus Investment Partners, Inc.',
'CMI': 'Cummins Inc.',
'CHK': 'Chesapeake Energy Corporation',
'STAY': 'Extended Stay America, Inc.',
'FENG': 'Phoenix New Media Limited',
'FFIV': 'F5 Networks, Inc.',
'TCBIL': 'Texas Capital Bancshares, Inc.',
'FI': 'Frank\'s International N.V.',
'AFST': 'AmTrust Financial Services, Inc.',
'YGE': 'Yingli Green Energy Holding Company Limited',
'STE': 'STERIS plc',
'AXP': 'American Express Company',
'THS': 'Treehouse Foods, Inc.',
'COL': 'Rockwell Collins, Inc.',
'CUZ': 'Cousins Properties Incorporated',
'QPACW': 'Quinpario Acquisition Corp. 2',
'SAN-A': 'Banco Santander, S.A.',
'KMX': 'CarMax Inc',
'GOOG': 'Alphabet Inc.',
'NBB': 'Nuveen Build America Bond Fund',
'DRIOW': 'LabStyle Innovations Corp.',
'NAO': 'Nordic American Offshore Ltd',
'AMBA': 'Ambarella, Inc.',
'RPT-D': 'Ramco-Gershenson Properties Trust',
'PL-E': 'Protective Life Corporation',
'IBKCP': 'IBERIABANK Corporation',
'WCN': 'Waste Connections, Inc.',
'MZA': 'MuniYield Arizona Fund, Inc.',
'SNCR': 'Synchronoss Technologies, Inc.',
'PVCT.WS': 'Provectus Biopharmaceuticals, Inc.',
'CCJ': 'Cameco Corporation',
'CNO': 'CNO Financial Group, Inc.',
'IBKCO': 'IBERIABANK Corporation',
'GOGL': 'Golden Ocean Group Limited',
'DFS-B': 'Discover Financial Services',
'FBP': 'First BanCorp.',
'CELG': 'Celgene Corporation',
'GRP.U': 'Granite Real Estate Inc.',
'EVGBC': 'Eaton Vance NextShares Trust',
'OCLR': 'Oclaro, Inc.',
'SRAQW': 'Silver Run Acquisition Corporation',
'PCRX': 'Pacira Pharmaceuticals, Inc.',
'JHA': 'Nuveen High Income 2020 Target Term Fund',
'ZION': 'Zions Bancorporation',
'GLADO': 'Gladstone Capital Corporation',
'AKRX': 'Akorn, Inc.',
'BLL': 'Ball Corporation',
'UZB': 'United States Cellular Corporation',
'AMBR': 'Amber Road, Inc.',
'DNN': 'Denison Mine Corp',
'NGL': 'NGL ENERGY PARTNERS LP',
'DNB': 'Dun & Bradstreet Corporation (The)',
'MYOK': 'MyoKardia, Inc.',
'PGZ': 'Principal Real Estate Income Fund',
'XCRA': 'Xcerra Corporation',
'CLB': 'Core Laboratories N.V.',
'TGTX': 'TG Therapeutics, Inc.',
'ANDAU': 'Andina Acquisition Corp. II',
'PKE': 'Park Electrochemical Corporation',
'SYK': 'Stryker Corporation',
'VKTXW': 'Viking Therapeutics, Inc.',
'WMK': 'Weis Markets, Inc.',
'PCM': 'PIMCO Commercial Mortgage Securities Trust, Inc.',
'NDRO': 'Enduro Royalty Trust',
'MQT': 'Blackrock MuniYield Quality Fund II, Inc.',
'KTH': 'Lehman ABS Corporation',
'HTY': 'John Hancock Tax-Advantaged Global Shareholder Yield Fund',
'PBI': 'Pitney Bowes Inc.',
'BCC': 'Boise Cascade, L.L.C.',
'DDF': 'Delaware Investments Dividend & Income Fund, Inc.',
'WIBC': 'Wilshire Bancorp, Inc.',
'PHI': 'Philippine Long Distance Telephone Company',
'AOD': 'Alpine Total Dynamic Dividend Fund',
'EDGW': 'Edgewater Technology, Inc.',
'EMI': 'Eaton Vance Michigan Municipal Income Trust',
'TBI': 'TrueBlue, Inc.',
'MUH': 'Blackrock MuniHoldings Fund II, Inc.',
'GKOS': 'Glaukos Corporation',
'SGOC': 'SGOCO Group, Ltd',
'NUO': 'Nuveen Ohio Quality Income Municipal Fund',
'NKX': 'Nuveen California AMT-Free Municipal Income Fund',
'RELY': 'Real Industry, Inc. ',
'BTX.WS': 'BioTime, Inc.',
'IND': 'ING Group, N.V.',
'CWT': 'California Water Service Group Holding',
'MU': 'Micron Technology, Inc.',
'CADTR': 'DT Asia Investments Limited',
'FPI': 'Farmland Partners Inc.',
'PLCM': 'Polycom, Inc.',
'TLRD': 'Tailored Brands, Inc.',
'CHFN': 'Charter Financial Corp.',
'ANGO': 'AngioDynamics, Inc.',
'SHO-E': 'Sunstone Hotel Investors, Inc.',
'SB-C': 'Safe Bulkers, Inc',
'LLL': 'L-3 Communications Holdings, Inc.',
'OB': 'OneBeacon Insurance Group, Ltd.',
'QPACU': 'Quinpario Acquisition Corp. 2',
'SBFGP': 'SB Financial Group, Inc.',
'NXQ': 'Nuveen Select Tax Free Income Portfolio II',
'PPHMP': 'Peregrine Pharmaceuticals Inc.',
'SGMA': 'SigmaTron International, Inc.',
'WIA': 'Western Asset/Claymore U.S. Treasury Inflation Prot Secs Fd',
'ONSIW': 'Oncobiologics, Inc.',
'POPE': 'Pope Resources',
'FYT': 'First Trust Small Cap Value AlphaDEX Fund',
'JEC': 'Jacobs Engineering Group Inc.',
'FTA': 'First Trust Large Cap Value AlphaDEX Fund',
'MOV': 'Movado Group Inc.',
'HCAPL': 'Harvest Capital Credit Corporation',
'PROV': 'Provident Financial Holdings, Inc.',
'VC': 'Visteon Corporation',
'ARWR': 'Arrowhead Pharmaceuticals, Inc.',
'MH-A': 'Maiden Holdings, Ltd.',
'JHI': 'John Hancock Investors Trust',
'BDC': 'Belden Inc',
'NYV': 'Nuveen New York Municipal Value Fund 2',
'CH': 'Aberdeen Chile Fund, Inc.',
'SYF': 'Synchrony Financial',
'ENX': 'Eaton Vance New York Municipal Bond Fund',
'HMNY': 'Helios and Matheson Analytics Inc',
'WMT': 'Wal-Mart Stores, Inc.',
'LMT': 'Lockheed Martin Corporation',
'MMSI': 'Merit Medical Systems, Inc.',
'PSO': 'Pearson, Plc',
'APO': 'Apollo Global Management, LLC',
'JMF': 'Nuveen Energy MLP Total Return Fund',
'PACW': 'PacWest Bancorp',
'LPI': 'Laredo Petroleum, Inc.',
'CHMG': 'Chemung Financial Corp',
'WFC-T': 'Wells Fargo & Company',
'JSYNR': 'Jensyn Acquistion Corp.',
'AFSS': 'AmTrust Financial Services, Inc.',
'OSIS': 'OSI Systems, Inc.',
'IRDMB': 'Iridium Communications Inc',
'DRIO': 'LabStyle Innovations Corp.',
'TGH': 'Textainer Group Holdings Limited',
'CKX': 'CKX Lands, Inc.',
'RIC': 'Richmont Mines, Inc.',
'OCIP': 'OCI Partners LP',
'PYT': 'PPlus Trust',
'RLOG': 'Rand Logistics, Inc.',
'NERV': 'Minerva Neurosciences, Inc',
'RESI': 'Altisource Residential Corporation',
'GEK': 'General Electric Capital Corporation',
'POWL': 'Powell Industries, Inc.',
'DCUB': 'Dominion Resources, Inc.',
'JQC': 'Nuveen Credit Strategies Income Fund',
'LOB': 'Live Oak Bancshares, Inc.',
'CNR': 'China Metro-Rural Holdings Limited',
'WLFC': 'Willis Lease Finance Corporation',
'JSD': 'Nuveen Short Duration Credit Opportunities Fund',
'MSTR': 'MicroStrategy Incorporated',
'HUN': 'Huntsman Corporation',
'DFT-C': 'Dupont Fabros Technology, Inc.',
'HES-A': 'Hess Corporation',
'USAT': 'USA Technologies, Inc.',
'BCS-': 'Barclays PLC',
'AMTG': 'Apollo Residential Mortgage, Inc.',
'DVAX': 'Dynavax Technologies Corporation',
'TRNS': 'Transcat, Inc.',
'MPG': 'Metaldyne Performance Group Inc.',
'PRGO': 'Perrigo Company',
'CFCB': 'Centrue Financial Corporation',
'AR': 'Antero Resources Corporation',
'NHA': 'Nuveen Municipal 2021 Target Term Fund',
'CRDC': 'Cardica, Inc.',
'TEI': 'Templeton Emerging Markets Income Fund, Inc.',
'CHL': 'China Mobile (Hong Kong) Ltd.',
'NGHC': 'National General Holdings Corp',
'BBT-E': 'BB&T Corporation',
'GPOR': 'Gulfport Energy Corporation',
'QLTI': 'QLT Inc.',
'STT': 'State Street Corporation',
'DRA': 'Diversified Real Asset Income Fund',
'GALTU': 'Galectin Therapeutics Inc.',
'NCV': 'AllianzGI Convertible & Income Fund',
'AHL-A': 'Aspen Insurance Holdings Limited',
'NVG': 'Nuveen Dividend Advantage Municipal Income Fund',
'MGP': 'MGM Growth Properties LLC',
'EGRX': 'Eagle Pharmaceuticals, Inc.',
'GSM': 'Ferroglobe PLC',
'PESI': 'Perma-Fix Environmental Services, Inc.',
'MCHX': 'Marchex, Inc.',
'GS': 'Goldman Sachs Group, Inc. (The)',
'KNOP': 'KNOT Offshore Partners LP',
'IXUS': 'iShares Core MSCI Total International Stock ETF',
'ELS': 'Equity Lifestyle Properties, Inc.',
'BBOX': 'Black Box Corporation',
'WTM': 'White Mountains Insurance Group, Ltd.',
'TG': 'Tredegar Corporation',
'AVAL': 'Grupo Aval Acciones y Valores S.A.',
'GGAL': 'Grupo Financiero Galicia S.A.',
'AHP': 'Ashford Hospitality Prime, Inc.',
'UL': 'Unilever PLC',
'RUBI': 'The Rubicon Project, Inc.',
'GLV': 'Clough Global Allocation Fund',
'AYI': 'Acuity Brands Inc',
'KEX': 'Kirby Corporation',
'BAC-D': 'Bank of America Corporation',
'AXAS': 'Abraxas Petroleum Corporation',
'CBX': 'CBX (Listing Market NYSE Networks AE',
'AAC': 'AAC Holdings, Inc.',
'ANSS': 'ANSYS, Inc.',
'GEO': 'Geo Group Inc (The)',
'CMO-E': 'Capstead Mortgage Corporation',
'MHF': 'Western Asset Municipal High Income Fund, Inc.',
'HIX': 'Western Asset High Income Fund II Inc.',
'MANH': 'Manhattan Associates, Inc.',
'PAACR': 'Pacific Special Acquisition Corp.',
'KTP': 'Lehman ABS Corporation',
'HGSH': 'China HGS Real Estate, Inc.',
'LCI': 'Lannett Co Inc',
'FSZ': 'First Trust Switzerland AlphaDEX Fund',
'TEX': 'Terex Corporation',
'RM': 'Regional Management Corp.',
'FLOW': 'SPX FLOW, Inc.',
'VIV': 'Telefonica Brasil S.A.',
'FSD': 'First Trust High Income Long Short Fund',
'KAMN': 'Kaman Corporation',
'FNK': 'First Trust Mid Cap Value AlphaDEX Fund',
'AFB': 'Alliance National Municipal Income Fund Inc',
'IHS': 'IHS Inc.',
'LMNR': 'Limoneira Co',
'GHL': 'Greenhill & Co., Inc.',
'TCBK': 'TriCo Bancshares',
'THLD': 'Threshold Pharmaceuticals, Inc.',
'RSO-C': 'Resource Capital Corp.',
'EVOL': 'Evolving Systems, Inc.',
'SGZA': 'Selective Insurance Group, Inc.',
'IFN': 'India Fund, Inc. (The)',
'PULM': 'Pulmatrix, Inc.',
'JGBB': 'WisdomTree Japan Interest Rate Strategy Fund',
'LIVE': 'Live Ventures Incorporated',
'ATR': 'AptarGroup, Inc.',
'C-P': 'Citigroup Inc.',
'ACH': 'Aluminum Corporation of China Limited',
'NVGN': 'Novogen Limited',
'IT': 'Gartner, Inc.',
'MMP': 'Magellan Midstream Partners L.P.',
'DGLD': 'region',
'MDC': 'M.D.C. Holdings, Inc.',
'CTRV': 'ContraVir Pharmaceuticals Inc',
'AIB': 'Apollo Investment Corporation',
'NCQ': 'NovaCopper Inc.',
'BVX': 'Bovie Medical Corporation',
'RTIX': 'RTI Surgical, Inc.',
'CLACU': 'Capitol Acquisition Corp. III',
'ACUR': 'Acura Pharmaceuticals, Inc.',
'PUK-': 'Prudential Public Limited Company',
'APDNW': 'Applied DNA Sciences Inc',
'RTTR': 'Ritter Pharmaceuticals, Inc.',
'OPHC': 'OptimumBank Holdings, Inc.',
'IESC': 'IES Holdings, Inc.',
'OAS': 'Oasis Petroleum Inc.',
'OSBCP': 'Old Second Bancorp, Inc.',
'EGL': 'Engility Holdings, Inc.',
'MWG': 'Morgan Stanley',
'ANH-B': 'Anworth Mortgage Asset Corporation',
'RUSHB': 'Rush Enterprises, Inc.',
'CMS-B': 'CMS Energy Corporation',
'VRA': 'Vera Bradley, Inc.',
'WB': 'Weibo Corporation',
'BANC-D': 'Banc of California, Inc.',
'CHCO': 'City Holding Company',
'MOCO': 'MOCON, Inc.',
'BXMT': 'Capital Trust, Inc.',
'SHBI': 'Shore Bancshares Inc',
'STAG-A': 'Stag Industrial, Inc.',
'ADVM': 'Adverum Biotechnologies, Inc.',
'DDR-J': 'DDR Corp.',
'MSGN': 'MSG Networks Inc.',
'SCI': 'Service Corporation International',
'EMES': 'Emerge Energy Services LP',
'ABR': 'Arbor Realty Trust',
'WBA': 'Walgreens Boots Alliance, Inc.',
'OA': 'Orbital ATK, Inc.',
'TYPE': 'Monotype Imaging Holdings Inc.',
'DISCK': 'Discovery Communications, Inc.',
'INTLL': 'INTL FCStone Inc.',
'SHG': 'Shinhan Financial Group Co Ltd',
'MLI': 'Mueller Industries, Inc.',
'AIW': 'Arlington Asset Investment Corp',
'CDXS': 'Codexis, Inc.',
'CHH': 'Choice Hotels International, Inc.',
'BPFHP': 'Boston Private Financial Holdings, Inc.',
'FNFG-B': 'First Niagara Financial Group Inc.',
'MJN': 'Mead Johnson Nutrition Company',
'RNR-C': 'RenaissanceRe Holdings Ltd.',
'INFY': 'Infosys Limited',
'WBB': 'Westbury Bancorp, Inc.',
'OLN': 'Olin Corporation',
'NHTC': 'Natural Health Trends Corp.',
'DD-B': 'E.I. du Pont de Nemours and Company',
'DCTH': 'Delcath Systems, Inc.',
'SLM': 'SLM Corporation',
'INVT': 'Inventergy Global, Inc.',
'RFP': 'Resolute Forest Products Inc.',
'PEO': 'Adams Natural Resources Fund, Inc.',
'ENOC': 'EnerNOC, Inc.',
'CAG': 'ConAgra Foods, Inc.',
'PCG-G': 'Pacific Gas & Electric Co.',
'TGP': 'Teekay LNG Partners L.P.',
'JLL': 'Jones Lang LaSalle Incorporated',
'CBS': 'CBS Corporation',
'MNR-B': 'Monmouth Real Estate Investment Corporation',
'AEK': 'Aegon NV',
'TGS': 'Transportadora De Gas Sa Ord B',
'ADS': 'Alliance Data Systems Corporation',
'ERII': 'Energy Recovery, Inc.',
'IMH': 'Impac Mortgage Holdings, Inc.',
'TANH': 'Tantech Holdings Ltd.',
'LBY': 'Libbey, Inc.',
'LLY': 'Eli Lilly and Company',
'FBC': 'Flagstar Bancorp, Inc.',
'QQQC': 'Global X NASDAQ China Technology ETF',
'USA': 'Liberty All-Star Equity Fund',
'USM': 'United States Cellular Corporation',
'BGFV': 'Big 5 Sporting Goods Corporation',
'PEB-D': 'Pebblebrook Hotel Trust',
'WYIGU': 'JM Global Holding Company',
'BRFS': 'BRF S.A.',
'STRZA': 'Starz',
'NTK': 'Nortek Inc.',
'GRR': 'Asia Tigers Fund, Inc. (The)',
'NOMD': 'Nomad Foods Limited',
'EACQU': 'Easterly Acquisition Corp.',
'CYS': 'CYS Investments, Inc.',
'RPRX': 'Repros Therapeutics Inc.',
'SPEX': 'Spherix Incorporated',
'ARTNA': 'Artesian Resources Corporation',
'COLB': 'Columbia Banking System, Inc.',
'PLSE': 'Pulse Biosciences, Inc',
'MHI': 'Pioneer Municipal High Income Trust',
'DRD': 'DRDGOLD Limited'
}
# Build a flat list of search terms: every BATS symbol, plus the matching
# company name whenever the symbol appears in stockDict.
with open('bats_symbols.csv') as symbols_file:
    symbolReader = csv.reader(symbols_file)
    symbols = []
    for row in symbolReader:
        symbols.append(row[0])
        if row[0] in stockDict:
            symbols.append(stockDict[row[0]])
import json
output = open('search_terms.txt', 'w')
json.dump(symbols, output)
output.close()
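# A hypothetical usage sketch (not part of the original script): search_terms.txt
# is written as a plain JSON list, so downstream code could reload it with:
#
#     import json
#     with open('search_terms.txt') as f:
#         search_terms = json.load(f)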
|
ardasahiner/StockTrade
|
vendor/StockTickerData/makeSearchTerms.py
|
Python
|
gpl-3.0
| 281,883
|
[
"Amber",
"BLAST",
"BWA",
"CASINO",
"CDK",
"COLUMBUS",
"CRYSTAL",
"Jaguar"
] |
b3ef27769baa81afb71fb576a6850ac167cfe0ef220bd7ecd8fd85b5672e490f
|
# -*- coding: utf-8 -*-
"""
.. _tut-artifact-ica:
Repairing artifacts with ICA
============================
This tutorial covers the basics of independent components analysis (ICA) and
shows how ICA can be used for artifact repair; an extended example illustrates
repair of ocular and heartbeat artifacts.
.. contents:: Page contents
:local:
:depth: 2
We begin as always by importing the necessary Python modules and loading some
:ref:`example data <sample-dataset>`. Because ICA can be computationally
intense, we'll also crop the data to 60 seconds; and to save ourselves from
repeatedly typing ``mne.preprocessing`` we'll directly import a few functions
and classes from that submodule:
"""
import os
import mne
from mne.preprocessing import (ICA, create_eog_epochs, create_ecg_epochs,
corrmap)
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(tmax=60.)
###############################################################################
# .. note::
# Before applying ICA (or any artifact repair strategy), be sure to observe
# the artifacts in your data to make sure you choose the right repair tool.
# Sometimes the right tool is no tool at all — if the artifacts are small
# enough you may not even need to repair them to get good analysis results.
# See :ref:`tut-artifact-overview` for guidance on detecting and
# visualizing various types of artifact.
#
# What is ICA?
# ^^^^^^^^^^^^
#
# Independent components analysis (ICA) is a technique for estimating
# independent source signals from a set of recordings in which the source
# signals were mixed together in unknown ratios. A common example of this is
# the problem of `blind source separation`_: with 3 musical instruments playing
# in the same room, and 3 microphones recording the performance (each picking
# up all 3 instruments, but at varying levels), can you somehow "unmix" the
# signals recorded by the 3 microphones so that you end up with a separate
# "recording" isolating the sound of each instrument?
#
# It is not hard to see how this analogy applies to EEG/MEG analysis: there are
# many "microphones" (sensor channels) simultaneously recording many
# "instruments" (blinks, heartbeats, activity in different areas of the brain,
# muscular activity from jaw clenching or swallowing, etc). As long as these
# various source signals are `statistically independent`_ and non-gaussian, it
# is usually possible to separate the sources using ICA, and then re-construct
# the sensor signals after excluding the sources that are unwanted.
#
#
# ICA in MNE-Python
# ~~~~~~~~~~~~~~~~~
#
# .. sidebar:: ICA and dimensionality reduction
#
# If you want to perform ICA with *no* dimensionality reduction (other than
# the number of Independent Components (ICs) given in ``n_components``, and
# any subsequent exclusion of ICs you specify in ``ICA.exclude``), pass
# ``n_pca_components=None`` (this is the default value).
#
# However, if you *do* want to reduce dimensionality, consider this
# example: if you have 300 sensor channels and you set
#    ``n_pca_components=None`` and ``n_components=50``, then the first 50
# PCs are sent to the ICA algorithm (yielding 50 ICs), and during
# reconstruction `~mne.preprocessing.ICA.apply` will use the 50 ICs
# plus PCs number 51-300 (the full PCA residual). If instead you specify
# ``n_pca_components=120`` in `~mne.preprocessing.ICA.apply`, it will
# reconstruct using the 50 ICs plus the first 70 PCs in the PCA residual
# (numbers 51-120), thus discarding the smallest 180 components.
#
# **If you have previously been using EEGLAB**'s ``runica()`` and are
# looking for the equivalent of its ``'pca', n`` option to reduce
# dimensionality via PCA before the ICA step, set ``n_components=n``
# during initialization and pass ``n_pca_components=n`` to
# `~mne.preprocessing.ICA.apply`.
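#
#    A minimal sketch of that EEGLAB-style setup (``n`` is a hypothetical
#    component count; this is illustration only and is not executed by the
#    tutorial)::
#
#        n = 64
#        ica = ICA(n_components=n)
#        ica.fit(raw)  # ideally fit on a high-pass filtered copy of the data
#        ica.apply(raw, n_pca_components=n)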
#
# MNE-Python implements three different ICA algorithms: ``fastica`` (the
# default), ``picard``, and ``infomax``. FastICA and Infomax are both in fairly
# widespread use; Picard is a newer (2017) algorithm that is expected to
# converge faster than FastICA and Infomax, and is more robust than other
# algorithms in cases where the sources are not completely independent, which
# typically happens with real EEG/MEG data. See [1]_ for more information.
#
# The ICA interface in MNE-Python is similar to the interface in
# `scikit-learn`_: some general parameters are specified when creating an
# `~mne.preprocessing.ICA` object, then the `~mne.preprocessing.ICA` object is
# fit to the data using its `~mne.preprocessing.ICA.fit` method. The results of
# the fitting are added to the `~mne.preprocessing.ICA` object as attributes
# that end in an underscore (``_``), such as ``ica.mixing_matrix_`` and
# ``ica.unmixing_matrix_``. After fitting, the ICA component(s) that you want
# to remove must be chosen, and the ICA fit must then be applied to the
# `~mne.io.Raw` or `~mne.Epochs` object using the `~mne.preprocessing.ICA`
# object's `~mne.preprocessing.ICA.apply` method.
#
# As is typically done with ICA, the data are first scaled to unit variance and
# whitened using principal components analysis (PCA) before performing the ICA
# decomposition. This is a two-stage process:
#
# 1. To deal with different channel types having different units
# (e.g., Volts for EEG and Tesla for MEG), data must be pre-whitened.
# If ``noise_cov=None`` (default), all data of a given channel type is
# scaled by the standard deviation across all channels. If ``noise_cov`` is
# a `~mne.Covariance`, the channels are pre-whitened using the covariance.
# 2. The pre-whitened data are then decomposed using PCA.
#
# From the resulting principal components (PCs), the first ``n_components`` are
# then passed to the ICA algorithm if ``n_components`` is an integer number.
# It can also be a float between 0 and 1, specifying the **fraction** of
# explained variance that the PCs should capture; the appropriate number of
# PCs (i.e., just as many PCs as are required to explain the given fraction
# of total variance) is then passed to the ICA.
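#
# For example (an illustrative sketch, not executed by this tutorial; 0.95 is
# an arbitrary value, and ``filt_raw`` is the filtered copy created below)::
#
#     ica = ICA(n_components=0.95, random_state=97)
#     ica.fit(filt_raw)  # keeps as many PCs as needed for 95% explained variance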
#
# After visualizing the Independent Components (ICs) and excluding any that
# capture artifacts you want to repair, the sensor signal can be reconstructed
# using the `~mne.preprocessing.ICA` object's
# `~mne.preprocessing.ICA.apply` method. By default, signal
# reconstruction uses all of the ICs (less any ICs listed in ``ICA.exclude``)
# plus all of the PCs that were not included in the ICA decomposition (i.e.,
# the "PCA residual"). If you want to reduce the number of components used at
# the reconstruction stage, it is controlled by the ``n_pca_components``
# parameter (which will in turn reduce the rank of your data; by default
# ``n_pca_components=None`` resulting in no additional dimensionality
# reduction). The fitting and reconstruction procedures and the
# parameters that control dimensionality at various stages are summarized in
# the diagram below:
#
# .. graphviz:: ../../_static/diagrams/ica.dot
# :alt: Diagram of ICA procedure in MNE-Python
# :align: left
#
# See the Notes section of the `~mne.preprocessing.ICA` documentation
# for further details. Next we'll walk through an extended example that
# illustrates each of these steps in greater detail.
#
# Example: EOG and ECG artifact repair
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Visualizing the artifacts
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Let's begin by visualizing the artifacts that we want to repair. In this
# dataset they are big enough to see easily in the raw data:
# pick some channels that clearly show heartbeats and blinks
regexp = r'(MEG [12][45][123]1|EEG 00.)'
artifact_picks = mne.pick_channels_regexp(raw.ch_names, regexp=regexp)
raw.plot(order=artifact_picks, n_channels=len(artifact_picks),
show_scrollbars=False)
###############################################################################
# We can get a summary of how the ocular artifact manifests across each channel
# type using :func:`~mne.preprocessing.create_eog_epochs` like we did in the
# :ref:`tut-artifact-overview` tutorial:
eog_evoked = create_eog_epochs(raw).average()
eog_evoked.apply_baseline(baseline=(None, -0.2))
eog_evoked.plot_joint()
###############################################################################
# Now we'll do the same for the heartbeat artifacts, using
# :func:`~mne.preprocessing.create_ecg_epochs`:
ecg_evoked = create_ecg_epochs(raw).average()
ecg_evoked.apply_baseline(baseline=(None, -0.2))
ecg_evoked.plot_joint()
###############################################################################
# Filtering to remove slow drifts
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Before we run the ICA, an important step is filtering the data to remove
# low-frequency drifts, which can negatively affect the quality of the ICA fit.
# The slow drifts are problematic because they reduce the independence of the
# assumed-to-be-independent sources (e.g., during a slow upward drift, the
# neural, heartbeat, blink, and other muscular sources will all tend to have
# higher values), making it harder for the algorithm to find an accurate
# solution. A high-pass filter with 1 Hz cutoff frequency is recommended.
# However, because filtering is a linear operation, the ICA solution found from
# the filtered signal can be applied to the unfiltered signal (see [2]_ for
# more information), so we'll keep a copy of the unfiltered
# :class:`~mne.io.Raw` object around so we can apply the ICA solution to it
# later.
filt_raw = raw.copy()
filt_raw.load_data().filter(l_freq=1., h_freq=None)
###############################################################################
# Fitting and plotting the ICA solution
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# .. sidebar:: Ignoring the time domain
#
# The ICA algorithms implemented in MNE-Python find patterns across
# channels, but ignore the time domain. This means you can compute ICA on
# discontinuous :class:`~mne.Epochs` or :class:`~mne.Evoked` objects (not
# just continuous :class:`~mne.io.Raw` objects), or only use every Nth
# sample by passing the ``decim`` parameter to ``ICA.fit()``.
#
# Now we're ready to set up and fit the ICA. Since we know (from observing our
# raw data) that the EOG and ECG artifacts are fairly strong, we would expect
# those artifacts to be captured in the first few dimensions of the PCA
# decomposition that happens before the ICA. Therefore, we probably don't need
# a huge number of components to do a good job of isolating our artifacts
# (though it is usually preferable to include more components for a more
# accurate solution). As a first guess, we'll run ICA with ``n_components=15``
# (use only the first 15 PCA components to compute the ICA decomposition) — a
# very small number given that our data has over 300 channels, but with the
# advantage that it will run quickly and we will be able to tell easily whether it
# worked or not (because we already know what the EOG / ECG artifacts should
# look like).
#
# ICA fitting is not deterministic (e.g., the components may get a sign
# flip on different runs, or may not always be returned in the same order), so
# we'll also specify a `random seed`_ so that we get identical results each
# time this tutorial is built by our web servers.
ica = ICA(n_components=15, random_state=97)
ica.fit(filt_raw)
###############################################################################
# Some optional parameters that we could have passed to the
# :meth:`~mne.preprocessing.ICA.fit` method include ``decim`` (to use only
# every Nth sample in computing the ICs, which can yield a considerable
# speed-up) and ``reject`` (for providing a rejection dictionary for maximum
# acceptable peak-to-peak amplitudes for each channel type, just like we used
# when creating epoched data in the :ref:`tut-overview` tutorial).
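#
# For instance (an illustrative sketch with hypothetical threshold values, not
# executed here so it does not alter the fit above)::
#
#     ica.fit(filt_raw, decim=3,
#             reject=dict(mag=4000e-15, grad=4000e-13, eeg=150e-6))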
#
# Now we can examine the ICs to see what they captured.
# :meth:`~mne.preprocessing.ICA.plot_sources` will show the time series of the
# ICs. Note that in our call to :meth:`~mne.preprocessing.ICA.plot_sources` we
# can use the original, unfiltered :class:`~mne.io.Raw` object:
raw.load_data()
ica.plot_sources(raw, show_scrollbars=False)
###############################################################################
# Here we can pretty clearly see that the first component (``ICA000``) captures
# the EOG signal quite well, and the second component (``ICA001``) looks a lot
# like `a heartbeat <qrs_>`_ (for more info on visually identifying Independent
# Components, `this EEGLAB tutorial`_ is a good resource). We can also
# visualize the scalp field distribution of each component using
# :meth:`~mne.preprocessing.ICA.plot_components`. These are interpolated based
# on the values in the ICA mixing matrix:
# sphinx_gallery_thumbnail_number = 9
ica.plot_components()
###############################################################################
# .. note::
#
# :meth:`~mne.preprocessing.ICA.plot_components` (which plots the scalp
# field topographies for each component) has an optional ``inst`` parameter
# that takes an instance of :class:`~mne.io.Raw` or :class:`~mne.Epochs`.
# Passing ``inst`` makes the scalp topographies interactive: clicking one
# will bring up a diagnostic :meth:`~mne.preprocessing.ICA.plot_properties`
# window (see below) for that component.
#
# In the plots above it's fairly obvious which ICs are capturing our EOG and
# ECG artifacts, but there are additional ways to visualize them anyway, just to
# be sure. First, we can plot an overlay of the original signal against the
# reconstructed signal with the artifactual ICs excluded, using
# :meth:`~mne.preprocessing.ICA.plot_overlay`:
# blinks
ica.plot_overlay(raw, exclude=[0], picks='eeg')
# heartbeats
ica.plot_overlay(raw, exclude=[1], picks='mag')
###############################################################################
# We can also plot some diagnostics of each IC using
# :meth:`~mne.preprocessing.ICA.plot_properties`:
ica.plot_properties(raw, picks=[0, 1])
###############################################################################
# In the remaining sections, we'll look at different ways of choosing which ICs
# to exclude prior to reconstructing the sensor signals.
#
#
# Selecting ICA components manually
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Once we're certain which components we want to exclude, we can specify that
# manually by setting the ``ica.exclude`` attribute. Similar to marking bad
# channels, merely setting ``ica.exclude`` doesn't do anything immediately (it
# just adds the excluded ICs to a list that will get used later when it's
# needed). Once the exclusions have been set, ICA methods like
# :meth:`~mne.preprocessing.ICA.plot_overlay` will exclude those component(s)
# even if no ``exclude`` parameter is passed, and the list of excluded
# components will be preserved when using :meth:`mne.preprocessing.ICA.save`
# and :func:`mne.preprocessing.read_ica`.
ica.exclude = [0, 1] # indices chosen based on various plots above
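# The exclusions (and the rest of the fitted solution) can be written to disk
# and reloaded later; a sketch with a hypothetical filename (MNE expects ICA
# filenames to end in ``-ica.fif``), left commented out here:
# ica.save('sample_audvis-ica.fif')
# ica_restored = mne.preprocessing.read_ica('sample_audvis-ica.fif')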
###############################################################################
# Now that the exclusions have been set, we can reconstruct the sensor signals
# with artifacts removed using the :meth:`~mne.preprocessing.ICA.apply` method
# (remember, we're applying the ICA solution from the *filtered* data to the
# original *unfiltered* signal). Plotting the original raw data alongside the
# reconstructed data shows that the heartbeat and blink artifacts are repaired.
# ica.apply() changes the Raw object in-place, so let's make a copy first:
reconst_raw = raw.copy()
ica.apply(reconst_raw)
raw.plot(order=artifact_picks, n_channels=len(artifact_picks),
show_scrollbars=False)
reconst_raw.plot(order=artifact_picks, n_channels=len(artifact_picks),
show_scrollbars=False)
del reconst_raw
###############################################################################
# Using an EOG channel to select ICA components
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# It may have seemed easy to review the plots and manually select which ICs to
# exclude, but when processing dozens or hundreds of subjects this can become
# a tedious, rate-limiting step in the analysis pipeline. One alternative is to
# use dedicated EOG or ECG sensors as a "pattern" to check the ICs against, and
# automatically mark for exclusion any ICs that match the EOG/ECG pattern. Here
# we'll use :meth:`~mne.preprocessing.ICA.find_bads_eog` to automatically find
# the ICs that best match the EOG signal, then use
# :meth:`~mne.preprocessing.ICA.plot_scores` along with our other plotting
# functions to see which ICs it picked. We'll start by resetting
# ``ica.exclude`` back to an empty list:
ica.exclude = []
# find which ICs match the EOG pattern
eog_indices, eog_scores = ica.find_bads_eog(raw)
ica.exclude = eog_indices
# barplot of ICA component "EOG match" scores
ica.plot_scores(eog_scores)
# plot diagnostics
ica.plot_properties(raw, picks=eog_indices)
# plot ICs applied to raw data, with EOG matches highlighted
ica.plot_sources(raw, show_scrollbars=False)
# plot ICs applied to the averaged EOG epochs, with EOG matches highlighted
ica.plot_sources(eog_evoked)
###############################################################################
# Note that above we used :meth:`~mne.preprocessing.ICA.plot_sources` on both
# the original :class:`~mne.io.Raw` instance and also on an
# :class:`~mne.Evoked` instance of the extracted EOG artifacts. This can be
# another way to confirm that :meth:`~mne.preprocessing.ICA.find_bads_eog` has
# identified the correct components.
#
#
# Using a simulated channel to select ICA components
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# If you don't have an EOG channel,
# :meth:`~mne.preprocessing.ICA.find_bads_eog` has a ``ch_name`` parameter that
# you can use as a proxy for EOG. You can use a single channel, or create a
# bipolar reference from frontal EEG sensors and use that as a virtual EOG
# channel. This carries a risk, however: you must hope that the frontal EEG
# channels only reflect EOG and not brain dynamics in the prefrontal cortex (or
# you must not care about those prefrontal signals).
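#
# A sketch of that approach (the channel names are assumptions and are not
# present in this sample dataset, so the lines are illustration only)::
#
#     raw_bip = mne.set_bipolar_reference(raw, anode='Fp1', cathode='Fp2',
#                                         ch_name='vEOG')
#     eog_indices, eog_scores = ica.find_bads_eog(raw_bip, ch_name='vEOG')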
#
# For ECG, it is easier: :meth:`~mne.preprocessing.ICA.find_bads_ecg` can use
# cross-channel averaging of magnetometer or gradiometer channels to construct
# a virtual ECG channel, so if you have MEG channels it is usually not
# necessary to pass a specific channel name.
# :meth:`~mne.preprocessing.ICA.find_bads_ecg` also has two options for its
# ``method`` parameter: ``'ctps'`` (cross-trial phase statistics [3]_) and
# ``'correlation'`` (Pearson correlation between data and ECG channel).
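#
# Below we use ``'correlation'``; the phase-statistics variant would be called
# the same way (illustration only, not executed here)::
#
#     ecg_indices, ecg_scores = ica.find_bads_ecg(raw, method='ctps')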
ica.exclude = []
# find which ICs match the ECG pattern
ecg_indices, ecg_scores = ica.find_bads_ecg(raw, method='correlation',
threshold='auto')
ica.exclude = ecg_indices
# barplot of ICA component "ECG match" scores
ica.plot_scores(ecg_scores)
# plot diagnostics
ica.plot_properties(raw, picks=ecg_indices)
# plot ICs applied to raw data, with ECG matches highlighted
ica.plot_sources(raw, show_scrollbars=False)
# plot ICs applied to the averaged ECG epochs, with ECG matches highlighted
ica.plot_sources(ecg_evoked)
###############################################################################
# The last of these plots is especially useful: it shows us that the heartbeat
# artifact is coming through on *two* ICs, and we've only caught one of them.
# In fact, if we look closely at the output of
# :meth:`~mne.preprocessing.ICA.plot_sources` (online, you can right-click →
# "view image" to zoom in), it looks like ``ICA014`` has a weak periodic
# component that is in-phase with ``ICA001``. It might be worthwhile to re-run
# the ICA with more components to see if that second heartbeat artifact
# resolves out a little better:
# refit the ICA with 30 components this time
new_ica = ICA(n_components=30, random_state=97)
new_ica.fit(filt_raw)
# find which ICs match the ECG pattern
ecg_indices, ecg_scores = new_ica.find_bads_ecg(raw, method='correlation',
threshold='auto')
new_ica.exclude = ecg_indices
# barplot of ICA component "ECG match" scores
new_ica.plot_scores(ecg_scores)
# plot diagnostics
new_ica.plot_properties(raw, picks=ecg_indices)
# plot ICs applied to raw data, with ECG matches highlighted
new_ica.plot_sources(raw, show_scrollbars=False)
# plot ICs applied to the averaged ECG epochs, with ECG matches highlighted
new_ica.plot_sources(ecg_evoked)
###############################################################################
# Much better! Now we've captured both ICs that are reflecting the heartbeat
# artifact (and as a result, we got two diagnostic plots: one for each IC that
# reflects the heartbeat). This demonstrates the value of checking the results
# of automated approaches like :meth:`~mne.preprocessing.ICA.find_bads_ecg`
# before accepting them.
# clean up memory before moving on
del raw, filt_raw, ica, new_ica
###############################################################################
# Selecting ICA components using template matching
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# When dealing with multiple subjects, it is also possible to manually select
# an IC for exclusion on one subject, and then use that component as a
# *template* for selecting which ICs to exclude from other subjects' data,
# using :func:`mne.preprocessing.corrmap` [4]_. The idea behind
# :func:`~mne.preprocessing.corrmap` is that the artifact patterns are similar
# enough across subjects that corresponding ICs can be identified by
# correlating the ICs from each ICA solution with a common template, and
# picking the ICs with the highest correlation strength.
# :func:`~mne.preprocessing.corrmap` takes a list of ICA solutions, and a
# ``template`` parameter that specifies which ICA object and which component
# within it to use as a template.
#
# Since our sample dataset only contains data from one subject, we'll use a
# different dataset with multiple subjects: the EEGBCI dataset [5]_ [6]_. The
# dataset has 109 subjects; we'll just download one run (a left/right hand
# movement task) from each of the first 4 subjects:
mapping = {
'Fc5.': 'FC5', 'Fc3.': 'FC3', 'Fc1.': 'FC1', 'Fcz.': 'FCz', 'Fc2.': 'FC2',
'Fc4.': 'FC4', 'Fc6.': 'FC6', 'C5..': 'C5', 'C3..': 'C3', 'C1..': 'C1',
'Cz..': 'Cz', 'C2..': 'C2', 'C4..': 'C4', 'C6..': 'C6', 'Cp5.': 'CP5',
'Cp3.': 'CP3', 'Cp1.': 'CP1', 'Cpz.': 'CPz', 'Cp2.': 'CP2', 'Cp4.': 'CP4',
'Cp6.': 'CP6', 'Fp1.': 'Fp1', 'Fpz.': 'Fpz', 'Fp2.': 'Fp2', 'Af7.': 'AF7',
'Af3.': 'AF3', 'Afz.': 'AFz', 'Af4.': 'AF4', 'Af8.': 'AF8', 'F7..': 'F7',
'F5..': 'F5', 'F3..': 'F3', 'F1..': 'F1', 'Fz..': 'Fz', 'F2..': 'F2',
'F4..': 'F4', 'F6..': 'F6', 'F8..': 'F8', 'Ft7.': 'FT7', 'Ft8.': 'FT8',
'T7..': 'T7', 'T8..': 'T8', 'T9..': 'T9', 'T10.': 'T10', 'Tp7.': 'TP7',
'Tp8.': 'TP8', 'P7..': 'P7', 'P5..': 'P5', 'P3..': 'P3', 'P1..': 'P1',
'Pz..': 'Pz', 'P2..': 'P2', 'P4..': 'P4', 'P6..': 'P6', 'P8..': 'P8',
'Po7.': 'PO7', 'Po3.': 'PO3', 'Poz.': 'POz', 'Po4.': 'PO4', 'Po8.': 'PO8',
'O1..': 'O1', 'Oz..': 'Oz', 'O2..': 'O2', 'Iz..': 'Iz'
}
raws = list()
icas = list()
for subj in range(4):
# EEGBCI subjects are 1-indexed; run 3 is a left/right hand movement task
fname = mne.datasets.eegbci.load_data(subj + 1, runs=[3])[0]
raw = mne.io.read_raw_edf(fname)
# remove trailing `.` from channel names so we can set montage
raw.rename_channels(mapping)
raw.set_montage('standard_1005')
# fit ICA
ica = ICA(n_components=30, random_state=97)
ica.fit(raw)
raws.append(raw)
icas.append(ica)
###############################################################################
# Now let's run :func:`~mne.preprocessing.corrmap`:
# use the first subject as template; use Fpz as proxy for EOG
raw = raws[0]
ica = icas[0]
eog_inds, eog_scores = ica.find_bads_eog(raw, ch_name='Fpz')
corrmap(icas, template=(0, eog_inds[0]))
###############################################################################
# The first figure shows the template map, while the second figure shows all
# the maps that were considered a "match" for the template (including the
# template itself). There were only three matches from the four subjects;
# notice the output message ``No maps selected for subject(s) 1, consider a
# more liberal threshold``. By default the threshold is set automatically by
# trying several values; here it may have chosen a threshold that is too high.
# Let's take a look at the ICA sources for each subject:
for index, (ica, raw) in enumerate(zip(icas, raws)):
fig = ica.plot_sources(raw, show_scrollbars=False)
fig.subplots_adjust(top=0.9) # make space for title
fig.suptitle('Subject {}'.format(index))
###############################################################################
# Notice that subject 1 *does* seem to have an IC that looks like it reflects
# blink artifacts (component ``ICA000``). Notice also that subject 3 appears to
# have *two* components that are reflecting ocular artifacts (``ICA000`` and
# ``ICA002``), but only one was caught by :func:`~mne.preprocessing.corrmap`.
# Let's try setting the threshold manually:
corrmap(icas, template=(0, eog_inds[0]), threshold=0.9)
###############################################################################
# Now we get the message ``At least 1 IC detected for each subject`` (which is
# good). At this point we'll re-run :func:`~mne.preprocessing.corrmap` with
# parameters ``label='blink', plot=False`` to *label* the ICs from each subject
# that capture the blink artifacts (without plotting them again).
corrmap(icas, template=(0, eog_inds[0]), threshold=0.9, label='blink',
plot=False)
print([ica.labels_ for ica in icas])
###############################################################################
# Notice that the first subject has 3 different labels for the IC at index 0:
# "eog/0/Fpz", "eog", and "blink". The first two were added by
# :meth:`~mne.preprocessing.ICA.find_bads_eog`; the "blink" label was added by
# the last call to :func:`~mne.preprocessing.corrmap`. Notice also that each
# subject has at least one IC index labelled "blink", and subject 3 has two
# components (0 and 2) labelled "blink" (consistent with the plot of IC sources
# above). The ``labels_`` attribute of :class:`~mne.preprocessing.ICA` objects
# can also be manually edited to annotate the ICs with custom labels. They also
# come in handy when plotting:
icas[3].plot_components(picks=icas[3].labels_['blink'])
icas[3].exclude = icas[3].labels_['blink']
icas[3].plot_sources(raws[3], show_scrollbars=False)
###############################################################################
# As a final note, it is possible to extract ICs numerically using the
# :meth:`~mne.preprocessing.ICA.get_components` method of
# :class:`~mne.preprocessing.ICA` objects. This will return a :class:`NumPy
# array <numpy.ndarray>` that can be passed to
# :func:`~mne.preprocessing.corrmap` instead of the :class:`tuple` of
# ``(subject_index, component_index)`` we passed before, and will yield the
# same result:
template_eog_component = icas[0].get_components()[:, eog_inds[0]]
corrmap(icas, template=template_eog_component, threshold=0.9)
print(template_eog_component)
###############################################################################
# An advantage of using this numerical representation of an IC to capture a
# particular artifact pattern is that it can be saved and used as a template
# for future template-matching tasks using :func:`~mne.preprocessing.corrmap`
# without having to load or recompute the ICA solution that yielded the
# template originally. Put another way, when the template is a NumPy array, the
# :class:`~mne.preprocessing.ICA` object containing the template does not need
# to be in the list of ICAs provided to :func:`~mne.preprocessing.corrmap`.
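#
# For example, the template array could be stored with NumPy and reloaded in a
# later session (a sketch with hypothetical filenames and a hypothetical
# ``other_icas`` list of fitted ICA objects; not executed here)::
#
#     import numpy as np
#     np.save('eog_template.npy', template_eog_component)
#     # ...later, with a different set of fitted ICA objects...
#     corrmap(other_icas, template=np.load('eog_template.npy'), threshold=0.9,
#             label='blink', plot=False)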
#
#
# References
# ^^^^^^^^^^
#
# .. [1] Ablin P, Cardoso J, Gramfort A (2018). Faster Independent Component
# Analysis by Preconditioning With Hessian Approximations. *IEEE
# Transactions on Signal Processing* 66:4040–4049.
# https://doi.org/10.1109/TSP.2018.2844203
#
# .. [2] Winkler I, Debener S, Müller K-R, Tangermann M (2015). On the
# influence of high-pass filtering on ICA-based artifact reduction in
# EEG-ERP. Proceedings of EMBC-2015, 4101–4105.
# https://doi.org/10.1109/EMBC.2015.7319296
#
# .. [3] Dammers J, Schiek M, Boers F, Silex C, Zvyagintsev M, Pietrzyk U,
# Mathiak K (2008). Integration of amplitude and phase statistics for
# complete artifact removal in independent components of neuromagnetic
# recordings. *IEEE Transactions on Biomedical Engineering*
# 55(10):2353–2362. https://doi.org/10.1109/TBME.2008.926677
#
# .. [4] Viola FC, Thorne J, Edmonds B, Schneider T, Eichele T, Debener S
# (2009). Semi-automatic identification of independent components
# representing EEG artifact. *Clinical Neurophysiology* 120(5):868–877.
# https://doi.org/10.1016/j.clinph.2009.01.015
#
# .. [5] Schalk G, McFarland DJ, Hinterberger T, Birbaumer N, Wolpaw JR (2004).
# BCI2000: A General-Purpose Brain-Computer Interface (BCI) System.
# *IEEE Transactions on Biomedical Engineering* 51(6):1034-1043.
# https://doi.org/10.1109/TBME.2004.827072
#
# .. [6] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, Mark RG,
# Mietus JE, Moody GB, Peng C-K, Stanley HE (2000). PhysioBank,
# PhysioToolkit, and PhysioNet: Components of a New Research Resource
# for Complex Physiologic Signals. *Circulation* 101(23):e215-e220.
# https://doi.org/10.1161/01.CIR.101.23.e215
#
#
# .. LINKS
#
# .. _`blind source separation`:
# https://en.wikipedia.org/wiki/Signal_separation
# .. _`statistically independent`:
# https://en.wikipedia.org/wiki/Independence_(probability_theory)
# .. _`scikit-learn`: https://scikit-learn.org
# .. _`random seed`: https://en.wikipedia.org/wiki/Random_seed
# .. _`regular expression`: https://www.regular-expressions.info/
# .. _`qrs`: https://en.wikipedia.org/wiki/QRS_complex
# .. _`this EEGLAB tutorial`: https://labeling.ucsd.edu/tutorial/labels
|
Eric89GXL/mne-python
|
tutorials/preprocessing/plot_40_artifact_correction_ica.py
|
Python
|
bsd-3-clause
| 30,767
|
[
"Gaussian"
] |
28c4c9976d7dba5a3017d740d2049b2060f6372777549845ff01a35873fc1154
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings
from django.utils.encoding import force_str
from django.views.generic.base import View
from .models import Artist, Author, Book, Page
@override_settings(ROOT_URLCONF='generic_views.urls')
class ListViewTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.artist1 = Artist.objects.create(name='Rene Magritte')
cls.author1 = Author.objects.create(name='Roberto Bolaño', slug='roberto-bolano')
cls.author2 = Author.objects.create(name='Scott Rosenberg', slug='scott-rosenberg')
cls.book1 = Book.objects.create(name='2066', slug='2066', pages=800, pubdate=datetime.date(2008, 10, 1))
cls.book1.authors.add(cls.author1)
cls.book2 = Book.objects.create(
name='Dreaming in Code', slug='dreaming-in-code', pages=300, pubdate=datetime.date(2006, 5, 1)
)
cls.page1 = Page.objects.create(
content='I was once bitten by a moose.', template='generic_views/page_template.html'
)
def test_items(self):
res = self.client.get('/list/dict/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/list.html')
self.assertEqual(res.context['object_list'][0]['first'], 'John')
def test_queryset(self):
res = self.client.get('/list/authors/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIsInstance(res.context['view'], View)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertIsNone(res.context['paginator'])
self.assertIsNone(res.context['page_obj'])
self.assertFalse(res.context['is_paginated'])
def test_paginated_queryset(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTrue(res.context['is_paginated'])
self.assertEqual(res.context['page_obj'].number, 1)
self.assertEqual(res.context['paginator'].num_pages, 4)
self.assertEqual(res.context['author_list'][0].name, 'Author 00')
self.assertEqual(list(res.context['author_list'])[-1].name, 'Author 29')
def test_paginated_queryset_shortdata(self):
# Test that short datasets ALSO result in a paginated view.
res = self.client.get('/list/authors/paginated/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['page_obj'].number, 1)
self.assertEqual(res.context['paginator'].num_pages, 1)
self.assertFalse(res.context['is_paginated'])
def test_paginated_get_page_by_query_string(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/', {'page': '2'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 30')
self.assertEqual(res.context['page_obj'].number, 2)
def test_paginated_get_last_page_by_query_string(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/', {'page': 'last'})
self.assertEqual(res.status_code, 200)
self.assertEqual(len(res.context['object_list']), 10)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 90')
self.assertEqual(res.context['page_obj'].number, 4)
def test_paginated_get_page_by_urlvar(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/3/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 60')
self.assertEqual(res.context['page_obj'].number, 3)
def test_paginated_page_out_of_range(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/42/')
self.assertEqual(res.status_code, 404)
def test_paginated_invalid_page(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/?page=frog')
self.assertEqual(res.status_code, 404)
def test_paginated_custom_paginator_class(self):
self._make_authors(7)
res = self.client.get('/list/authors/paginated/custom_class/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['paginator'].num_pages, 1)
# Custom pagination allows for 2 orphans on a page size of 5
self.assertEqual(len(res.context['object_list']), 7)
def test_paginated_custom_page_kwarg(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/custom_page_kwarg/', {'pagina': '2'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 30')
self.assertEqual(res.context['page_obj'].number, 2)
def test_paginated_custom_paginator_constructor(self):
self._make_authors(7)
res = self.client.get('/list/authors/paginated/custom_constructor/')
self.assertEqual(res.status_code, 200)
# Custom pagination allows for 2 orphans on a page size of 5
self.assertEqual(len(res.context['object_list']), 7)
def test_paginated_orphaned_queryset(self):
self._make_authors(92)
res = self.client.get('/list/authors/paginated-orphaned/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 1)
res = self.client.get(
'/list/authors/paginated-orphaned/', {'page': 'last'})
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 3)
res = self.client.get(
'/list/authors/paginated-orphaned/', {'page': '3'})
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 3)
res = self.client.get(
'/list/authors/paginated-orphaned/', {'page': '4'})
self.assertEqual(res.status_code, 404)
def test_paginated_non_queryset(self):
res = self.client.get('/list/dict/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(len(res.context['object_list']), 1)
def test_verbose_name(self):
res = self.client.get('/list/artists/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/list.html')
self.assertEqual(list(res.context['object_list']), list(Artist.objects.all()))
self.assertIs(res.context['artist_list'], res.context['object_list'])
self.assertIsNone(res.context['paginator'])
self.assertIsNone(res.context['page_obj'])
self.assertFalse(res.context['is_paginated'])
def test_allow_empty_false(self):
res = self.client.get('/list/authors/notempty/')
self.assertEqual(res.status_code, 200)
Author.objects.all().delete()
res = self.client.get('/list/authors/notempty/')
self.assertEqual(res.status_code, 404)
def test_template_name(self):
res = self.client.get('/list/authors/template_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTemplateUsed(res, 'generic_views/list.html')
def test_template_name_suffix(self):
res = self.client.get('/list/authors/template_name_suffix/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTemplateUsed(res, 'generic_views/author_objects.html')
def test_context_object_name(self):
res = self.client.get('/list/authors/context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertNotIn('authors', res.context)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTemplateUsed(res, 'generic_views/author_list.html')
def test_duplicate_context_object_name(self):
res = self.client.get('/list/authors/dupe_context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertNotIn('authors', res.context)
self.assertNotIn('author_list', res.context)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
def test_missing_items(self):
with self.assertRaises(ImproperlyConfigured):
self.client.get('/list/authors/invalid/')
def test_paginated_list_view_does_not_load_entire_table(self):
# Regression test for #17535
self._make_authors(3)
# 1 query for authors
with self.assertNumQueries(1):
self.client.get('/list/authors/notempty/')
# same as above + 1 query to test if authors exist + 1 query for pagination
with self.assertNumQueries(3):
self.client.get('/list/authors/notempty/paginated/')
def test_explicitly_ordered_list_view(self):
Book.objects.create(name="Zebras for Dummies", pages=800, pubdate=datetime.date(2006, 9, 1))
res = self.client.get('/list/books/sorted/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object_list'][0].name, '2066')
self.assertEqual(res.context['object_list'][1].name, 'Dreaming in Code')
self.assertEqual(res.context['object_list'][2].name, 'Zebras for Dummies')
res = self.client.get('/list/books/sortedbypagesandnamedec/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object_list'][0].name, 'Dreaming in Code')
self.assertEqual(res.context['object_list'][1].name, 'Zebras for Dummies')
self.assertEqual(res.context['object_list'][2].name, '2066')
@override_settings(DEBUG=True)
def test_paginated_list_view_returns_useful_message_on_invalid_page(self):
# test for #19240
# tests that source exception's message is included in page
self._make_authors(1)
res = self.client.get('/list/authors/paginated/2/')
self.assertEqual(res.status_code, 404)
self.assertEqual(force_str(res.context.get('reason')),
"Invalid page (2): That page contains no results")
def _make_authors(self, n):
Author.objects.all().delete()
for i in range(n):
Author.objects.create(name='Author %02i' % i, slug='a%s' % i)
|
yephper/django
|
tests/generic_views/test_list.py
|
Python
|
bsd-3-clause
| 12,393
|
[
"MOOSE"
] |
5c7895301157d015a8272211c9ba3ab51d6af14ee57ee5661f30136aa59e0538
|
# -*- coding: utf-8 -*-
from flask import abort, Blueprint, g, jsonify, render_template, request, redirect, url_for
from config import CROSSWALKS, PROFILES
from datausa.profile.profile import Profile
from datausa.utils.data import attr_cache, acs_crosswalk
from datausa.utils.manip import stat
from datausa.search.views import get_img
from random import randint
# create the profile Blueprint
mod = Blueprint("profile", __name__, url_prefix="/profile")
@mod.before_request
def before_request():
g.page_type = "profile"
# create a route and function for the profile home page
@mod.route("/")
def profiles():
g.page_type = "home"
bg_imgs = randint(1,4)
return render_template("general/home.html", bg_imgs = bg_imgs)
# create a route and function for a generic profile that accepts an attribute type and id
@mod.route("/<attr_type>/<attr_id>/")
def profile(attr_type, attr_id):
if "_iocode" in attr_type:
attr_type = "iocode"
allowed_type = attr_type in PROFILES or attr_type in CROSSWALKS
allowed_id = attr_type in attr_cache and attr_id in attr_cache[attr_type]
if not allowed_type or not allowed_id:
abort(404);
if attr_type in CROSSWALKS:
attr = attr_cache[attr_type][attr_id]
crosswalks = acs_crosswalk(attr_type, attr_id)
crosswalk_map = {"acs_occ": "soc", "acs_ind": "naics", "iocode": "naics"}
crosswalk_labels = {"acs_occ": "ACS Occupation Code", "acs_ind": "ACS Industry Code", "iocode": "BEA I/O Code"}
if len(crosswalks) > 1:
g.page_type = "redirect"
attr["type"] = crosswalk_labels[attr_type]
return render_template("profile/redirect.html", attr=attr, crosswalks=crosswalks, crosswalk_type=crosswalk_map[attr_type])
return redirect(url_for('.profile', attr_type=crosswalk_map[attr_type], attr_id=crosswalks[0]["id"]))
g.page_class = attr_type
# pass id and type to Profile class
attr_data = attr_cache[attr_type][attr_id]
p = Profile(attr_data["id"], attr_type)
# render the profile template and pass the profile to jinja
return render_template("profile/index.html", profile = p)
# create a route and function for the dataloca variant of a profile
@mod.route("/dataloca/<attr_type>/<attr_id>/")
def profile_dataloca(attr_type, attr_id):
return render_template("profile/dataloca.html")
@mod.route("/stat/")
def statView():
args = {k: v for k, v in request.args.iteritems()}
col = args.pop("col", "name")
moe = args.pop("moe", False)
truncate = int(args.pop("truncate", 0))
if moe == "False":
moe = False
dataset = args.pop("dataset", False)
if dataset == "False":
dataset = False
return jsonify(stat(args, col=col, dataset=dataset, moe=moe, truncate=truncate))
@mod.route("/<attr_kind>/<attr_id>/img/")
def splash_img(attr_kind, attr_id, mode="thumb"):
return get_img(attr_kind, attr_id, "splash")
@mod.route("/<attr_type>/<attr_id>/<section>/<topic>/")
def embed_view(attr_type, attr_id, section, topic):
viz_only = request.args.get("viz", False)
if not attr_type in PROFILES:
abort(404);
g.page_class = "{} embed".format(attr_type)
topics = topic.split(",")
required_namespaces = Profile.compute_namespaces(attr_type, section, topics)
p = Profile(attr_id, attr_type, required_namespaces)
section = p.section_by_topic(section, topics)
if not section or not section.topics:
abort(404)
for t in section.topics:
if viz_only:
if "description" in t:
del t["description"]
if "stat" in t:
del t["stat"]
if "category" in t:
del t["category"]
return render_template("profile/embed.html", profile = p, section = section)
|
tgarland1/datausa-site
|
datausa/profile/views.py
|
Python
|
agpl-3.0
| 3,921
|
[
"MOE"
] |
2efcbb6959a2f48f5c1b77bc30467af0a2368ee41e6ac0c78a83179fceda8c7c
|
import sys
import time
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base import Script
from DIRAC.Core.DISET.MessageClient import MessageClient
Script.parseCommandLine()
def sendPingMsg( msgClient, pingid = 0 ):
"""
Send Ping message to the server
"""
result = msgClient.createMessage( "Ping" )
if not result[ 'OK' ]:
return result
msgObj = result[ 'Value' ]
msgObj.id = pingid
return msgClient.sendMessage( msgObj )
def pongCB( msgObj ):
"""
Callback for the Pong message.
Just send a Ping message, incrementing the id by 1
"""
pongid = msgObj.id
print "RECEIVED PONG %d" % pongid
return sendPingMsg( msgObj.msgClient, pongid + 1 )
def disconnectedCB( msgClient ):
"""
Reconnect :)
"""
# retry the connection a few times before giving up
retryCount = 3
while retryCount:
result = msgClient.connect()
if result[ 'OK' ]:
return result
time.sleep( 1 )
retryCount -= 1
return S_ERROR( "Could not reconnect... :P" )
if __name__ == "__main__":
msgClient = MessageClient( "Framework/PingPong" )
msgClient.subscribeToMessage( 'Pong', pongCB )
msgClient.subscribeToDisconnect( disconnectedCB )
result = msgClient.connect()
if not result[ 'OK' ]:
print "CANNOT CONNECT: %s" % result[ 'Message' ]
sys.exit(1)
result = sendPingMsg( msgClient )
if not result[ 'OK' ]:
print "CANNOT SEND PING: %s" % result[ 'Message' ]
sys.exit(1)
#Wait 10 secs of pingpongs :P
time.sleep( 10 )
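# Illustrative console output only: each Pong received by pongCB prints a line such as
#   RECEIVED PONG 0
#   RECEIVED PONG 1
# and the exchange continues until the 10 second sleep above expires.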
|
fstagni/DIRACDocs
|
source/DeveloperGuide/Framework/stableconns/client.py
|
Python
|
gpl-3.0
| 1,425
|
[
"DIRAC"
] |
48d23e902469970437eb21fc070bdb12d3b7038eea4c0ef2f54955582cd60549
|
########################################################################
# $HeadURL$
# File : VirtualMachineDB.py
# Author : Ricardo Graciani
# occi and multi endpoint author : Victor Mendez
########################################################################
""" VirtualMachineDB class is a front-end to the virtual machines DB
Life cycle of VMs Images in DB
- New: Inserted by Director (Name - Status = New ) if not existing when launching a new instance
- Validated: Declared by VMMonitoring Server when an Instance reports back correctly
- Error: Declared by VMMonitoring Server when an Instance reports back wrong requirements
Life cycle of VMs Instances in DB
- New: Inserted by Director before launching a new instance, to check if image is valid
- Submitted: Inserted by Director (adding UniqueID) when launches a new instance
- Wait_ssh_context: Declared by Director for a submitted instance which needs later contextualization using ssh (VirtualMachineContextualization will check)
- Contextualizing: on the wait_ssh_context path this is the next status before Running
- Running: Declared by VMMonitoring Server when an Instance reports back correctly (add LastUpdate, publicIP and privateIP)
- Stopping: Declared by VMManager Server when an Instance has been deleted outside of the VM (e.g. "Delete" button on Browse Instances)
- Halted: Declared by VMMonitoring Server when an Instance reports halting
- Stalled: Declared by VMManager Server when detects Instance no more running
- Error: Declared by VMMonitoring Server when an Instance reports back wrong requirements or reports as running when Halted
New Instances can be launched by Director if VMImage is not in Error Status.
Instance UniqueID: for KVM it could be the MAC, for Amazon the returned InstanceID(i-5dec3236), for Occi returned the VMID
Life cycle of VMs RunningPods in DB
- New: Inserted by VM Scheduler (RunningPod - Status = New ) if not existing when launching a new instance
- Unactive: Declared by VMScheduler Server when out of campaign dates
- Active: Declared by VMScheduler Server when within the campaign dates
- Error: For compatibility with common private functions
"""
import types
# DIRAC
from DIRAC import gConfig, S_ERROR, S_OK
from DIRAC.Core.Base.DB import DB
from DIRAC.Core.Utilities import DEncode, Time
__RCSID__ = "$Id: VirtualMachineDB.py 16 2010-03-15 11:39:29Z ricardo.graciani@gmail.com $"
class VirtualMachineDB( DB ):
# When checking the Status on the DB it must be one of these values, if not, the last one (Error) is set
# When declaring a new Status, it will be set to Error if not in the list
validImageStates = [ 'New', 'Validated', 'Error' ]
validInstanceStates = [ 'New', 'Submitted', 'Running', 'Stopping', 'Halted', 'Stalled', 'Error' ]
validRunningPodStates = [ 'New', 'Unactive', 'Active', 'Error' ]
# In seconds !
stallingInterval = 60 * 40
# When attempting a transition it will be checked if the current state is allowed
allowedTransitions = { 'Image' : {
'Validated' : [ 'New', 'Validated' ],
},
'Instance' : {
'Submitted' : [ 'New' ],
'Running' : [ 'Submitted', 'Running', 'Stalled', 'New' ],
'Stopping' : [ 'Running', 'Stalled' ],
'Halted' : [ 'New','Running', 'Stopping', 'Stalled', 'Halted' ],
'Stalled': [ 'New', 'Submitted', 'Running' ],
},
'RunningPod' : {
'Active' : [ 'New', 'Active', 'Unactive' ],
'Unactive' : [ 'New', 'Active', 'Unactive' ],
}
}
tablesDesc = {}
tablesDesc[ 'vm_Images' ] = { 'Fields' : { 'VMImageID' : 'BIGINT UNSIGNED AUTO_INCREMENT NOT NULL',
'Name' : 'VARCHAR(255) NOT NULL',
'Status' : 'VARCHAR(16) NOT NULL',
'LastUpdate' : 'DATETIME',
'ErrorMessage' : 'VARCHAR(255) NOT NULL DEFAULT ""',
},
'PrimaryKey' : 'VMImageID',
}
tablesDesc[ 'vm_Instances' ] = { 'Fields' : { 'InstanceID' : 'BIGINT UNSIGNED AUTO_INCREMENT NOT NULL',
'RunningPod' : 'VARCHAR(255) NOT NULL',
'Name' : 'VARCHAR(255) NOT NULL',
'Endpoint' : 'VARCHAR(255) NOT NULL',
'UniqueID' : 'VARCHAR(255) NOT NULL DEFAULT ""',
'VMImageID' : 'INTEGER UNSIGNED NOT NULL',
'Status' : 'VARCHAR(32) NOT NULL',
'LastUpdate' : 'DATETIME',
'PublicIP' : 'VARCHAR(32) NOT NULL DEFAULT ""',
'PrivateIP' : 'VARCHAR(32) NOT NULL DEFAULT ""',
'ErrorMessage' : 'VARCHAR(255) NOT NULL DEFAULT ""',
'MaxAllowedPrice' : 'FLOAT DEFAULT NULL',
'Uptime' : 'INTEGER UNSIGNED DEFAULT 0',
'Load' : 'FLOAT DEFAULT 0',
'Jobs' : 'INTEGER UNSIGNED NOT NULL DEFAULT 0'
},
'PrimaryKey' : 'InstanceID',
'Indexes': { 'Status': [ 'Status' ] },
}
tablesDesc[ 'vm_History' ] = { 'Fields' : { 'InstanceID' : 'INTEGER UNSIGNED NOT NULL',
'Status' : 'VARCHAR(32) NOT NULL',
'Load' : 'FLOAT NOT NULL',
'Jobs' : 'INTEGER UNSIGNED NOT NULL DEFAULT 0',
'TransferredFiles' : 'INTEGER UNSIGNED NOT NULL DEFAULT 0',
'TransferredBytes' : 'BIGINT UNSIGNED NOT NULL DEFAULT 0',
'Update' : 'DATETIME'
},
'Indexes': { 'InstanceID': [ 'InstanceID' ] },
}
tablesDesc[ 'vm_RunningPods' ] = { 'Fields' : { 'RunningPodID' : 'BIGINT UNSIGNED AUTO_INCREMENT NOT NULL',
'RunningPod' : 'VARCHAR(32) NOT NULL',
'CampaignStartDate' : 'DATETIME',
'CampaignEndDate' : 'DATETIME',
'Status' : 'VARCHAR(32) NOT NULL',
'LastUpdate' : 'DATETIME',
'ErrorMessage' : 'VARCHAR(255) NOT NULL DEFAULT ""'
},
'PrimaryKey' : 'RunningPodID',
'Indexes': { 'RunningPod': [ 'RunningPod', 'Status' ]
}
}
#######################
# VirtualDB constructor
#######################
def __init__( self, maxQueueSize = 10 ):
DB.__init__( self, 'VirtualMachineDB', 'WorkloadManagement/VirtualMachineDB', maxQueueSize )
if not self._MySQL__initialized:
raise Exception( 'Can not connect to VirtualMachineDB, exiting...' )
result = self.__initializeDB()
if not result[ 'OK' ]:
raise Exception( 'Can\'t create tables: %s' % result[ 'Message' ] )
#######################
# Public Functions
#######################
def setRunningPodStatus( self, runningPodName ):
"""
Set Status of a given runningPod depending in date interval
returns:
S_OK(Status) if Status is valid and not Error
S_ERROR(ErrorMessage) otherwise
"""
tableName, validStates, idName = self.__getTypeTuple( 'RunningPod' )
runningPodID = self.getFields( tableName, [ idName ], {'RunningPod': runningPodName} )
if not runningPodID[ 'OK' ]:
return runningPodID
runningPodID = runningPodID[ 'Value' ][0][0]
if not runningPodID:
return S_ERROR( 'Running pod %s not found in DB' % runningPodName )
# The runningPod exists in DB, set its status
runningPodDict = self.getRunningPodDict( runningPodName )
if not runningPodDict[ 'OK' ]:
return runningPodDict
runningPodDict = runningPodDict[ 'Value' ]
startdate=Time.fromString(runningPodDict['CampaignStartDate'])
enddate=Time.fromString(runningPodDict['CampaignEndDate'])
currentdate=Time.date()
if currentdate<startdate:
runningPodState='Unactive'
elif currentdate>enddate:
runningPodState='Unactive'
else:
runningPodState='Active'
return self.__setState( 'RunningPod', runningPodID, runningPodState )
def getRunningPodStatus( self, runningPodName):
"""
Check Status of a given runningPod
returns:
S_OK(Status) if Status is valid and not Error
S_ERROR(ErrorMessage) otherwise
"""
tableName, validStates, idName = self.__getTypeTuple( 'RunningPod' )
runningPodID = self.getFields( tableName, [ idName ], {'RunningPod': runningPodName} )
if not runningPodID[ 'OK' ]:
return runningPodID
runningPodID = runningPodID[ 'Value' ][0][0]
if not runningPodID:
return S_ERROR( 'Running pod %s not found in DB' % runningPodName )
# The runningPod exists in DB, get its status
return self.__getStatus( 'RunningPod', runningPodID )
def getRunningPodDict( self, runningPodName ):
"""
Return from CS a Dictionary with RunningPod definition
"""
#FIXME: this MUST not be on the DB module !!
#FIXME: isn't checking for Image
runningPodsCSPath = '/Resources/VirtualMachines/RunningPods'
definedRunningPods = gConfig.getSections( runningPodsCSPath )
if not definedRunningPods[ 'OK' ]:
return definedRunningPods
if runningPodName not in definedRunningPods['Value']:
return S_ERROR( 'RunningPod "%s" not defined' % runningPodName )
runningPodCSPath = '%s/%s' % ( runningPodsCSPath, runningPodName )
runningPodDict = {}
cloudEndpoints = gConfig.getValue( '%s/CloudEndpoints' % runningPodCSPath , '' )
if not cloudEndpoints:
return S_ERROR( 'Missing CloudEndpoints for RunnningPod "%s"' % runningPodName )
for option, value in gConfig.getOptionsDict( runningPodCSPath )['Value'].items():
runningPodDict[option] = value
runningPodRequirementsDict = gConfig.getOptionsDict( '%s/Requirements' % runningPodCSPath )
if not runningPodRequirementsDict[ 'OK' ]:
return S_ERROR( 'Missing Requirements for RunningPod "%s"' % runningPodName )
if 'CPUTime' in runningPodRequirementsDict[ 'Value' ]:
runningPodRequirementsDict['Value']['CPUTime'] = int( runningPodRequirementsDict['Value']['CPUTime'] )
if 'OwnerGroup' in runningPodRequirementsDict[ 'Value' ]:
runningPodRequirementsDict['Value']['OwnerGroup'] = runningPodRequirementsDict['Value']['OwnerGroup'].split(', ')
runningPodDict['Requirements'] = runningPodRequirementsDict['Value']
return S_OK( runningPodDict )
def insertRunningPod( self, runningPodName ):
"""
Insert a RunningPod record
If RunningPod name already exists then update CampaignStartDate, CampaignEndDate
to be called by VMScheduler on creation of RunningPod record
"""
tableName, validStates, idName = self.__getTypeTuple( 'RunningPod' )
runningPodDict = self.getRunningPodDict( runningPodName )
if not runningPodDict[ 'OK' ]:
return runningPodDict
runningPodDict = runningPodDict[ 'Value' ]
runningPodID = self.getFields( tableName, [ idName ], {'RunningPod': runningPodName} )
if not runningPodID[ 'OK' ]:
return runningPodID
#runningPodID = runningPodID[ 'Value' ][0][0]
if runningPodID[ 'Value' ]:
runningPodID = runningPodID[ 'Value' ][0][0]
if runningPodID > 0:
# updating CampaignStartDate, CampaignEndDate
sqlUpdate = 'UPDATE `%s` SET CampaignStartDate = "%s", CampaignEndDate = "%s" WHERE %s = %s' % \
( tableName, runningPodDict['CampaignStartDate'], runningPodDict['CampaignEndDate'], idName, runningPodID )
return self._update( sqlUpdate )
# The runningPod does not exist in DB, it has to be inserted
fields = [ 'RunningPod', 'CampaignStartDate', 'CampaignEndDate', 'Status']
values = [ runningPodName, runningPodDict['CampaignStartDate'], runningPodDict['CampaignEndDate'], 'New']
return self.insertFields( tableName , fields, values )
def checkImageStatus( self, imageName ):
"""
Check Status of a given image
Will insert a new Image in the DB if it does not exist
returns:
S_OK(Status) if Status is valid and not Error
S_ERROR(ErrorMessage) otherwise
"""
ret = self.__getImageID( imageName )
if not ret[ 'OK' ]:
return ret
return self.__getStatus( 'Image', ret[ 'Value' ] )
def insertInstance( self, uniqueID, imageName, instanceName, endpoint, runningPodName ):
"""
Check the Status of the given image and insert a new Instance in the DB
returns:
S_OK( InstanceID ) if new Instance is properly inserted
S_ERROR(ErrorMessage) otherwise
"""
imageStatus = self.checkImageStatus( imageName )
if not imageStatus[ 'OK' ]:
return imageStatus
return self.__insertInstance( uniqueID, imageName, instanceName, endpoint, runningPodName )
def setInstanceUniqueID( self, instanceID, uniqueID ):
"""
Assign a uniqueID to an instance
"""
result = self.__getInstanceID( uniqueID )
if result[ 'OK' ]:
return S_ERROR( 'UniqueID is not unique: %s' % uniqueID )
result = self._escapeString( uniqueID )
if not result[ 'OK' ]:
return result
uniqueID = result[ 'Value' ]
try:
instanceID = int( instanceID )
except ValueError:
#except Exception, e:
#FIXME: do we really want to raise an Exception ?
#raise e
return S_ERROR( "instanceID has to be a number" )
tableName, _validStates, idName = self.__getTypeTuple( 'Instance' )
sqlUpdate = "UPDATE `%s` SET UniqueID = %s WHERE %s = %d" % ( tableName, uniqueID, idName, instanceID )
return self._update( sqlUpdate )
def getInstanceParameter( self, pName, instanceID ):
""" Get the instance parameter pName for the given instanceID
:param pName: parameter name
:param instance ID: instance ID
:return: S_OK/S_ERROR, parameter value
"""
tableName, _validStates, idName = self.__getTypeTuple( 'Instance' )
if not pName in VirtualMachineDB.tablesDesc['vm_Instances']['Fields']:
return S_ERROR( 'Invalid Instance parameter %s' % pName )
sqlQuery = "SELECT %s FROM `%s` WHERE %s = %s" % ( pName, tableName, idName, instanceID )
result = self._query( sqlQuery )
if not result[ 'OK' ]:
return result
value = result[ 'Value' ][ 0 ][ 0 ]
return S_OK( value )
def getUniqueID( self, instanceID ):
"""
For a given dirac instanceID get the corresponding cloud endpoint uniqueID
"""
return self.getInstanceParameter( 'UniqueID', instanceID )
def getUniqueIDByName( self, instanceName ):
""" Get the cloud provider unique ID corresponding to the DIRAC unique name
:param name: VM name
:return: S_OK/S_ERROR, cloud unique ID as value
"""
tableName, _validStates, idName = self.__getTypeTuple( 'Instance' )
sqlQuery = "SELECT UniqueID FROM `%s` WHERE Name = '%s'" % ( tableName, instanceName )
result = self._query( sqlQuery )
if not result[ 'OK' ]:
return result
uniqueID = result[ 'Value' ][ 0 ][ 0 ]
return S_OK( uniqueID )
def getInstanceID( self, uniqueID ):
"""
Public interface for __getInstanceID
"""
return self.__getInstanceID( uniqueID )
def declareInstanceSubmitted( self, uniqueID ):
"""
After submission of the instance the Director should declare the submitted Status
"""
instanceID = self.__getInstanceID( uniqueID )
if not instanceID[ 'OK' ]:
return instanceID
instanceID = instanceID[ 'Value' ]
status = self.__setState( 'Instance', instanceID, 'Submitted' )
if status[ 'OK' ]:
self.__addInstanceHistory( instanceID, 'Submitted' )
return status
def setPublicIP( self, instanceID, publicIP ):
"""
Update publicIP when used for contextualization prior to declareInstanceRunning
"""
publicIP = publicIP.replace( '::ffff:', '' )
result = self._escapeString( publicIP )
if not result[ 'OK' ]:
return result
publicIP = result[ 'Value' ]
try:
instanceID = int( instanceID )
except ValueError:
return S_ERROR( "instanceID has to be an integer value" )
tableName, _validStates, idName = self.__getTypeTuple( 'Instance' )
sqlUpdate = 'UPDATE `%s` SET PublicIP = %s WHERE %s = %d' % ( tableName, publicIP, idName, instanceID )
return self._update( sqlUpdate )
def declareInstanceRunning( self, uniqueID, publicIP, privateIP = "" ):
"""
Declares an instance Running and sets its associated info (uniqueID, publicIP, privateIP)
Returns S_ERROR if:
- instanceName does not have a "Submitted" or "Contextualizing" entry
- uniqueID is not unique
"""
instanceID = self.__getInstanceID( uniqueID )
if not instanceID[ 'OK' ]:
return instanceID
instanceID = instanceID[ 'Value' ]
# No IPv6 prefix
publicIP = publicIP.replace( '::ffff:', '' )
self.__setInstanceIPs( instanceID, publicIP, privateIP )
status = self.__setState( 'Instance', instanceID, 'Running' )
if status[ 'OK' ]:
self.__addInstanceHistory( instanceID, 'Running' )
return self.getAllInfoForUniqueID( uniqueID )
def declareInstanceStopping( self, instanceID ):
"""
From "Stop" buttom of Browse Instance
Declares "Stopping" the instance, next heat-beat from VM will recibe a stop response to do an ordenate termination
It returns S_ERROR if the status is not OK
"""
status = self.__setState( 'Instance', instanceID, 'Stopping' )
if status[ 'OK' ]:
self.__addInstanceHistory( instanceID, 'Stopping' )
return status
def getInstanceStatus( self, instanceID ):
"""
By dirac instanceID
"""
tableName, validStates, idName = self.__getTypeTuple( 'Instance' )
if not tableName:
return S_ERROR( 'Unknown DB object Instance' )
ret = self.__getStatus( 'Instance', instanceID )
if not ret[ 'OK' ]:
return ret
if not ret[ 'Value' ]:
return S_ERROR( 'Unknown InstanceID = %s' % ( instanceID ) )
status = ret[ 'Value' ]
if not status in validStates:
return self.__setError( 'Instances', instanceID, 'Invalid Status: %s' % status )
return S_OK( status )
def recordDBHalt( self, instanceID, load ):
"""
Insert the heart beat info from a halting instance
Declares "Halted" the instance and the image
It returns S_ERROR if the status is not OK
"""
status = self.__setState( 'Instance', instanceID, 'Halted' )
if status[ 'OK' ]:
self.__addInstanceHistory( instanceID, 'Halted', load )
return status
def declareInstanceHalting( self, uniqueID, load ):
"""
Insert the heart beat info from a halting instance
Declares "Halted" the instance and the image
It returns S_ERROR if the status is not OK
"""
instanceID = self.__getInstanceID( uniqueID )
if not instanceID[ 'OK' ]:
return instanceID
instanceID = instanceID[ 'Value' ]
status = self.__setState( 'Instance', instanceID, 'Halted' )
if status[ 'OK' ]:
self.__addInstanceHistory( instanceID, 'Halted', load )
return status
def declareStalledInstances( self ):
"""
Check last Heart Beat for all Running instances and declare them Stalled if older than interval
"""
oldInstances = self.__getOldInstanceIDs( self.stallingInterval,
self.allowedTransitions[ 'Instance' ][ 'Stalled' ] )
if not oldInstances[ 'OK' ]:
return oldInstances
stallingInstances = []
if not oldInstances[ 'Value' ]:
return S_OK( stallingInstances )
for instanceID in oldInstances['Value']:
instanceID = instanceID[ 0 ]
stalled = self.__setState( 'Instance', instanceID, 'Stalled' )
if not stalled[ 'OK' ]:
continue
self.__addInstanceHistory( instanceID, 'Stalled' )
stallingInstances.append( instanceID )
return S_OK( stallingInstances )
def instanceIDHeartBeat( self, uniqueID, load, jobs, transferredFiles, transferredBytes, uptime ):
"""
Insert the heart beat info from a running instance
It checks the status of the instance and the corresponding image
Declares "Running" the instance and the image
It returns S_ERROR if the status is not OK
"""
instanceID = self.__getInstanceID( uniqueID )
if not instanceID[ 'OK' ]:
return instanceID
instanceID = instanceID[ 'Value' ]
result = self.__runningInstance( instanceID, load, jobs, transferredFiles, transferredBytes )
if not result[ 'OK' ]:
return result
self.__setLastLoadJobsAndUptime( instanceID, load, jobs, uptime )
status = self.__getStatus( 'Instance', instanceID )
if not status[ 'OK' ]:
return result
status = status[ 'Value' ]
if status == 'Stopping':
return S_OK( 'stop' )
return S_OK()
def getPublicIpFromInstance( self, uniqueId ):
"""
For a given instance uniqueId it returns the associated PublicIP from the instance table
"""
tableName, _validStates, _idName = self.__getTypeTuple( 'Instance' )
publicIP = self.getFields( tableName, [ 'PublicIP' ], {'UniqueID': uniqueId} )
if not publicIP[ 'OK' ]:
return publicIP
publicIP = publicIP[ 'Value' ]
if not publicIP:
return S_ERROR( 'Unknown %s = %s' % ( 'UniqueID', uniqueId ) )
return S_OK( publicIP[ 0 ][ 0 ] )
def getEndpointFromInstance( self, uniqueId ):
"""
For a given instance uniqueId it returns the associated Endpoint from the instance table
"""
tableName, _validStates, _idName = self.__getTypeTuple( 'Instance' )
endpoint = self.getFields( tableName, [ 'Endpoint' ], {'UniqueID': uniqueId} )
if not endpoint[ 'OK' ]:
return endpoint
endpoint = endpoint[ 'Value' ]
if not endpoint:
return S_ERROR( 'Unknown %s = %s' % ( 'UniqueID', uniqueId ) )
return S_OK( endpoint[ 0 ][ 0 ] )
def getImageNameFromInstance( self, uniqueId ):
"""
For a given uniqueId it returns the associated Name from the instance table, i.e. the ImageName of that instance
"""
tableName, _validStates, _idName = self.__getTypeTuple( 'Instance' )
imageName = self.getFields( tableName, [ 'Name' ], {'UniqueID': uniqueId} )
if not imageName[ 'OK' ]:
return imageName
imageName = imageName[ 'Value' ]
if not imageName:
return S_ERROR( 'Unknown %s = %s' % ( 'UniqueID', uniqueId ) )
return S_OK( imageName[ 0 ][ 0 ] )
def getInstancesByStatus( self, status ):
"""
Get dictionary of Image Names with InstanceIDs in given status
"""
if status not in self.validInstanceStates:
return S_ERROR( 'Status %s is not known' % status )
# InstanceTuple
tableName, _validStates, _idName = self.__getTypeTuple( 'Instance' )
runningInstances = self.getFields( tableName, [ 'VMImageID', 'UniqueID' ], {'Status': status} )
if not runningInstances[ 'OK' ]:
return runningInstances
runningInstances = runningInstances[ 'Value' ]
instancesDict = {}
imagesDict = {}
# ImageTuple
tableName, _validStates, idName = self.__getTypeTuple( 'Image' )
for imageID, uniqueID in runningInstances:
if not imageID in imagesDict:
imageName = self.getFields( tableName, [ 'Name' ], {idName: imageID} )
if not imageName[ 'OK' ]:
continue
imagesDict[ imageID ] = imageName[ 'Value' ][ 0 ][ 0 ]
if not imagesDict[ imageID ] in instancesDict:
instancesDict[ imagesDict[ imageID ] ] = []
instancesDict[ imagesDict[ imageID ] ].append( uniqueID )
return S_OK( instancesDict )
def getInstancesInfoByStatus( self, status ):
"""
Get from Instances fields UniqueID, Endpoint, PublicIP, RunningPod for instances in the given status
"""
if status not in self.validInstanceStates:
return S_ERROR( 'Status %s is not known' % status )
tableName, _validStates, _idName = self.__getTypeTuple( 'Instance' )
runningInstances = self.getFields( tableName, [ 'UniqueID', 'Endpoint', 'PublicIP', 'RunningPod' ],
{'Status': status} )
if not runningInstances[ 'OK' ]:
return runningInstances
runningInstances = runningInstances[ 'Value' ]
return S_OK( runningInstances )
def getInstancesByStatusAndEndpoint( self, status, endpoint ):
"""
Get dictionary of Image Names with InstanceIDs in given status
"""
if status not in self.validInstanceStates:
return S_ERROR( 'Status %s is not known' % status )
# InstanceTuple
tableName, _validStates, _idName = self.__getTypeTuple( 'Instance' )
runningInstances = self.getFields( tableName, [ 'VMImageID', 'UniqueID' ],
{'Status': status, 'Endpoint': endpoint} )
if not runningInstances[ 'OK' ]:
return runningInstances
runningInstances = runningInstances[ 'Value' ]
instancesDict = {}
imagesDict = {}
# ImageTuple
tableName, _validStates, idName = self.__getTypeTuple( 'Image' )
for imageID, uniqueID in runningInstances:
if not imageID in imagesDict:
imageName = self.getFields( tableName, [ 'Name' ], {idName: imageID} )
if not imageName[ 'OK' ]:
continue
imagesDict[ imageID ] = imageName[ 'Value' ][ 0 ][ 0 ]
if not imagesDict[ imageID ] in instancesDict:
instancesDict[ imagesDict[ imageID ] ] = []
instancesDict[ imagesDict[ imageID ] ].append( uniqueID )
return S_OK( instancesDict )
def getAllInfoForUniqueID( self, uniqueID ):
"""
Get all fields for a uniqueID
"""
instanceID = self.__getInstanceID( uniqueID )
if not instanceID[ 'OK' ]:
return instanceID
instanceID = instanceID[ 'Value' ]
instData = self.__getInfo( 'Instance', instanceID )
if not instData[ 'OK' ]:
return instData
instData = instData[ 'Value' ]
imgData = self.__getInfo( 'Image', instData[ 'VMImageID' ] )
if not imgData[ 'OK' ]:
return imgData
imgData = imgData[ 'Value' ]
return S_OK( { 'Image' : imgData, 'Instance' : instData } )
#############################
# Monitoring Public Functions
#############################
def getInstancesContent( self, selDict, sortList, start = 0, limit = 0 ):
"""
Function to get the contents of the db
parameters are a filter to the db
"""
#Main fields
tables = ( "`vm_Images` AS img", "`vm_Instances` AS inst")
imageFields = ( 'VMImageID', 'Name')
instanceFields = ( 'RunningPod', 'InstanceID', 'Endpoint', 'Name', 'UniqueID', 'VMImageID',
'Status', 'PublicIP', 'Status', 'ErrorMessage', 'LastUpdate', 'Load', 'Uptime', 'Jobs' )
fields = [ 'img.%s' % f for f in imageFields ] + [ 'inst.%s' % f for f in instanceFields ]
sqlQuery = "SELECT %s FROM %s" % ( ", ".join( fields ), ", ".join( tables ) )
sqlCond = [ 'img.VMImageID = inst.VMImageID' ]
for field in selDict:
if field in instanceFields:
sqlField = "inst.%s" % field
elif field in imageFields:
sqlField = "img.%s" % field
elif field in fields:
sqlField = field
else:
continue
value = selDict[ field ]
if type( value ) in ( types.StringType, types.UnicodeType ):
value = [ str( value ) ]
sqlCond.append( " OR ".join( [ "%s=%s" % ( sqlField, self._escapeString( str( value ) )[ 'Value' ] ) for value in selDict[field] ] ) )
sqlQuery += " WHERE %s" % " AND ".join( sqlCond )
if sortList:
sqlSortList = []
for sorting in sortList:
if sorting[0] in instanceFields:
sqlField = "inst.%s" % sorting[0]
elif sorting[0] in imageFields:
sqlField = "img.%s" % sorting[0]
elif sorting[0] in fields:
sqlField = sorting[0]
else:
continue
direction = sorting[1].upper()
if direction not in ( "ASC", "DESC" ):
continue
sqlSortList.append( "%s %s" % ( sqlField, direction ) )
if sqlSortList:
sqlQuery += " ORDER BY %s" % ", ".join( sqlSortList )
if limit:
sqlQuery += " LIMIT %d,%d" % ( start, limit )
retVal = self._query( sqlQuery )
if not retVal[ 'OK' ]:
return retVal
data = []
#Total records
for record in retVal[ 'Value' ]:
record = list( record )
data.append( record )
totalRecords = len( data )
sqlQuery = "SELECT COUNT( InstanceID ) FROM %s WHERE %s" % ( ", ".join( tables ),
" AND ".join( sqlCond ) )
retVal = self._query( sqlQuery )
if retVal[ 'OK' ]:
totalRecords = retVal[ 'Value' ][0][0]
#return
return S_OK( { 'ParameterNames' : fields,
'Records' : data,
'TotalRecords' : totalRecords } )
def getHistoryForInstanceID( self, instanceId ):
try:
instanceId = int( instanceId )
except ValueError:
return S_ERROR( "Instance Id has to be a number!" )
fields = ( 'Status', 'Load', 'Update', 'Jobs', 'TransferredFiles', 'TransferredBytes' )
sqlFields = [ '`%s`' % f for f in fields ]
sqlQuery = "SELECT %s FROM `vm_History` WHERE InstanceId=%d" % ( ", ".join( sqlFields ), instanceId )
retVal = self._query( sqlQuery )
if not retVal[ 'OK' ]:
return retVal
return S_OK( { 'ParameterNames' : fields, 'Records' : retVal[ 'Value' ] } )
def getInstanceCounters( self, groupField = "Status", selDict = {} ):
validFields = VirtualMachineDB.tablesDesc[ 'vm_Instances' ][ 'Fields' ]
if groupField not in validFields:
return S_ERROR( "%s is not a valid field" % groupField )
sqlCond = []
for field in selDict:
if field not in validFields:
return S_ERROR( "%s is not a valid field" % field )
value = selDict[ field ]
if type( value ) not in ( types.DictType, types.TupleType ):
value = ( value, )
value = [ self._escapeString( str( v ) )[ 'Value' ] for v in value ]
sqlCond.append( "`%s` in (%s)" % ( field, ", ".join( value ) ) )
sqlQuery = "SELECT `%s`, COUNT( `%s` ) FROM `vm_Instances`" % ( groupField, groupField )
if sqlCond:
sqlQuery += " WHERE %s" % " AND ".join( sqlCond )
sqlQuery += " GROUP BY `%s`" % groupField
result = self._query( sqlQuery )
if not result[ 'OK' ]:
return result
return S_OK( dict( result[ 'Value' ] ) )
def getHistoryValues( self, averageBucket, selDict = {}, fields2Get = False, timespan = 0 ):
try:
timespan = max( 0, int( timespan ) )
except ValueError:
return S_ERROR( "Timespan has to be an integer" )
cumulativeFields = [ 'Jobs', 'TransferredFiles', 'TransferredBytes' ]
validDataFields = [ 'Load', 'Jobs', 'TransferredFiles', 'TransferredBytes' ]
allValidFields = VirtualMachineDB.tablesDesc[ 'vm_History' ][ 'Fields' ]
if not fields2Get:
fields2Get = list( validDataFields )
for field in fields2Get:
if field not in validDataFields:
return S_ERROR( "%s is not a valid data field" % field )
#paramFields = fields2Get
try:
bucketSize = int( averageBucket )
except ValueError:
return S_ERROR( "Average bucket has to be an integer" )
sqlGroup = "FROM_UNIXTIME(UNIX_TIMESTAMP( `Update` ) - UNIX_TIMESTAMP( `Update` ) mod %d)" % bucketSize
sqlFields = [ '`InstanceID`', sqlGroup ] #+ [ "SUM(`%s`)/COUNT(`%s`)" % ( f, f ) for f in fields2Get ]
for field in fields2Get:
if field in cumulativeFields:
sqlFields.append( "MAX(`%s`)" % field )
else:
sqlFields.append( "SUM(`%s`)/COUNT(`%s`)" % ( field, field ) )
sqlGroup = "%s, InstanceID" % sqlGroup
paramFields = [ 'Update' ] + fields2Get
sqlCond = []
for field in selDict:
if field not in allValidFields:
return S_ERROR( "%s is not a valid field" % field )
value = selDict[ field ]
if type( value ) not in ( types.ListType, types.TupleType ):
value = ( value, )
value = [ self._escapeString( str( v ) )[ 'Value' ] for v in value ]
sqlCond.append( "`%s` in (%s)" % ( field, ", ".join( value ) ) )
if timespan > 0:
sqlCond.append( "TIMESTAMPDIFF( SECOND, `Update`, UTC_TIMESTAMP() ) < %d" % timespan )
sqlQuery = "SELECT %s FROM `vm_History`" % ", ".join( sqlFields )
if sqlCond:
sqlQuery += " WHERE %s" % " AND ".join( sqlCond )
sqlQuery += " GROUP BY %s ORDER BY `Update` ASC" % sqlGroup
result = self._query( sqlQuery )
if not result[ 'OK' ]:
return result
dbData = result[ 'Value' ]
#Need ext?
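# Cumulative fields (Jobs, TransferredFiles, TransferredBytes) only grow over time,
# so for buckets where an instance reported nothing we carry its last known value
# forward instead of treating the gap as zero, while non-cumulative fields get 0.
# The final aggregation then reports cumulative fields relative to the first bucket
# of the requested timespan.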
requireExtension = set()
for i in range( len( fields2Get ) ):
f = fields2Get[i]
if f in cumulativeFields:
requireExtension.add( i )
if requireExtension:
rDates = []
for row in dbData:
if row[1] not in rDates:
rDates.append( row[1] )
vmData = {}
for row in dbData:
vmID = row[0]
if vmID not in vmData:
vmData[ vmID ] = {}
vmData[ vmID ][ row[1] ] = row[2:]
rDates.sort()
dbData = []
for vmID in vmData:
prevValues = False
for rDate in rDates:
if rDate not in vmData[ vmID ]:
if prevValues:
instValues = [ rDate ]
for i in range( len( prevValues ) ):
instValues.append( prevValues[ i ] )
dbData.append( instValues )
else:
row = vmData[ vmID ][ rDate ]
prevValues = []
for i in range( len ( row ) ):
if i in requireExtension:
prevValues.append( row[i] )
else:
prevValues.append( 0 )
instValues = [ rDate ]
for i in range( len( row ) ):
instValues.append( row[ i ] )
dbData.append( instValues )
else:
#If we don't require extension just strip the InstanceID column
dbData = [ row[1:] for row in dbData ]
#Final sum
sumData = {}
for record in dbData:
recDate = record[0]
rawData = record[1:]
if recDate not in sumData:
sumData[ recDate ] = [ 0.0 for f in rawData ]
for i in range( len( rawData ) ):
sumData[ recDate ][i] += float( rawData[i] )
finalData = []
if len( sumData ) > 0:
firstValues = sumData[ sorted( sumData )[0] ]
for date in sorted( sumData ):
finalData.append( [ date ] )
values = sumData[ date ]
for i in range( len( values ) ):
if i in requireExtension:
finalData[-1].append( max( 0, values[i] - firstValues[i] ) )
else:
finalData[-1].append( values[i] )
return S_OK( { 'ParameterNames' : paramFields,
'Records' : finalData } )
def getRunningInstancesHistory( self, timespan = 0, bucketSize = 900 ):
try:
bucketSize = max( 300, int( bucketSize ) )
except ValueError:
return S_ERROR( "Bucket has to be an integer" )
try:
timespan = max( 0, int( timespan ) )
except ValueError:
return S_ERROR( "Timespan has to be an integer" )
groupby = "FROM_UNIXTIME(UNIX_TIMESTAMP( `Update` ) - UNIX_TIMESTAMP( `Update` ) mod %d )" % bucketSize
sqlFields = [ groupby, "COUNT( DISTINCT( `InstanceID` ) )" ]
sqlQuery = "SELECT %s FROM `vm_History`" % ", ".join( sqlFields )
sqlCond = [ "`Status` = 'Running'" ]
if timespan > 0:
sqlCond.append( "TIMESTAMPDIFF( SECOND, `Update`, UTC_TIMESTAMP() ) < %d" % timespan )
sqlQuery += " WHERE %s" % " AND ".join( sqlCond )
sqlQuery += " GROUP BY %s ORDER BY `Update` ASC" % groupby
return self._query( sqlQuery )
def getRunningInstancesBEPHistory( self, timespan = 0, bucketSize = 900 ):
try:
bucketSize = max( 300, int( bucketSize ) )
except ValueError:
return S_ERROR( "Bucket has to be an integer" )
try:
timespan = max( 0, int( timespan ) )
except ValueError:
return S_ERROR( "Timespan has to be an integer" )
groupby = "FROM_UNIXTIME(UNIX_TIMESTAMP( h.`Update` ) - UNIX_TIMESTAMP( h.`Update` ) mod %d )" % bucketSize
sqlFields = [ groupby, " i.Endpoint, COUNT( DISTINCT( h.`InstanceID` ) ) " ]
sqlQuery = "SELECT %s FROM `vm_History` h, `vm_Instances` i" % ", ".join( sqlFields )
sqlCond = [ " h.InstanceID = i.InstanceID AND h.`Status` = 'Running'" ]
if timespan > 0:
sqlCond.append( "TIMESTAMPDIFF( SECOND, `Update`, UTC_TIMESTAMP() ) < %d" % timespan )
sqlQuery += " WHERE %s" % " AND ".join( sqlCond )
sqlQuery += " GROUP BY %s , EndPoint ORDER BY `Update` ASC" % groupby
return self._query( sqlQuery )
def getRunningInstancesByRunningPodHistory( self, timespan = 0, bucketSize = 900 ):
try:
bucketSize = max( 300, int( bucketSize ) )
except ValueError:
return S_ERROR( "Bucket has to be an integer" )
try:
timespan = max( 0, int( timespan ) )
except ValueError:
return S_ERROR( "Timespan has to be an integer" )
groupby = "FROM_UNIXTIME(UNIX_TIMESTAMP( h.`Update` ) - UNIX_TIMESTAMP( h.`Update` ) mod %d )" % bucketSize
sqlFields = [ groupby, " i.RunningPod, COUNT( DISTINCT( h.`InstanceID` ) ) " ]
sqlQuery = "SELECT %s FROM `vm_History` h, `vm_Instances` i" % ", ".join( sqlFields )
sqlCond = [ " h.InstanceID = i.InstanceID AND h.`Status` = 'Running'" ]
if timespan > 0:
sqlCond.append( "TIMESTAMPDIFF( SECOND, `Update`, UTC_TIMESTAMP() ) < %d" % timespan )
sqlQuery += " WHERE %s" % " AND ".join( sqlCond )
sqlQuery += " GROUP BY %s , RunningPod ORDER BY `Update` ASC" % groupby
return self._query( sqlQuery )
def getRunningInstancesByImageHistory( self, timespan = 0, bucketSize = 900 ):
try:
bucketSize = max( 300, int( bucketSize ) )
except ValueError:
return S_ERROR( "Bucket has to be an integer" )
try:
timespan = max( 0, int( timespan ) )
except ValueError:
return S_ERROR( "Timespan has to be an integer" )
groupby = "FROM_UNIXTIME(UNIX_TIMESTAMP( h.`Update` ) - UNIX_TIMESTAMP( h.`Update` ) mod %d )" % bucketSize
sqlFields = [ groupby, " ins.Name, COUNT( DISTINCT( h.`InstanceID` ) ) " ]
sqlQuery = "SELECT %s FROM `vm_History` h, `vm_Images` img, `vm_Instances` ins" % ", ".join( sqlFields )
sqlCond = [ " h.InstanceID = ins.InstanceID AND img.VMImageID = ins.VMImageID AND h.`Status` = 'Running'" ]
if timespan > 0:
sqlCond.append( "TIMESTAMPDIFF( SECOND, `Update`, UTC_TIMESTAMP() ) < %d" % timespan )
sqlQuery += " WHERE %s" % " AND ".join( sqlCond )
sqlQuery += " GROUP BY %s , ins.Name ORDER BY `Update` ASC" % groupby
return self._query( sqlQuery )
#######################
# Private Functions
#######################
def __initializeDB( self ):
"""
Create the tables
"""
tables = self._query( "show tables" )
if not tables[ 'OK' ]:
return tables
tablesInDB = [ table[0] for table in tables[ 'Value' ] ]
tablesToCreate = {}
for tableName in self.tablesDesc:
if not tableName in tablesInDB:
tablesToCreate[ tableName ] = self.tablesDesc[ tableName ]
return self._createTables( tablesToCreate )
def __getTypeTuple( self, element ):
"""
return tuple of (tableName, validStates, idName) for object
"""
# defaults
tableName, validStates, idName = '', [], ''
if element == 'Image':
tableName = 'vm_Images'
validStates = self.validImageStates
idName = 'VMImageID'
elif element == 'Instance':
tableName = 'vm_Instances'
validStates = self.validInstanceStates
idName = 'InstanceID'
elif element == 'RunningPod':
tableName = 'vm_RunningPods'
validStates = self.validRunningPodStates
idName = 'RunningPodID'
return ( tableName, validStates, idName )
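# For example: __getTypeTuple( 'Instance' ) returns
# ( 'vm_Instances', self.validInstanceStates, 'InstanceID' ),
# and an unknown element returns ( '', [], '' ).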
def __insertInstance( self, uniqueID, imageName, instanceName, endpoint, runningPodName ):
"""
Attempts to insert a new Instance for the given Image in a given Endpoint of a runningPodName
"""
image = self.__getImageID( imageName )
if not image[ 'OK' ]:
return image
imageID = image[ 'Value' ]
tableName, validStates, _idName = self.__getTypeTuple( 'Instance' )
if uniqueID:
status = 'Submitted'
else:
status = validStates[ 0 ]
fields = [ 'UniqueID', 'RunningPod', 'Name', 'Endpoint', 'VMImageID', 'Status', 'LastUpdate' ]
values = [ uniqueID, runningPodName, instanceName, endpoint, imageID, status, Time.toString() ]
#runningPodDict = self.getRunningPodDict( runningPodName )
#if not runningPodDict[ 'OK' ]:
# return runningPodDict
#runningPodDict = runningPodDict[ 'Value' ]
#if 'MaxAllowedPrice' in runningPodDict:
# fields.append( 'MaxAllowedPrice' )
# values.append( runningPodDict[ 'MaxAllowedPrice' ] )
instance = self.insertFields( tableName , fields, values )
if not instance[ 'OK' ]:
return instance
if 'lastRowId' in instance:
self.__addInstanceHistory( instance[ 'lastRowId' ], status )
return S_OK( instance[ 'lastRowId' ] )
return S_ERROR( 'Failed to insert new Instance' )
def __runningInstance( self, instanceID, load, jobs, transferredFiles, transferredBytes ):
"""
Checks image status, set it to running and set instance status to running
"""
# Check the Image is OK
imageID = self.__getImageForRunningInstance( instanceID )
if not imageID[ 'OK' ]:
self.__setError( 'Instance', instanceID, imageID[ 'Message' ] )
return imageID
imageID = imageID[ 'Value' ]
# Update Instance to Running
stateInstance = self.__setState( 'Instance', instanceID, 'Running' )
if not stateInstance[ 'OK' ]:
return stateInstance
# Update Image to Validated
stateImage = self.__setState( 'Image', imageID, 'Validated' )
if not stateImage[ 'OK' ]:
self.__setError( 'Instance', instanceID, stateImage[ 'Message' ] )
return stateImage
# Add History record
self.__addInstanceHistory( instanceID, 'Running', load, jobs, transferredFiles, transferredBytes )
return S_OK()
def __getImageForRunningInstance( self, instanceID ):
"""
Looks for imageID for a given instanceID.
Check image Transition to Running is allowed
Returns:
S_OK( imageID )
S_ERROR( Reason )
"""
info = self.__getInfo( 'Instance', instanceID )
if not info[ 'OK' ]:
return info
info = info[ 'Value' ]
_tableName, _validStates, idName = self.__getTypeTuple( 'Image' )
imageID = info[ idName ]
imageStatus = self.__getStatus( 'Image', imageID )
if not imageStatus[ 'OK' ]:
return imageStatus
return S_OK( imageID )
def __getOldInstanceIDs( self, secondsIdle, states ):
"""
Return list of instance IDs that have not updated after the given time stamp
they are required to be in one of the given states
"""
tableName, _validStates, idName = self.__getTypeTuple( 'Instance' )
sqlCond = []
sqlCond.append( 'TIMESTAMPDIFF( SECOND, `LastUpdate`, UTC_TIMESTAMP() ) > % d' % secondsIdle )
sqlCond.append( 'Status IN ( "%s" )' % '", "'.join( states ) )
sqlSelect = 'SELECT %s from `%s` WHERE %s' % ( idName, tableName, " AND ".join( sqlCond ) )
return self._query( sqlSelect )
def __getSubmittedInstanceID( self, imageName ):
"""
Retrieve an InstanceID associated with a submitted Instance for a given Image
"""
tableName, _validStates, idName = self.__getTypeTuple( 'Image' )
imageID = self.getFields( tableName, [ idName ], {'Name': imageName} )
if not imageID[ 'OK' ]:
return imageID
imageID = imageID[ 'Value' ]
if not imageID:
return S_ERROR( 'Unknown Image = %s' % imageName )
if len( imageID ) != 1:
return S_ERROR( 'Image name "%s" is not unique' % imageName )
imageID = imageID[ 0 ][ 0 ]
imageIDName = idName
tableName, _validStates, idName = self.__getTypeTuple( 'Instance' )
instanceID = self.getFields( tableName, [ idName ], [ imageIDName, 'Status' ], {imageID: 'Submitted'} )
if not instanceID[ 'OK' ]:
return instanceID
instanceID = instanceID[ 'Value' ]
if not instanceID:
return S_ERROR( 'No Submitted instance of "%s" found' % imageName )
return S_OK( instanceID[ 0 ][ 0 ] )
def __setState( self, element, iD, state ):
"""
Attempt to set element in state, checking if transition is allowed
"""
knownStates = self.allowedTransitions[ element ].keys()
if not state in knownStates:
return S_ERROR( 'Transition to %s not possible' % state )
allowedStates = self.allowedTransitions[ element ][ state ]
currentState = self.__getStatus( element, iD )
if not currentState[ 'OK' ]:
return currentState
currentState = currentState[ 'Value' ]
if not currentState in allowedStates:
msg = 'Transition ( %s -> %s ) not allowed' % ( currentState, state )
if currentState == "Halted":
val_state = "halt"
elif currentState == "Stopping":
val_state = "stop"
else:
val_state = currentState
return {'OK': False, "Message": msg, 'State': val_state }
tableName, _validStates, idName = self.__getTypeTuple( element )
if currentState == state:
sqlUpdate = 'UPDATE `%s` SET LastUpdate = UTC_TIMESTAMP() WHERE %s = %s' % ( tableName, idName, iD )
else:
sqlUpdate = 'UPDATE `%s` SET Status = "%s", LastUpdate = UTC_TIMESTAMP() WHERE %s = %s' % \
( tableName, state, idName, iD )
ret = self._update( sqlUpdate )
if not ret[ 'OK' ]:
return ret
return S_OK( state )
def __setInstanceIPs( self, instanceID, publicIP, privateIP ):
"""
Update parameters for an instanceID reporting as running
"""
values = self._escapeValues( [ publicIP, privateIP ] )
if not values[ 'OK' ]:
return S_ERROR( "Cannot escape values: %s" % str( values ) )
publicIP, privateIP = values[ 'Value' ]
tableName, _validStates, idName = self.__getTypeTuple( 'Instance' )
sqlUpdate = 'UPDATE `%s` SET PublicIP = %s, PrivateIP = %s WHERE %s = %s' % \
( tableName, publicIP, privateIP, idName, instanceID )
return self._update( sqlUpdate )
def __getInstanceID( self, uniqueID ):
"""
For a given uniqueID of an instance return associated internal InstanceID
"""
tableName, _validStates, idName = self.__getTypeTuple( 'Instance' )
result = self.getFields( tableName, [ idName ], {'UniqueID': uniqueID } )
if not result['OK']:
return result
if not result['Value']:
return S_ERROR( 'Unknown %s = %s' % ( 'UniqueID', uniqueID ) )
return S_OK( result['Value'][0][0] )
def __getImageID( self, imageName ):
"""
For a given imageName return corresponding ID
Will insert the image in New Status if it does not exist
"""
tableName, validStates, idName = self.__getTypeTuple( 'Image' )
imageID = self.getFields( tableName, [ idName ], {'Name': imageName} )
if not imageID[ 'OK' ]:
return imageID
imageID = imageID[ 'Value' ]
if len( imageID ) > 1:
return S_ERROR( 'Image name "%s" is not unique' % imageName )
if len( imageID ) == 0:
# The image does not exist in DB, it has to be inserted
imageID = 0
else:
# The image exists in DB, it has to match
imageID = imageID[ 0 ][ 0 ]
if imageID:
ret = self.getFields( tableName, [ idName ], {'Name': imageName} )
if not ret[ 'OK' ]:
return ret
if not ret[ 'Value' ]:
return S_ERROR( 'Image "%s" in DB but it does not match' % imageName )
else:
return S_OK( imageID )
ret = self.insertFields( tableName, [ 'Name', 'Status', 'LastUpdate' ],
[ imageName, validStates[ 0 ], Time.toString() ] )
if ret[ 'OK' ] and 'lastRowId' in ret:
rowID = ret[ 'lastRowId' ]
ret = self.getFields( tableName, [idName], {'Name': imageName} )
if not ret[ 'OK' ]:
return ret
if not ret[ 'Value' ] or rowID != ret[ 'Value' ][ 0 ][ 0 ]:
result = self.__getInfo( 'Image', rowID )
if result[ 'OK' ]:
image = result[ 'Value' ]
self.log.error( 'Trying to insert Name: "%s"' % ( imageName ) )
self.log.error( 'But inserted Name: "%s"' % ( image['Name'] ) )
return self.__setError( 'Image', rowID, 'Failed to insert new Image' )
return S_OK( rowID )
return S_ERROR( 'Failed to insert new Image' )
def __addInstanceHistory( self, instanceID, status, load = 0.0, jobs = 0,
transferredFiles = 0, transferredBytes = 0 ):
"""
Insert a History Record
"""
try:
load = float( load )
except ValueError:
return S_ERROR( "Load has to be a float value" )
try:
jobs = int( jobs )
except ValueError:
return S_ERROR( "Jobs has to be an integer value" )
try:
transferredFiles = int( transferredFiles )
except ValueError:
return S_ERROR( "Transferred files has to be an integer value" )
self.insertFields( 'vm_History' , [ 'InstanceID', 'Status', 'Load',
'Update', 'Jobs', 'TransferredFiles',
'TransferredBytes' ],
[ instanceID, status, load,
Time.toString(), jobs,
transferredFiles, transferredBytes ] )
return
def __setLastLoadJobsAndUptime( self, instanceID, load, jobs, uptime ):
if not uptime:
sqlQuery = "SELECT MAX( UNIX_TIMESTAMP( `Update` ) ) - MIN( UNIX_TIMESTAMP( `Update` ) ) FROM `vm_History` WHERE InstanceID = %d GROUP BY InstanceID" % instanceID
result = self._query( sqlQuery )
if result[ 'OK' ] and len( result[ 'Value' ] ) > 0:
uptime = int( result[ 'Value' ][0][0] )
sqlUpdate = "UPDATE `vm_Instances` SET `Uptime` = %d, `Jobs`= %d, `Load` = %f WHERE `InstanceID` = %d" % ( uptime,
jobs,
load,
instanceID )
self._update( sqlUpdate )
return S_OK()
def __getInfo( self, element, iD ):
"""
Return dictionary with info for Images and Instances by ID
"""
tableName, _validStates, idName = self.__getTypeTuple( element )
if not tableName:
return S_ERROR( 'Unknown DB object: %s' % element )
fields = self.tablesDesc[ tableName ][ 'Fields' ]
ret = self.getFields( tableName , fields, {idName: iD} )
if not ret[ 'OK' ]:
return ret
if not ret[ 'Value' ]:
return S_ERROR( 'Unknown %s = %s' % ( idName, iD ) )
data = {}
values = ret[ 'Value' ][ 0 ]
fields = fields.keys()
for i in xrange( len( fields ) ):
data[ fields[ i ] ] = values[ i ]
return S_OK( data )
def __getStatus( self, element, iD ):
"""
Check and return status of Images and Instances by ID
returns:
S_OK(Status) if Status is valid and not Error
S_ERROR(ErrorMessage) otherwise
"""
tableName, validStates, idName = self.__getTypeTuple( element )
if not tableName:
return S_ERROR( 'Unknown DB object: %s' % element )
ret = self.getFields( tableName, [ 'Status', 'ErrorMessage' ], {idName: iD} )
if not ret[ 'OK' ]:
return ret
if not ret[ 'Value' ]:
return S_ERROR( 'Unknown %s = %s' % ( idName, iD ) )
status, msg = ret[ 'Value' ][ 0 ]
if not status in validStates:
return self.__setError( element, iD, 'Invalid Status: %s' % status )
if status == validStates[ -1 ]:
return S_ERROR( msg )
return S_OK( status )
def __setError( self, element, iD, reason ):
"""
"""
( tableName, validStates, idName ) = self.__getTypeTuple( element )
if not tableName:
return S_ERROR( 'Unknown DB object: %s' % element )
sqlUpdate = 'UPDATE `%s` SET Status = "%s", ErrorMessage = "%s", LastUpdate = UTC_TIMESTAMP() WHERE %s = %s'
sqlUpdate = sqlUpdate % ( tableName, validStates[ -1 ], reason, idName, iD )
ret = self._update( sqlUpdate )
if not ret[ 'OK' ]:
return ret
return S_ERROR( reason )
#...............................................................................
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
xianghuzhao/VMDIRAC
|
VMDIRAC/WorkloadManagementSystem/DB/VirtualMachineDB.py
|
Python
|
gpl-3.0
| 54,667
|
[
"DIRAC"
] |
196a74ff517dd62b556832517470b8628b07f5e32b2986f17079b8d345088056
|
#!/usr/bin/env python
# Author: Andrew Jewett (jewett.aij at g mail)
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2017, California Institute of Technology
# All rights reserved.
"""
Reorder the integer arguments to the commands in a LAMMPS input
file if these arguments violate LAMMPS order requirements.
We have to do this because the moltemplate.sh script will automatically
assign these integers in a way which may violate these restrictions
and the user has little control over this.
This script:
swaps the I and J integers in "pair_coeff I J ..." commands when I > J
Other features may be added later
"""
import sys
def main():
lines_orig = []
f = None
fname = None
num_lines_ignore = 0
# Lines from files passed as arguments are read and processed silently.
# (Why? Sometimes it's necessary to read the contents of previous input scripts
# in order to be able to understand a script command which appears later.
# I'm assuming these files will be processed by lammps in the same order. So I
# must ensure that moltemplate.sh passes them to this program in that order.
# I'm too lazy to read the "include" commands in input scripts correctly.)
if len(sys.argv) > 1:
for fname in sys.argv[1:]:
f = open(fname, 'r')
in_stream = f
lines_orig += in_stream.readlines()
num_lines_ignore += len(lines_orig)
f.close()
# Lines read from the standard input are read, processed, and printed to stdout
in_stream = sys.stdin
lines_orig += in_stream.readlines()
pair_style_list = []
swap_occured = False
warn_wildcard = False
i = 0
while i < len(lines_orig):
# Read the next logical line
# Any lines ending in '&' should be merged with the next line before
# breaking
line_orig = ''
while i < len(lines_orig):
line_counter = 1 + i - num_lines_ignore
line_orig += lines_orig[i]
if ((len(line_orig) < 2) or (line_orig[-2:] != '&\n')):
break
i += 1
line = line_orig.replace('&\n', '\n').rstrip('\n')
comment = ''
if '#' in line_orig:
ic = line.find('#')
line = line_orig[:ic]
# keep track of comments (put them back later)
comment = line_orig[ic:].rstrip()
tokens = line.strip().split()
if ((len(tokens) >= 2) and (tokens[0] == 'pair_style')):
pair_style_list = tokens[1:]
if ((len(tokens) >= 3) and (tokens[0] == 'pair_coeff')):
if ((tokens[1].isdigit() and (tokens[2].isdigit())) and
(int(tokens[1]) > int(tokens[2]))):
swap_occured = True
tmp = tokens[2]
tokens[2] = tokens[1]
tokens[1] = tmp
if i >= num_lines_ignore:
# polite warning:
sys.stderr.write(
'swapped pair_coeff order on line ' + str(line_counter))
# if (fname != None):
# sys.stderr.write(' of file \"'+fname+'\"')
sys.stderr.write('\n')
# Deal with the "hbond/" pair coeffs.
#
# The hbond/dreiding pair style designates one of the two atom types
# as a donor, and the other as an acceptor (using the 'i','j' flags)
# If we swapped atom types earlier, we also need to swap 'i' with 'j'.
#
# If "hbond/dreiding.." pair style is used with "hybrid" or
# "hybrid/overlay" then tokens[3] is the name of the pair style
# and tokens[5] is either 'i' or 'j'.
if len(pair_style_list) > 0:
if ((pair_style_list[0] == 'hybrid') or
(pair_style_list[0] == 'hybrid/overlay')):
if ((len(tokens) > 5) and (tokens[5] == 'i') and (tokens[3][0:6] == 'hbond/')):
tokens[5] = 'j'
sys.stderr.write(
' (and replaced \"i\" with \"j\")\n')
elif ((len(tokens) > 5) and (tokens[5] == 'j') and (tokens[3][0:6] == 'hbond/')):
tokens[5] = 'i'
sys.stderr.write(
' (and replaced \"j\" with \"i\")\n')
elif (pair_style_list[0][0:6] == 'hbond/'):
if ((len(tokens) > 4) and (tokens[4] == 'i')):
tokens[4] = 'j'
sys.stderr.write(
' (and replaced \"i\" with \"j\")\n')
elif ((len(tokens) > 4) and (tokens[4] == 'j')):
tokens[4] = 'i'
sys.stderr.write(
' (and replaced \"j\" with \"i\")\n')
sys.stdout.write(
(' '.join(tokens) + comment).replace('\n', '&\n') + '\n')
else:
if ((('*' in tokens[1]) or ('*' in tokens[2]))
and
(not (('*' == tokens[1]) and ('*' == tokens[2])))):
warn_wildcard = True
if i >= num_lines_ignore:
sys.stdout.write(line_orig)
else:
if i >= num_lines_ignore:
sys.stdout.write(line_orig)
i += 1
if swap_occured:
sys.stderr.write('\n'
' WARNING: Atom order in some pair_coeff commands was swapped to pacify LAMMPS.\n'
' For some exotic pair_styles such as hbond/dreiding, this is not enough. If you\n'
' use exotic pair_styles, please verify the \"pair_coeff\" commands are correct.\n')
if warn_wildcard:
sys.stderr.write('\n'
' WARNING: The use of wildcard characters (\"*\") in your \"pair_coeff\"\n'
' commands is not recommended.\n'
' (It is safer to specify each interaction pair manually.\n'
' Check every pair_coeff command. Make sure that every atom type in\n'
' the first group is <= atom types in the second group.\n'
' Moltemplate does NOT do this when wildcards are used.)\n'
' If you are using a many-body pair style then ignore this warning.\n')
return
if __name__ == '__main__':
main()
|
yidongxiainl/lammps
|
tools/moltemplate/moltemplate/postprocess_input_script.py
|
Python
|
gpl-2.0
| 6,865
|
[
"LAMMPS"
] |
52df4c6323ad593d94121dd3f805473e2128c2fc13570d8f5357be2970872555
|
# Author: Travis Oliphant
# 1999 -- 2002
from __future__ import division, print_function, absolute_import
import operator
import threading
import sys
import timeit
from . import sigtools, dlti
from ._upfirdn import upfirdn, _output_len
from scipy._lib.six import callable
from scipy._lib._version import NumpyVersion
from scipy import fftpack, linalg
from numpy import (allclose, angle, arange, argsort, array, asarray,
atleast_1d, atleast_2d, cast, dot, exp, expand_dims,
iscomplexobj, mean, ndarray, newaxis, ones, pi,
poly, polyadd, polyder, polydiv, polymul, polysub, polyval,
product, r_, ravel, real_if_close, reshape,
roots, sort, take, transpose, unique, where, zeros,
zeros_like)
import numpy as np
import math
from scipy.special import factorial
from .windows import get_window
from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
from .filter_design import cheby1, _validate_sos
from .fir_filter_design import firwin
if sys.version_info.major >= 3 and sys.version_info.minor >= 5:
from math import gcd
else:
from fractions import gcd
__all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d',
'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
'residuez', 'resample', 'resample_poly', 'detrend',
'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method',
'filtfilt', 'decimate', 'vectorstrength']
_modedict = {'valid': 0, 'same': 1, 'full': 2}
_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
'symmetric': 1, 'reflect': 4}
_rfft_mt_safe = (NumpyVersion(np.__version__) >= '1.9.0.dev-e24486e')
_rfft_lock = threading.Lock()
def _valfrommode(mode):
try:
return _modedict[mode]
except KeyError:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
def _bvalfromboundary(boundary):
try:
return _boundarydict[boundary] << 2
except KeyError:
raise ValueError("Acceptable boundary flags are 'fill', 'circular' "
"(or 'wrap'), and 'symmetric' (or 'symm').")
def _inputs_swap_needed(mode, shape1, shape2):
"""
If in 'valid' mode, returns whether or not the input arrays need to be
swapped depending on whether `shape1` is at least as large as `shape2` in
every dimension.
This is important for some of the correlation and convolution
implementations in this module, where the larger array input needs to come
before the smaller array input when operating in this mode.
Note that if the mode provided is not 'valid', False is immediately
returned.
"""
if mode == 'valid':
ok1, ok2 = True, True
for d1, d2 in zip(shape1, shape2):
if not d1 >= d2:
ok1 = False
if not d2 >= d1:
ok2 = False
if not (ok1 or ok2):
raise ValueError("For 'valid' mode, one must be at least "
"as large as the other in every dimension")
return not ok1
return False
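# Illustration of the rule above: in 'valid' mode, shapes (10, 10) vs (3, 4) need no
# swap (returns False), (3, 4) vs (10, 10) do (returns True), and (10, 3) vs (3, 10)
# raise a ValueError because neither array covers the other in every dimension.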
def correlate(in1, in2, mode='full', method='auto'):
r"""
Cross-correlate two N-dimensional arrays.
Cross-correlate `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
method : str {'auto', 'direct', 'fft'}, optional
A string indicating which method to use to calculate the correlation.
``direct``
The correlation is determined directly from sums, the definition of
correlation.
``fft``
The Fast Fourier Transform is used to perform the correlation more
quickly (only available for numerical arrays.)
``auto``
Automatically chooses direct or Fourier method based on an estimate
of which is faster (default). See `convolve` Notes for more detail.
.. versionadded:: 0.19.0
Returns
-------
correlate : array
An N-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
See Also
--------
choose_conv_method : contains more documentation on `method`.
Notes
-----
The correlation z of two d-dimensional arrays x and y is defined as::
z[...,k,...] = sum[..., i_l, ...] x[..., i_l,...] * conj(y[..., i_l - k,...])
This way, if x and y are 1-D arrays and ``z = correlate(x, y, 'full')``
then
.. math::
z[k] = (x * y)(k - N + 1)
= \sum_{l=0}^{||x||-1}x_l y_{l-k+N-1}^{*}
for :math:`k = 0, 1, ..., ||x|| + ||y|| - 2`
where :math:`||x||` is the length of ``x``, :math:`N = \max(||x||,||y||)`,
and :math:`y_m` is 0 when m is outside the range of y.
``method='fft'`` only works for numerical arrays as it relies on
`fftconvolve`. In certain cases (i.e., arrays of objects or when
rounding integers can lose precision), ``method='direct'`` is always used.
Examples
--------
Implement a matched filter using cross-correlation, to recover a signal
that has passed through a noisy channel.
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
>>> sig_noise = sig + np.random.randn(len(sig))
>>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128
>>> import matplotlib.pyplot as plt
>>> clock = np.arange(64, len(sig), 128)
>>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.plot(clock, sig[clock], 'ro')
>>> ax_orig.set_title('Original signal')
>>> ax_noise.plot(sig_noise)
>>> ax_noise.set_title('Signal with noise')
>>> ax_corr.plot(corr)
>>> ax_corr.plot(clock, corr[clock], 'ro')
>>> ax_corr.axhline(0.5, ls=':')
>>> ax_corr.set_title('Cross-correlated with rectangular pulse')
>>> ax_orig.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
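
    A small numeric check against the definition in the Notes (expected
    values worked out by hand):

    >>> out = signal.correlate([1, 2, 3], [0, 1, 0.5], 'full')
    >>> np.allclose(out, [0.5, 2., 3.5, 3., 0.])
    True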
"""
in1 = asarray(in1)
in2 = asarray(in2)
if in1.ndim == in2.ndim == 0:
return in1 * in2.conj()
elif in1.ndim != in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
# Don't use _valfrommode, since correlate should not accept numeric modes
try:
val = _modedict[mode]
except KeyError:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
# this either calls fftconvolve or this function with method=='direct'
if method in ('fft', 'auto'):
return convolve(in1, _reverse_and_conj(in2), mode, method)
elif method == 'direct':
# fastpath to faster numpy.correlate for 1d inputs when possible
if _np_conv_ok(in1, in2, mode):
return np.correlate(in1, in2, mode)
# _correlateND is far slower when in2.size > in1.size, so swap them
# and then undo the effect afterward if mode == 'full'. Also, it fails
# with 'valid' mode if in2 is larger than in1, so swap those, too.
# Don't swap inputs for 'same' mode, since shape of in1 matters.
swapped_inputs = ((mode == 'full') and (in2.size > in1.size) or
_inputs_swap_needed(mode, in1.shape, in2.shape))
if swapped_inputs:
in1, in2 = in2, in1
if mode == 'valid':
ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
out = np.empty(ps, in1.dtype)
z = sigtools._correlateND(in1, in2, out, val)
else:
ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
# zero pad input
in1zpadded = np.zeros(ps, in1.dtype)
sc = [slice(0, i) for i in in1.shape]
in1zpadded[sc] = in1.copy()
if mode == 'full':
out = np.empty(ps, in1.dtype)
elif mode == 'same':
out = np.empty(in1.shape, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
if swapped_inputs:
# Reverse and conjugate to undo the effect of swapping inputs
z = _reverse_and_conj(z)
return z
else:
raise ValueError("Acceptable method flags are 'auto',"
" 'direct', or 'fft'.")
def _centered(arr, newshape):
# Return the center newshape portion of the array.
newshape = asarray(newshape)
currshape = array(arr.shape)
startind = (currshape - newshape) // 2
endind = startind + newshape
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def fftconvolve(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT.
Convolve `in1` and `in2` using the fast Fourier transform method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
but can be slower when only a few output values are needed, and can only
output float arrays (int or object array inputs will be cast to float).
As of v0.19, `convolve` automatically chooses this method or the direct
method based on an estimation of which is faster.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Autocorrelation of white noise is an impulse.
>>> from scipy import signal
>>> sig = np.random.randn(1000)
>>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('White noise')
>>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
>>> ax_mag.set_title('Autocorrelation')
>>> fig.tight_layout()
>>> fig.show()
Gaussian blur implemented using FFT convolution. Notice the dark borders
around the image, due to the zero-padding beyond its boundaries.
The `convolve2d` function allows for other types of image boundaries,
but is far slower.
>>> from scipy import misc
>>> face = misc.face(gray=True)
>>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))
>>> blurred = signal.fftconvolve(face, kernel, mode='same')
>>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1,
... figsize=(6, 15))
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_kernel.imshow(kernel, cmap='gray')
>>> ax_kernel.set_title('Gaussian kernel')
>>> ax_kernel.set_axis_off()
>>> ax_blurred.imshow(blurred, cmap='gray')
>>> ax_blurred.set_title('Blurred')
>>> ax_blurred.set_axis_off()
>>> fig.show()
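
    A small numeric check: the FFT result matches the direct linear
    convolution (expected values worked out by hand):

    >>> out = signal.fftconvolve([1, 2, 3], [0, 1, 0.5])
    >>> np.allclose(out, [0., 1., 2.5, 4., 1.5])
    True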
"""
in1 = asarray(in1)
in2 = asarray(in2)
if in1.ndim == in2.ndim == 0: # scalar inputs
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
elif in1.size == 0 or in2.size == 0: # empty arrays
return array([])
s1 = array(in1.shape)
s2 = array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
np.issubdtype(in2.dtype, np.complexfloating))
shape = s1 + s2 - 1
# Check that input sizes are compatible with 'valid' mode
if _inputs_swap_needed(mode, s1, s2):
# Convolution is commutative; order doesn't have any effect on output
in1, s1, in2, s2 = in2, s2, in1, s1
# Speed up FFT by padding to optimal size for FFTPACK
fshape = [fftpack.helper.next_fast_len(int(d)) for d in shape]
fslice = tuple([slice(0, int(sz)) for sz in shape])
# Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
# sure we only call rfftn/irfftn from one thread at a time.
if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
try:
sp1 = np.fft.rfftn(in1, fshape)
sp2 = np.fft.rfftn(in2, fshape)
ret = (np.fft.irfftn(sp1 * sp2, fshape)[fslice].copy())
finally:
if not _rfft_mt_safe:
_rfft_lock.release()
else:
# If we're here, it's either because we need a complex result, or we
# failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
# is already in use by another thread). In either case, use the
# (threadsafe but slower) SciPy complex-FFT routines instead.
sp1 = fftpack.fftn(in1, fshape)
sp2 = fftpack.fftn(in2, fshape)
ret = fftpack.ifftn(sp1 * sp2)[fslice].copy()
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
return _centered(ret, s1)
elif mode == "valid":
return _centered(ret, s1 - s2 + 1)
else:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
def _numeric_arrays(arrays, kinds='buifc'):
"""
    See if a list of arrays is all numeric.

    Parameters
    ----------
    arrays : ndarray or list of ndarrays
        Arrays to check for numeric dtypes.
    kinds : string-like
        The dtype kinds to accept. If the dtype.kind of any of the
        arrays is not in this string, the function returns False;
        otherwise it returns True.
"""
if type(arrays) == ndarray:
return arrays.dtype.kind in kinds
for array_ in arrays:
if array_.dtype.kind not in kinds:
return False
return True
def _prod(iterable):
"""
Product of a list of numbers.
Faster than np.prod for short lists like array shapes.
"""
product = 1
for x in iterable:
product *= x
return product
def _fftconv_faster(x, h, mode):
"""
See if using `fftconvolve` or `_correlateND` is faster. The boolean value
returned depends on the sizes and shapes of the input values.
    The big O ratios were found to hold across different machines, which makes
    sense as it's the ratio that matters (the effective speed of the computer
    is found in both big O constants). Regardless, the constants were tuned on
    an early 2015 MacBook Pro with 8GB RAM and an Intel i5 processor.
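
    In short, the FFT path is preferred whenever
    ``big_O_constant * sum(n * log(n)) < x.size * h.size * _prod(out_shape)``,
    with the sum running over every input and output dimension (see the
    body below).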
"""
if mode == 'full':
out_shape = [n + k - 1 for n, k in zip(x.shape, h.shape)]
big_O_constant = 10963.92823819 if x.ndim == 1 else 8899.1104874
elif mode == 'same':
out_shape = x.shape
if x.ndim == 1:
if h.size <= x.size:
big_O_constant = 7183.41306773
else:
big_O_constant = 856.78174111
else:
big_O_constant = 34519.21021589
elif mode == 'valid':
out_shape = [n - k + 1 for n, k in zip(x.shape, h.shape)]
big_O_constant = 41954.28006344 if x.ndim == 1 else 66453.24316434
else:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
# see whether the Fourier transform convolution method or the direct
# convolution method is faster (discussed in scikit-image PR #1792)
direct_time = (x.size * h.size * _prod(out_shape))
fft_time = sum(n * math.log(n) for n in (x.shape + h.shape +
tuple(out_shape)))
return big_O_constant * fft_time < direct_time
def _reverse_and_conj(x):
"""
Reverse array `x` in all dimensions and perform the complex conjugate
"""
reverse = [slice(None, None, -1)] * x.ndim
return x[reverse].conj()
def _np_conv_ok(volume, kernel, mode):
"""
See if numpy supports convolution of `volume` and `kernel` (i.e. both are
1D ndarrays and of the appropriate shape). Numpy's 'same' mode uses the
size of the larger input, while Scipy's uses the size of the first input.
Invalid mode strings will return False and be caught by the calling func.
"""
if volume.ndim == kernel.ndim == 1:
if mode in ('full', 'valid'):
return True
elif mode == 'same':
return volume.size >= kernel.size
else:
return False
def _timeit_fast(stmt="pass", setup="pass", repeat=3):
"""
Returns the time the statement/function took, in seconds.
Faster, less precise version of IPython's timeit. `stmt` can be a statement
written as a string or a callable.
Will do only 1 loop (like IPython's timeit) with no repetitions
(unlike IPython) for very slow functions. For fast functions, only does
enough loops to take 5 ms, which seems to produce similar results (on
Windows at least), and avoids doing an extraneous cycle that isn't
measured.
"""
timer = timeit.Timer(stmt, setup)
# determine number of calls per rep so total time for 1 rep >= 5 ms
x = 0
for p in range(0, 10):
number = 10**p
x = timer.timeit(number) # seconds
if x >= 5e-3 / 10: # 5 ms for final test, 1/10th that for this one
break
if x > 1: # second
# If it's macroscopic, don't bother with repetitions
best = x
else:
number *= 10
r = timer.repeat(repeat, number)
best = min(r)
sec = best / number
return sec
def choose_conv_method(in1, in2, mode='full', measure=False):
"""
Find the fastest convolution/correlation method.
This primarily exists to be called during the ``method='auto'`` option in
`convolve` and `correlate`, but can also be used when performing many
convolutions of the same input shapes and dtypes, determining
which method to use for all of them, either to avoid the overhead of the
'auto' option or to use accurate real-world measurements.
Parameters
----------
in1 : array_like
The first argument passed into the convolution function.
in2 : array_like
The second argument passed into the convolution function.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
measure : bool, optional
If True, run and time the convolution of `in1` and `in2` with both
methods and return the fastest. If False (default), predict the fastest
method using precomputed values.
Returns
-------
method : str
A string indicating which convolution method is fastest, either
'direct' or 'fft'
times : dict, optional
A dictionary containing the times (in seconds) needed for each method.
This value is only returned if ``measure=True``.
See Also
--------
convolve
correlate
Notes
-----
For large n, ``measure=False`` is accurate and can quickly determine the
fastest method to perform the convolution. However, this is not as
accurate for small n (when any dimension in the input or output is small).
In practice, we found that this function estimates the faster method up to
a multiplicative factor of 5 (i.e., the estimated method is *at most* 5
times slower than the fastest method). The estimation values were tuned on
an early 2015 MacBook Pro with 8GB RAM but we found that the prediction
held *fairly* accurately across different machines.
If ``measure=True``, time the convolutions. Because this function uses
`fftconvolve`, an error will be thrown if it does not support the inputs.
There are cases when `fftconvolve` supports the inputs but this function
returns `direct` (e.g., to protect against floating point integer
precision).
.. versionadded:: 0.19
Examples
--------
Estimate the fastest method for a given input:
>>> from scipy import signal
>>> a = np.random.randn(1000)
>>> b = np.random.randn(1000000)
>>> method = signal.choose_conv_method(a, b, mode='same')
>>> method
'fft'
This can then be applied to other arrays of the same dtype and shape:
>>> c = np.random.randn(1000)
>>> d = np.random.randn(1000000)
>>> # `method` works with correlate and convolve
>>> corr1 = signal.correlate(a, b, mode='same', method=method)
>>> corr2 = signal.correlate(c, d, mode='same', method=method)
>>> conv1 = signal.convolve(a, b, mode='same', method=method)
>>> conv2 = signal.convolve(c, d, mode='same', method=method)
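
    To time both methods on the actual inputs rather than relying on the
    precomputed estimates (slower, but measured):

    >>> method, times = signal.choose_conv_method(a, b, mode='same',
    ...                                           measure=True)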
"""
volume = asarray(in1)
kernel = asarray(in2)
if measure:
times = {}
for method in ['fft', 'direct']:
times[method] = _timeit_fast(lambda: convolve(volume, kernel,
mode=mode, method=method))
chosen_method = 'fft' if times['fft'] < times['direct'] else 'direct'
return chosen_method, times
# fftconvolve doesn't support complex256
fftconv_unsup = "complex256" if sys.maxsize > 2**32 else "complex192"
if hasattr(np, fftconv_unsup):
if volume.dtype == fftconv_unsup or kernel.dtype == fftconv_unsup:
return 'direct'
# for integer input,
# catch when more precision required than float provides (representing an
# integer as float can lose precision in fftconvolve if larger than 2**52)
if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]):
max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max())
max_value *= int(min(volume.size, kernel.size))
if max_value > 2**np.finfo('float').nmant - 1:
return 'direct'
if _numeric_arrays([volume, kernel], kinds='b'):
return 'direct'
if _numeric_arrays([volume, kernel]):
if _fftconv_faster(volume, kernel, mode):
return 'fft'
return 'direct'
def convolve(in1, in2, mode='full', method='auto'):
"""
Convolve two N-dimensional arrays.
Convolve `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
method : str {'auto', 'direct', 'fft'}, optional
A string indicating which method to use to calculate the convolution.
``direct``
The convolution is determined directly from sums, the definition of
convolution.
``fft``
The Fourier Transform is used to perform the convolution by calling
`fftconvolve`.
``auto``
Automatically chooses direct or Fourier method based on an estimate
of which is faster (default). See Notes for more detail.
.. versionadded:: 0.19.0
Returns
-------
convolve : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
See Also
--------
numpy.polymul : performs polynomial multiplication (same operation, but
also accepts poly1d objects)
choose_conv_method : chooses the fastest appropriate convolution method
fftconvolve
Notes
-----
By default, `convolve` and `correlate` use ``method='auto'``, which calls
`choose_conv_method` to choose the fastest method using pre-computed
values (`choose_conv_method` can also measure real-world timing with a
keyword argument). Because `fftconvolve` relies on floating point numbers,
there are certain constraints that may force `method=direct` (more detail
in `choose_conv_method` docstring).
Examples
--------
Smooth a square pulse using a Hann window:
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 0.], 100)
>>> win = signal.hann(50)
>>> filtered = signal.convolve(sig, win, mode='same') / sum(win)
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('Original pulse')
>>> ax_orig.margins(0, 0.1)
>>> ax_win.plot(win)
>>> ax_win.set_title('Filter impulse response')
>>> ax_win.margins(0, 0.1)
>>> ax_filt.plot(filtered)
>>> ax_filt.set_title('Filtered signal')
>>> ax_filt.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
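
    A quick sanity check: convolving with a unit impulse returns the
    input unchanged, and integer inputs keep an integer dtype:

    >>> signal.convolve([1, 2, 3], [0, 1, 0], mode='same')
    array([1, 2, 3])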
"""
volume = asarray(in1)
kernel = asarray(in2)
if volume.ndim == kernel.ndim == 0:
return volume * kernel
elif volume.ndim != kernel.ndim:
raise ValueError("volume and kernel should have the same "
"dimensionality")
if _inputs_swap_needed(mode, volume.shape, kernel.shape):
# Convolution is commutative; order doesn't have any effect on output
volume, kernel = kernel, volume
if method == 'auto':
method = choose_conv_method(volume, kernel, mode=mode)
if method == 'fft':
out = fftconvolve(volume, kernel, mode=mode)
result_type = np.result_type(volume, kernel)
if result_type.kind in {'u', 'i'}:
out = np.around(out)
return out.astype(result_type)
elif method == 'direct':
# fastpath to faster numpy.convolve for 1d inputs when possible
if _np_conv_ok(volume, kernel, mode):
return np.convolve(volume, kernel, mode)
return correlate(volume, _reverse_and_conj(kernel), mode, 'direct')
else:
raise ValueError("Acceptable method flags are 'auto',"
" 'direct', or 'fft'.")
def order_filter(a, domain, rank):
"""
Perform an order filter on an N-dimensional array.
Perform an order filter on the array in. The domain argument acts as a
mask centered over each pixel. The non-zero elements of domain are
used to select elements surrounding each input pixel which are placed
in a list. The list is sorted, and the output for that pixel is the
element corresponding to rank in the sorted list.
Parameters
----------
a : ndarray
The N-dimensional input array.
domain : array_like
A mask array with the same number of dimensions as `a`.
Each dimension should have an odd number of elements.
rank : int
A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element, 1 is the
next smallest element, etc.).
Returns
-------
out : ndarray
The results of the order filter in an array with the same
shape as `a`.
Examples
--------
>>> from scipy import signal
>>> x = np.arange(25).reshape(5, 5)
>>> domain = np.identity(3)
>>> x
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> signal.order_filter(x, domain, 0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 2., 0.],
[ 0., 5., 6., 7., 0.],
[ 0., 10., 11., 12., 0.],
[ 0., 0., 0., 0., 0.]])
>>> signal.order_filter(x, domain, 2)
array([[ 6., 7., 8., 9., 4.],
[ 11., 12., 13., 14., 9.],
[ 16., 17., 18., 19., 14.],
[ 21., 22., 23., 24., 19.],
[ 20., 21., 22., 23., 24.]])
"""
domain = asarray(domain)
size = domain.shape
for k in range(len(size)):
if (size[k] % 2) != 1:
raise ValueError("Each dimension of domain argument "
" should have an odd number of elements.")
return sigtools._order_filterND(a, domain, rank)
def medfilt(volume, kernel_size=None):
"""
Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size`.
Parameters
----------
volume : array_like
An N-dimensional input array.
kernel_size : array_like, optional
A scalar or an N-length list giving the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size in
each dimension. Default size is 3 for each dimension.
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
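
    Examples
    --------
    A width-3 median filter removes an isolated spike (note that the
    edges are zero-padded, so boundary outputs lean toward zero):

    >>> from scipy import signal
    >>> y = signal.medfilt([1., 9., 1., 1., 1.], kernel_size=3)
    >>> np.allclose(y, 1)
    True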
"""
volume = atleast_1d(volume)
if kernel_size is None:
kernel_size = [3] * volume.ndim
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), volume.ndim)
for k in range(volume.ndim):
if (kernel_size[k] % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
domain = ones(kernel_size)
numels = product(kernel_size, axis=0)
order = numels // 2
return sigtools._order_filterND(volume, domain, order)
def wiener(im, mysize=None, noise=None):
"""
Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Parameters
----------
im : ndarray
An N-dimensional array.
mysize : int or array_like, optional
A scalar or an N-length list giving the size of the Wiener filter
window in each dimension. Elements of mysize should be odd.
If mysize is a scalar, then this scalar is used as the size
in each dimension.
noise : float, optional
The noise-power to use. If None, then noise is estimated as the
average of the local variance of the input.
Returns
-------
out : ndarray
Wiener filtered result with the same shape as `im`.
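
    Examples
    --------
    Apply a 5-by-5 Wiener filter to a grayscale test image (a minimal
    sketch):

    >>> from scipy import misc
    >>> from scipy import signal
    >>> img = misc.ascent()
    >>> filtered_img = signal.wiener(img, (5, 5))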
"""
im = asarray(im)
if mysize is None:
mysize = [3] * im.ndim
mysize = asarray(mysize)
if mysize.shape == ():
mysize = np.repeat(mysize.item(), im.ndim)
# Estimate the local mean
lMean = correlate(im, ones(mysize), 'same') / product(mysize, axis=0)
# Estimate the local variance
lVar = (correlate(im ** 2, ones(mysize), 'same') /
product(mysize, axis=0) - lMean ** 2)
# Estimate the noise power if needed.
if noise is None:
noise = mean(ravel(lVar), axis=0)
res = (im - lMean)
res *= (1 - noise / lVar)
res += lMean
out = where(lVar < noise, lMean, res)
return out
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Convolve two 2-dimensional arrays.
Convolve `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Compute the gradient of an image by 2D convolution with a complex Scharr
operator. (Horizontal operator is real, vertical is imaginary.) Use
symmetric boundary condition to avoid creating edges at the image
boundaries.
>>> from scipy import signal
>>> from scipy import misc
>>> ascent = misc.ascent()
>>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j],
... [-10+0j, 0+ 0j, +10 +0j],
... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
>>> grad = signal.convolve2d(ascent, scharr, boundary='symm', mode='same')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15))
>>> ax_orig.imshow(ascent, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_mag.imshow(np.absolute(grad), cmap='gray')
>>> ax_mag.set_title('Gradient magnitude')
>>> ax_mag.set_axis_off()
>>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles
>>> ax_ang.set_title('Gradient orientation')
>>> ax_ang.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if not in1.ndim == in2.ndim == 2:
raise ValueError('convolve2d inputs must both be 2D arrays')
if _inputs_swap_needed(mode, in1.shape, in2.shape):
in1, in2 = in2, in1
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
return out
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Cross-correlate two 2-dimensional arrays.
Cross correlate `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
correlate2d : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Examples
--------
Use 2D cross-correlation to find the location of a template in a noisy
image:
>>> from scipy import signal
>>> from scipy import misc
>>> face = misc.face(gray=True) - misc.face(gray=True).mean()
>>> template = np.copy(face[300:365, 670:750]) # right eye
>>> template -= template.mean()
>>> face = face + np.random.randn(*face.shape) * 50 # add noise
>>> corr = signal.correlate2d(face, template, boundary='symm', mode='same')
>>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1,
... figsize=(6, 15))
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_template.imshow(template, cmap='gray')
>>> ax_template.set_title('Template')
>>> ax_template.set_axis_off()
>>> ax_corr.imshow(corr, cmap='gray')
>>> ax_corr.set_title('Cross-correlation')
>>> ax_corr.set_axis_off()
>>> ax_orig.plot(x, y, 'ro')
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if not in1.ndim == in2.ndim == 2:
raise ValueError('correlate2d inputs must both be 2D arrays')
swapped_inputs = _inputs_swap_needed(mode, in1.shape, in2.shape)
if swapped_inputs:
in1, in2 = in2, in1
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
out = sigtools._convolve2d(in1, in2.conj(), 0, val, bval, fillvalue)
if swapped_inputs:
out = out[::-1, ::-1]
return out
def medfilt2d(input, kernel_size=3):
"""
Median filter a 2-dimensional array.
Apply a median filter to the `input` array using a local window-size
given by `kernel_size` (must be odd).
Parameters
----------
input : array_like
A 2-dimensional input array.
kernel_size : array_like, optional
A scalar or a list of length 2, giving the size of the
median filter window in each dimension. Elements of
`kernel_size` should be odd. If `kernel_size` is a scalar,
then this scalar is used as the size in each dimension.
Default is a kernel of size (3, 3).
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
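
    Examples
    --------
    The filter zero-pads beyond the array edges, so border outputs are
    pulled toward zero (a small worked example):

    >>> from scipy import signal
    >>> x = np.arange(9, dtype=np.float64).reshape(3, 3)
    >>> y = signal.medfilt2d(x, kernel_size=3)
    >>> y[1, 1], y[0, 0]
    (4.0, 0.0)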
"""
image = asarray(input)
if kernel_size is None:
kernel_size = [3] * 2
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), 2)
for size in kernel_size:
if (size % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
return sigtools._medfilt2d(image, kernel_size)
def lfilter(b, a, x, axis=-1, zi=None):
"""
Filter data along one-dimension with an IIR or FIR filter.
Filter a data sequence, `x`, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
(see Notes).
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
``max(len(a), len(b)) - 1``. If `zi` is None or is not given then
initial rest is assumed. See `lfiltic` for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
lfiltic : Construct initial conditions for `lfilter`.
lfilter_zi : Compute initial state (steady state of step response) for
`lfilter`.
filtfilt : A forward-backward filter, to obtain a filter with linear phase.
savgol_filter : A Savitzky-Golay filter.
sosfilt: Filter data using cascaded second-order sections.
sosfiltfilt: A forward-backward filter using second-order sections.
Notes
-----
The filter function is implemented as a direct II transposed structure.
This means that the filter implements::
        a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M]
                    - a[1]*y[n-1] - ... - a[N]*y[n-N]
where `M` is the degree of the numerator, `N` is the degree of the
denominator, and `n` is the sample number. It is implemented using
the following difference equations (assuming M = N)::
        a[0]*y[n] = b[0] * x[n]               + d[0][n-1]
          d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1]
          d[1][n] = b[2] * x[n] - a[2] * y[n] + d[2][n-1]
        ...
        d[N-2][n] = b[N-1]*x[n] - a[N-1]*y[n] + d[N-1][n-1]
        d[N-1][n] = b[N] * x[n] - a[N] * y[n]
where `d` are the state variables.
The rational transfer function describing this filter in the
z-transform domain is::
                            -1              -M
                b[0] + b[1]z  + ... + b[M] z
        Y(z) = -------------------------------- X(z)
                            -1              -N
                a[0] + a[1]z  + ... + a[N] z
Examples
--------
Generate a noisy signal to be filtered:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 201)
>>> x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) +
... 0.1*np.sin(2*np.pi*1.25*t + 1) +
... 0.18*np.cos(2*np.pi*3.85*t))
>>> xn = x + np.random.randn(len(t)) * 0.08
    Create an order 3 lowpass Butterworth filter:
>>> b, a = signal.butter(3, 0.05)
Apply the filter to xn. Use lfilter_zi to choose the initial condition of
the filter:
>>> zi = signal.lfilter_zi(b, a)
>>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0])
Apply the filter again, to have a result filtered at an order the same as
filtfilt:
>>> z2, _ = signal.lfilter(b, a, z, zi=zi*z[0])
Use filtfilt to apply the filter:
>>> y = signal.filtfilt(b, a, xn)
Plot the original signal and the various filtered versions:
    >>> plt.figure()
>>> plt.plot(t, xn, 'b', alpha=0.75)
>>> plt.plot(t, z, 'r--', t, z2, 'r', t, y, 'k')
>>> plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice',
... 'filtfilt'), loc='best')
>>> plt.grid(True)
>>> plt.show()
"""
a = np.atleast_1d(a)
if len(a) == 1:
# This path only supports types fdgFDGO to mirror _linear_filter below.
# Any of b, a, x, or zi can set the dtype, but there is no default
# casting of other types; instead a NotImplementedError is raised.
b = np.asarray(b)
a = np.asarray(a)
if b.ndim != 1 and a.ndim != 1:
raise ValueError('object of too small depth for desired array')
x = np.asarray(x)
inputs = [b, a, x]
if zi is not None:
# _linear_filter does not broadcast zi, but does do expansion of
# singleton dims.
zi = np.asarray(zi)
if zi.ndim != x.ndim:
raise ValueError('object of too small depth for desired array')
expected_shape = list(x.shape)
expected_shape[axis] = b.shape[0] - 1
expected_shape = tuple(expected_shape)
# check the trivial case where zi is the right shape first
if zi.shape != expected_shape:
strides = zi.ndim * [None]
if axis < 0:
axis += zi.ndim
for k in range(zi.ndim):
if k == axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == 1:
strides[k] = 0
else:
raise ValueError('Unexpected shape for zi: expected '
'%s, found %s.' %
(expected_shape, zi.shape))
zi = np.lib.stride_tricks.as_strided(zi, expected_shape,
strides)
inputs.append(zi)
dtype = np.result_type(*inputs)
if dtype.char not in 'fdgFDGO':
raise NotImplementedError("input type '%s' not supported" % dtype)
b = np.array(b, dtype=dtype)
a = np.array(a, dtype=dtype, copy=False)
b /= a[0]
x = np.array(x, dtype=dtype, copy=False)
out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)
ind = out_full.ndim * [slice(None)]
if zi is not None:
ind[axis] = slice(zi.shape[axis])
out_full[ind] += zi
ind[axis] = slice(out_full.shape[axis] - len(b) + 1)
out = out_full[ind]
if zi is None:
return out
else:
ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)
zf = out_full[ind]
return out, zf
else:
if zi is None:
return sigtools._linear_filter(b, a, x, axis)
else:
return sigtools._linear_filter(b, a, x, axis, zi)
def lfiltic(b, a, y, x=None):
"""
Construct initial conditions for lfilter given input and output vectors.
Given a linear filter (b, a) and initial conditions on the output `y`
and the input `x`, return the initial conditions on the state vector zi
which is used by `lfilter` to generate the output given the input.
Parameters
----------
b : array_like
Linear filter term.
a : array_like
Linear filter term.
y : array_like
Initial conditions.
If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
If `y` is too short, it is padded with zeros.
x : array_like, optional
Initial conditions.
If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
If `x` is not given, its initial conditions are assumed zero.
If `x` is too short, it is padded with zeros.
Returns
-------
zi : ndarray
The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``,
where ``K = max(M, N)``.
See Also
--------
lfilter, lfilter_zi
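
    Examples
    --------
    Seed `lfilter` so that a constant input continues a constant output
    from the very first sample (a sketch; it relies on the filter having
    unit gain at DC):

    >>> from scipy import signal
    >>> b, a = signal.butter(2, 0.25)
    >>> zi = signal.lfiltic(b, a, y=[1., 1.], x=[1., 1.])
    >>> y, zf = signal.lfilter(b, a, np.ones(5), zi=zi)
    >>> np.allclose(y, 1)
    True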
"""
N = np.size(a) - 1
M = np.size(b) - 1
K = max(M, N)
y = asarray(y)
if y.dtype.kind in 'bui':
# ensure calculations are floating point
y = y.astype(np.float64)
zi = zeros(K, y.dtype)
if x is None:
x = zeros(M, y.dtype)
else:
x = asarray(x)
L = np.size(x)
if L < M:
x = r_[x, zeros(M - L)]
L = np.size(y)
if L < N:
y = r_[y, zeros(N - L)]
for m in range(M):
zi[m] = np.sum(b[m + 1:] * x[:M - m], axis=0)
for m in range(N):
zi[m] -= np.sum(a[m + 1:] * y[:N - m], axis=0)
return zi
def deconvolve(signal, divisor):
"""Deconvolves ``divisor`` out of ``signal`` using inverse filtering.
Returns the quotient and remainder such that
``signal = convolve(divisor, quotient) + remainder``
Parameters
----------
signal : array_like
Signal data, typically a recorded signal
divisor : array_like
Divisor data, typically an impulse response or filter that was
applied to the original signal
Returns
-------
quotient : ndarray
Quotient, typically the recovered original signal
remainder : ndarray
Remainder
Examples
--------
Deconvolve a signal that's been filtered:
>>> from scipy import signal
>>> original = [0, 1, 0, 0, 1, 1, 0, 0]
>>> impulse_response = [2, 1]
>>> recorded = signal.convolve(impulse_response, original)
>>> recorded
array([0, 2, 1, 0, 2, 3, 1, 0, 0])
>>> recovered, remainder = signal.deconvolve(recorded, impulse_response)
>>> recovered
array([ 0., 1., 0., 0., 1., 1., 0., 0.])
See Also
--------
numpy.polydiv : performs polynomial division (same operation, but
also accepts poly1d objects)
"""
num = atleast_1d(signal)
den = atleast_1d(divisor)
N = len(num)
D = len(den)
if D > N:
quot = []
rem = num
else:
        impulse = zeros(N - D + 1, float)
        impulse[0] = 1
        quot = lfilter(num, den, impulse)
        rem = num - convolve(den, quot, mode='full')
return quot, rem
def hilbert(x, N=None, axis=-1):
"""
Compute the analytic signal, using the Hilbert transform.
The transformation is done along the last axis by default.
Parameters
----------
x : array_like
Signal data. Must be real.
N : int, optional
Number of Fourier components. Default: ``x.shape[axis]``
axis : int, optional
Axis along which to do the transformation. Default: -1.
Returns
-------
xa : ndarray
Analytic signal of `x`, of each 1-D array along `axis`
See Also
--------
scipy.fftpack.hilbert : Return Hilbert transform of a periodic sequence x.
Notes
-----
The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
.. math:: x_a = F^{-1}(F(x) 2U) = x + i y
where `F` is the Fourier transform, `U` the unit step function,
and `y` the Hilbert transform of `x`. [1]_
In other words, the negative half of the frequency spectrum is zeroed
out, turning the real-valued signal into a complex signal. The Hilbert
transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
original signal from ``np.real(hilbert(x))``.
Examples
    --------
In this example we use the Hilbert transform to determine the amplitude
envelope and instantaneous frequency of an amplitude-modulated signal.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import hilbert, chirp
>>> duration = 1.0
>>> fs = 400.0
>>> samples = int(fs*duration)
>>> t = np.arange(samples) / fs
We create a chirp of which the frequency increases from 20 Hz to 100 Hz and
apply an amplitude modulation.
>>> signal = chirp(t, 20.0, t[-1], 100.0)
>>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) )
    The amplitude envelope is given by the magnitude of the analytic signal.
    The instantaneous frequency can be obtained by differentiating the
    instantaneous phase with respect to time. The instantaneous phase
    corresponds to the phase angle of the analytic signal.
>>> analytic_signal = hilbert(signal)
>>> amplitude_envelope = np.abs(analytic_signal)
>>> instantaneous_phase = np.unwrap(np.angle(analytic_signal))
>>> instantaneous_frequency = (np.diff(instantaneous_phase) /
... (2.0*np.pi) * fs)
>>> fig = plt.figure()
>>> ax0 = fig.add_subplot(211)
>>> ax0.plot(t, signal, label='signal')
>>> ax0.plot(t, amplitude_envelope, label='envelope')
>>> ax0.set_xlabel("time in seconds")
>>> ax0.legend()
>>> ax1 = fig.add_subplot(212)
>>> ax1.plot(t[1:], instantaneous_frequency)
>>> ax1.set_xlabel("time in seconds")
>>> ax1.set_ylim(0.0, 120.0)
References
----------
.. [1] Wikipedia, "Analytic signal".
http://en.wikipedia.org/wiki/Analytic_signal
.. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2.
.. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal
Processing, Third Edition, 2009. Chapter 12.
ISBN 13: 978-1292-02572-8
"""
x = asarray(x)
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape[axis]
if N <= 0:
raise ValueError("N must be positive.")
Xf = fftpack.fft(x, N, axis=axis)
h = zeros(N)
if N % 2 == 0:
h[0] = h[N // 2] = 1
h[1:N // 2] = 2
else:
h[0] = 1
h[1:(N + 1) // 2] = 2
if x.ndim > 1:
ind = [newaxis] * x.ndim
ind[axis] = slice(None)
h = h[ind]
x = fftpack.ifft(Xf * h, axis=axis)
return x
def hilbert2(x, N=None):
"""
Compute the '2-D' analytic signal of `x`
Parameters
----------
x : array_like
2-D signal data.
N : int or tuple of two ints, optional
Number of Fourier components. Default is ``x.shape``
Returns
-------
xa : ndarray
Analytic signal of `x` taken along axes (0,1).
References
----------
.. [1] Wikipedia, "Analytic signal",
http://en.wikipedia.org/wiki/Analytic_signal
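
    Examples
    --------
    The result is complex with the same shape as the input (a minimal
    shape and dtype check):

    >>> from scipy.signal import hilbert2
    >>> x = np.random.randn(8, 8)
    >>> xa = hilbert2(x)
    >>> xa.shape, xa.dtype
    ((8, 8), dtype('complex128'))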
"""
x = atleast_2d(x)
if x.ndim > 2:
raise ValueError("x must be 2-D.")
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape
elif isinstance(N, int):
if N <= 0:
raise ValueError("N must be positive.")
N = (N, N)
elif len(N) != 2 or np.any(np.asarray(N) <= 0):
raise ValueError("When given as a tuple, N must hold exactly "
"two positive integers")
Xf = fftpack.fft2(x, N, axes=(0, 1))
    h1 = zeros(N[0], 'd')
    h2 = zeros(N[1], 'd')
    # Build the two 1-D analytic-signal multipliers in place. (The
    # previous eval/exec construction was a no-op under Python 3, where
    # exec cannot rebind function locals; the in-place mutation of h1
    # and h2 is what actually does the work.)
    for h, N1 in zip((h1, h2), N):
        if N1 % 2 == 0:
            h[0] = h[N1 // 2] = 1
            h[1:N1 // 2] = 2
        else:
            h[0] = 1
            h[1:(N1 + 1) // 2] = 2
h = h1[:, newaxis] * h2[newaxis, :]
k = x.ndim
while k > 2:
h = h[:, newaxis]
k -= 1
x = fftpack.ifft2(Xf * h, axes=(0, 1))
return x
def cmplx_sort(p):
"""Sort roots based on magnitude.
Parameters
----------
p : array_like
The roots to sort, as a 1-D array.
Returns
-------
p_sorted : ndarray
Sorted roots.
indx : ndarray
Array of indices needed to sort the input `p`.
Examples
--------
>>> from scipy import signal
>>> vals = [1, 4, 1+1.j, 3]
>>> p_sorted, indx = signal.cmplx_sort(vals)
>>> p_sorted
array([1.+0.j, 1.+1.j, 3.+0.j, 4.+0.j])
>>> indx
array([0, 2, 3, 1])
"""
p = asarray(p)
if iscomplexobj(p):
indx = argsort(abs(p))
else:
indx = argsort(p)
return take(p, indx, 0), indx
def unique_roots(p, tol=1e-3, rtype='min'):
"""
Determine unique roots and their multiplicities from a list of roots.
Parameters
----------
p : array_like
The list of roots.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
pout : ndarray
The list of unique roots, sorted from low to high.
mult : ndarray
The multiplicity of each root.
Notes
-----
This utility function is not specific to roots but can be used for any
sequence of values for which uniqueness and multiplicity has to be
determined. For a more general routine, see `numpy.unique`.
Examples
--------
>>> from scipy import signal
>>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
>>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')
Check which roots have multiplicity larger than 1:
>>> uniq[mult > 1]
array([ 1.305])
"""
if rtype in ['max', 'maximum']:
comproot = np.max
elif rtype in ['min', 'minimum']:
comproot = np.min
elif rtype in ['avg', 'mean']:
comproot = np.mean
else:
raise ValueError("`rtype` must be one of "
"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
p = asarray(p) * 1.0
tol = abs(tol)
p, indx = cmplx_sort(p)
pout = []
mult = []
indx = -1
curp = p[0] + 5 * tol
sameroots = []
for k in range(len(p)):
tr = p[k]
if abs(tr - curp) < tol:
sameroots.append(tr)
curp = comproot(sameroots)
pout[indx] = curp
mult[indx] += 1
else:
pout.append(tr)
curp = tr
sameroots = [tr]
indx += 1
mult.append(1)
return array(pout), array(mult)
def invres(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(s) and a(s) from partial fraction expansion.
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
                b(s)    b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
        H(s) = ------ = -----------------------------------------
                a(s)    a[0] s**(N) + a[1] s**(N-1) + ... + a[N]
then the partial-fraction expansion H(s) is defined as::
            r[0]       r[1]             r[-1]
        = -------- + -------- + ... + --------- + k(s)
          (s-p[0])   (s-p[1])         (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
          r[i]      r[i+1]              r[i+n-1]
        -------- + ----------- + ... + -----------
        (s-p[i])   (s-p[i])**2         (s-p[i])**n
This function is used for polynomials in positive powers of s or z,
such as analog filters or digital filters in controls engineering. For
negative powers of z (typical for digital filters in DSP), use `invresz`.
Parameters
----------
r : array_like
Residues.
p : array_like
Poles.
k : array_like
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
See Also
--------
residue, invresz, unique_roots
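
    Examples
    --------
    Reconstruct ``H(s) = (s + 1) / (s**2 + 3*s + 2)`` from the expansion
    that `residue` returns for it (residues 1 and 0 at poles -2 and -1,
    zero direct term):

    >>> from scipy import signal
    >>> b, a = signal.invres([1., 0.], [-2., -1.], [0.])
    >>> np.allclose(b, [1, 1]) and np.allclose(a, [1, 3, 2])
    True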
"""
extra = k
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
for k in range(len(pout)):
temp = []
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
b = polyadd(b, r[indx] * atleast_1d(poly(t2)))
indx += 1
b = real_if_close(b)
while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
b = b[1:]
return b, a
def residue(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(s) / a(s).
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
                b(s)    b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
        H(s) = ------ = -----------------------------------------
                a(s)    a[0] s**(N) + a[1] s**(N-1) + ... + a[N]
then the partial-fraction expansion H(s) is defined as::
            r[0]       r[1]             r[-1]
        = -------- + -------- + ... + --------- + k(s)
          (s-p[0])   (s-p[1])         (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
          r[i]      r[i+1]              r[i+n-1]
        -------- + ----------- + ... + -----------
        (s-p[i])   (s-p[i])**2         (s-p[i])**n
This function is used for polynomials in positive powers of s or z,
such as analog filters or digital filters in controls engineering. For
negative powers of z (typical for digital filters in DSP), use `residuez`.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invres, residuez, numpy.poly, unique_roots
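
    Examples
    --------
    Expand ``H(s) = (s + 1) / (s**2 + 3*s + 2)``, which has simple poles
    at -1 and -2 (the pole at -1 cancels the zero, so its residue is 0):

    >>> from scipy import signal
    >>> r, p, k = signal.residue([1, 1], [1, 3, 2])
    >>> np.allclose(r, [1, 0]) and np.allclose(p, [-2, -1])
    True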
"""
b, a = map(asarray, (b, a))
rscale = a[0]
k, b = polydiv(b, a)
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula
indx = 0
for n in range(len(pout)):
bn = b.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))
# bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
# multiplicity of pole at po[n]
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, pout[n]) / polyval(an, pout[n]) /
factorial(sig - m))
indx += sig
return r / rscale, p, k
def residuez(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(z) / a(z).
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
                b(z)    b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
        H(z) = ------ = -----------------------------------------
                a(z)    a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
then the partial-fraction expansion H(z) is defined as::
              r[0]                    r[-1]
        = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
          (1-p[0]z**(-1))          (1-p[-1]z**(-1))
If there are any repeated roots (closer than `tol`), then the partial
fraction expansion has terms like::
             r[i]              r[i+1]                    r[i+n-1]
        -------------- + ------------------ + ... + ------------------
        (1-p[i]z**(-1))  (1-p[i]z**(-1))**2         (1-p[i]z**(-1))**n
This function is used for polynomials in negative powers of z,
such as digital filters in DSP. For positive powers, use `residue`.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invresz, residue, unique_roots
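
    Examples
    --------
    Expand ``H(z) = 1 / (1 - 0.5*z**-1)``, which has a single pole at
    z = 0.5 with residue 1:

    >>> from scipy import signal
    >>> r, p, k = signal.residuez([1], [1, -0.5])
    >>> np.allclose(r, [1]) and np.allclose(p, [0.5])
    True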
"""
b, a = map(asarray, (b, a))
gain = a[0]
brev, arev = b[::-1], a[::-1]
krev, brev = polydiv(brev, arev)
    if len(krev) == 0:
        k = []
    else:
        k = krev[::-1]
b = brev[::-1]
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula (for discrete-time)
# the polynomial is in z**(-1) and the multiplication is by terms
# like this (1-p[i] z**(-1))**mult[i]. After differentiation,
# we must divide by (-p[i])**(m-k) as well as (m-k)!
indx = 0
for n in range(len(pout)):
bn = brev.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))[::-1]
# bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
# multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) /
polyval(an, 1.0 / pout[n]) /
factorial(sig - m) / (-pout[n]) ** (sig - m))
indx += sig
return r / gain, p, k
def invresz(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(z) and a(z) from partial fraction expansion.
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
                b(z)    b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
        H(z) = ------ = -----------------------------------------
                a(z)    a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
then the partial-fraction expansion H(z) is defined as::
              r[0]                    r[-1]
        = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
          (1-p[0]z**(-1))          (1-p[-1]z**(-1))
If there are any repeated roots (closer than `tol`), then the partial
fraction expansion has terms like::
             r[i]              r[i+1]                    r[i+n-1]
        -------------- + ------------------ + ... + ------------------
        (1-p[i]z**(-1))  (1-p[i]z**(-1))**2         (1-p[i]z**(-1))**n
This function is used for polynomials in negative powers of z,
such as digital filters in DSP. For positive powers, use `invres`.
Parameters
----------
r : array_like
Residues.
p : array_like
Poles.
k : array_like
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
See Also
--------
residuez, unique_roots, invres
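
    Examples
    --------
    Rebuild ``H(z) = 1 / (1 - 0.5*z**-1)`` from a single residue/pole
    pair and no direct term:

    >>> from scipy import signal
    >>> b, a = signal.invresz([1.], [0.5], [])
    >>> np.allclose(b, [1]) and np.allclose(a, [1, -0.5])
    True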
"""
extra = asarray(k)
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
brev = asarray(b)[::-1]
for k in range(len(pout)):
temp = []
# Construct polynomial which does not include any of this root
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
brev = polyadd(brev, (r[indx] * atleast_1d(poly(t2)))[::-1])
indx += 1
b = real_if_close(brev[::-1])
return b, a
def resample(x, num, t=None, axis=0, window=None):
"""
Resample `x` to `num` samples using Fourier method along the given axis.
The resampled signal starts at the same value as `x` but is sampled
with a spacing of ``len(x) / num * (spacing of x)``. Because a
Fourier method is used, the signal is assumed to be periodic.
Parameters
----------
x : array_like
The data to be resampled.
num : int
The number of samples in the resampled signal.
t : array_like, optional
If `t` is given, it is assumed to be the sample positions
associated with the signal data in `x`.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain. See below for details.
Returns
-------
resampled_x or (resampled_x, resampled_t)
Either the resampled array, or, if `t` was given, a tuple
containing the resampled array and the corresponding resampled
positions.
See Also
--------
decimate : Downsample the signal after applying an FIR or IIR filter.
resample_poly : Resample using polyphase filtering and an FIR filter.
Notes
-----
    The argument `window` controls a Fourier-domain window that tapers
    the Fourier spectrum before zero-padding to alleviate ringing in
    the resampled values for sampled signals that were not intended to
    be interpreted as band-limited.
    If `window` is a function, then it is called with a vector of inputs
    indicating the frequency bins (i.e. ``fftfreq(x.shape[axis])``).
If `window` is an array of the same length as `x.shape[axis]` it is
assumed to be the window to be applied directly in the Fourier
domain (with dc and low-frequency first).
For any other type of `window`, the function `scipy.signal.get_window`
is called to generate the window.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * len(x) / num``.
If `t` is not None, then it represents the old sample positions,
and the new sample positions will be returned as well as the new
samples.
As noted, `resample` uses FFT transformations, which can be very
slow if the number of input or output samples is large and prime;
see `scipy.fftpack.fft`.
Examples
--------
Note that the end of the resampled data rises to meet the first
sample of the next cycle:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f = signal.resample(y, 100)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro')
>>> plt.legend(['data', 'resampled'], loc='best')
>>> plt.show()
"""
x = asarray(x)
X = fftpack.fft(x, axis=axis)
Nx = x.shape[axis]
if window is not None:
if callable(window):
W = window(fftpack.fftfreq(Nx))
elif isinstance(window, ndarray):
if window.shape != (Nx,):
raise ValueError('window must have the same length as data')
W = window
else:
W = fftpack.ifftshift(get_window(window, Nx))
newshape = [1] * x.ndim
newshape[axis] = len(W)
W.shape = newshape
X = X * W
W.shape = (Nx,)
sl = [slice(None)] * x.ndim
newshape = list(x.shape)
newshape[axis] = num
N = int(np.minimum(num, Nx))
Y = zeros(newshape, 'D')
sl[axis] = slice(0, (N + 1) // 2)
Y[sl] = X[sl]
sl[axis] = slice(-(N - 1) // 2, None)
Y[sl] = X[sl]
if N % 2 == 0: # special treatment if low number of points is even. So far we have set Y[-N/2]=X[-N/2]
if N < Nx: # if downsampling
sl[axis] = slice(N//2,N//2+1,None) # select the component at frequency N/2
Y[sl] += X[sl] # add the component of X at N/2
elif N < num: # if upsampling
sl[axis] = slice(num-N//2,num-N//2+1,None) # select the component at frequency -N/2
Y[sl] /= 2 # halve the component at -N/2
temp = Y[sl]
sl[axis] = slice(N//2,N//2+1,None) # select the component at +N/2
Y[sl] = temp # set that equal to the component at -N/2
y = fftpack.ifft(Y, axis=axis) * (float(num) / float(Nx))
if x.dtype.char not in ['F', 'D']:
y = y.real
if t is None:
return y
else:
new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
return y, new_t
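# --- Editor's illustrative sketch (not part of scipy): a quick check of the
# spacing rule stated in the docstring above: the first sample is preserved
# and the spacing changes from dx to dx * len(x) / num.  Assumes numpy and
# scipy are importable; the helper name below is hypothetical.
def _demo_resample_spacing():
    import numpy as np
    from scipy import signal
    t = np.linspace(0, 1, 20, endpoint=False)        # dx = 0.05
    y = np.sin(2 * np.pi * 3 * t)                    # band-limited, periodic
    y_new, t_new = signal.resample(y, 50, t=t)       # new dx = 0.05 * 20 / 50
    assert np.isclose(t_new[1] - t_new[0], 0.05 * 20 / 50)
    assert np.isclose(y_new[0], y[0])                # first sample preserved
    return y_new, t_new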
def resample_poly(x, up, down, axis=0, window=('kaiser', 5.0)):
"""
Resample `x` along the given axis using polyphase filtering.
The signal `x` is upsampled by the factor `up`, a zero-phase low-pass
FIR filter is applied, and then it is downsampled by the factor `down`.
The resulting sample rate is ``up / down`` times the original sample
rate. Values beyond the boundary of the signal are assumed to be zero
during the filtering step.
Parameters
----------
x : array_like
The data to be resampled.
up : int
The upsampling factor.
down : int
The downsampling factor.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : string, tuple, or array_like, optional
Desired window to use to design the low-pass filter, or the FIR filter
coefficients to employ. See below for details.
Returns
-------
resampled_x : array
The resampled array.
See Also
--------
decimate : Downsample the signal after applying an FIR or IIR filter.
resample : Resample up or down using the FFT method.
Notes
-----
This polyphase method will likely be faster than the Fourier method
in `scipy.signal.resample` when the number of samples is large and
prime, or when the number of samples is large and `up` and `down`
share a large greatest common divisor. The length of the FIR
filter used will depend on ``max(up, down) // gcd(up, down)``, and
the number of operations during polyphase filtering will depend on
the filter length and `down` (see `scipy.signal.upfirdn` for details).
The argument `window` specifies the FIR low-pass filter design.
If `window` is an array_like it is assumed to be the FIR filter
coefficients. Note that the FIR filter is applied after the upsampling
step, so it should be designed to operate on a signal at a sampling
frequency higher than the original by a factor of `up//gcd(up, down)`.
This function's output will be centered with respect to this array, so it
is best to pass a symmetric filter with an odd number of samples if, as
is usually the case, a zero-phase filter is desired.
For any other type of `window`, the functions `scipy.signal.get_window`
and `scipy.signal.firwin` are called to generate the appropriate filter
coefficients.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * down / float(up)``.
Examples
--------
Note that the end of the resampled data rises to meet the first
sample of the next cycle for the FFT method, and gets closer to zero
for the polyphase method:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f_fft = signal.resample(y, 100)
>>> f_poly = signal.resample_poly(y, 100, 20)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(xnew, f_fft, 'b.-', xnew, f_poly, 'r.-')
>>> plt.plot(x, y, 'ko-')
>>> plt.plot(10, y[0], 'bo', 10, 0., 'ro') # boundaries
>>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best')
>>> plt.show()
"""
x = asarray(x)
if up != int(up):
raise ValueError("up must be an integer")
if down != int(down):
raise ValueError("down must be an integer")
up = int(up)
down = int(down)
if up < 1 or down < 1:
raise ValueError('up and down must be >= 1')
# Determine our up and down factors
# Use a rational approximation to save computation time on really long
# signals
g_ = gcd(up, down)
up //= g_
down //= g_
if up == down == 1:
return x.copy()
n_out = x.shape[axis] * up
n_out = n_out // down + bool(n_out % down)
if isinstance(window, (list, np.ndarray)):
window = array(window) # use array to force a copy (we modify it)
if window.ndim > 1:
raise ValueError('window must be 1-D')
half_len = (window.size - 1) // 2
h = window
else:
# Design a linear-phase low-pass FIR filter
max_rate = max(up, down)
f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist)
half_len = 10 * max_rate # reasonable cutoff for our sinc-like function
h = firwin(2 * half_len + 1, f_c, window=window)
h *= up
# Zero-pad our filter to put the output samples at the center
n_pre_pad = (down - half_len % down)
n_post_pad = 0
n_pre_remove = (half_len + n_pre_pad) // down
# We should rarely need to do this given our filter lengths...
while _output_len(len(h) + n_pre_pad + n_post_pad, x.shape[axis],
up, down) < n_out + n_pre_remove:
n_post_pad += 1
h = np.concatenate((np.zeros(n_pre_pad), h, np.zeros(n_post_pad)))
n_pre_remove_end = n_pre_remove + n_out
# filter then remove excess
y = upfirdn(h, x, up, down, axis=axis)
keep = [slice(None), ]*x.ndim
keep[axis] = slice(n_pre_remove, n_pre_remove_end)
return y[keep]
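# --- Editor's illustrative sketch (not part of scipy): resample_poly reduces
# `up` and `down` by their greatest common divisor, so (100, 20) behaves
# exactly like (5, 1).  Assumes numpy and scipy are importable; the helper
# name is hypothetical.
def _demo_resample_poly_gcd():
    import numpy as np
    from scipy import signal
    x = np.cos(np.linspace(0, 4 * np.pi, 40, endpoint=False))
    y1 = signal.resample_poly(x, 100, 20)   # internally reduced to up=5, down=1
    y2 = signal.resample_poly(x, 5, 1)
    assert np.allclose(y1, y2)
    assert y1.shape[0] == 40 * 5             # n_out = len(x) * up // down
    return y1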
def vectorstrength(events, period):
'''
Determine the vector strength of the events corresponding to the given
period.
The vector strength is a measure of phase synchrony, how well the
timing of the events is synchronized to a single period of a periodic
signal.
If multiple periods are used, calculate the vector strength of each.
This is called the "resonating vector strength".
Parameters
----------
events : 1D array_like
An array of time points containing the timing of the events.
period : float or array_like
The period of the signal that the events should synchronize to.
The period is in the same units as `events`. It can also be an array
of periods, in which case the outputs are arrays of the same length.
Returns
-------
strength : float or 1D array
The strength of the synchronization. 1.0 is perfect synchronization
and 0.0 is no synchronization. If `period` is an array, this is also
an array with each element containing the vector strength at the
corresponding period.
phase : float or array
The phase that the events are most strongly synchronized to in radians.
If `period` is an array, this is also an array with each element
containing the phase for the corresponding period.
References
----------
van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector
strength: Auditory system, electric fish, and noise.
Chaos 21, 047508 (2011);
:doi:`10.1063/1.3670512`.
van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises:
biological and mathematical perspectives. Biol Cybern.
2013 Aug;107(4):385-96. :doi:`10.1007/s00422-013-0561-7`.
van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens
when we vary the "probing" frequency while keeping the spike times
fixed. Biol Cybern. 2013 Aug;107(4):491-94.
:doi:`10.1007/s00422-013-0560-8`.
'''
events = asarray(events)
period = asarray(period)
if events.ndim > 1:
raise ValueError('events cannot have more than 1 dimension')
if period.ndim > 1:
raise ValueError('period cannot have more than 1 dimension')
# we need to know later if period was originally a scalar
scalarperiod = not period.ndim
events = atleast_2d(events)
period = atleast_2d(period)
if (period <= 0).any():
raise ValueError('periods must be positive')
# this converts the times to vectors
vectors = exp(dot(2j*pi/period.T, events))
# the vector strength is just the magnitude of the mean of the vectors
# the vector phase is the angle of the mean of the vectors
vectormean = mean(vectors, axis=1)
strength = abs(vectormean)
phase = angle(vectormean)
# if the original period was a scalar, return scalars
if scalarperiod:
strength = strength[0]
phase = phase[0]
return strength, phase
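# --- Editor's illustrative sketch (not part of scipy): events locked to a
# single phase of the period give a vector strength of 1, while events spread
# uniformly over one period give a strength near 0.  Assumes numpy and scipy
# are importable; the helper name is hypothetical.
def _demo_vectorstrength():
    import numpy as np
    from scipy import signal
    period = 2.0
    locked = np.arange(10) * period + 0.25                # same phase each cycle
    spread = np.linspace(0, period, 8, endpoint=False)    # uniform over a cycle
    s_locked, _ = signal.vectorstrength(locked, period)
    s_spread, _ = signal.vectorstrength(spread, period)
    assert np.isclose(s_locked, 1.0)
    assert s_spread < 1e-10
    return s_locked, s_spread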
def detrend(data, axis=-1, type='linear', bp=0):
"""
Remove linear trend along axis from data.
Parameters
----------
data : array_like
The input data.
axis : int, optional
The axis along which to detrend the data. By default this is the
last axis (-1).
type : {'linear', 'constant'}, optional
The type of detrending. If ``type == 'linear'`` (default),
the result of a linear least-squares fit to `data` is subtracted
from `data`.
If ``type == 'constant'``, only the mean of `data` is subtracted.
bp : array_like of ints, optional
A sequence of break points. If given, an individual linear fit is
performed for each part of `data` between two break points.
Break points are specified as indices into `data`.
Returns
-------
ret : ndarray
The detrended input data.
Examples
--------
>>> from scipy import signal
>>> randgen = np.random.RandomState(9)
>>> npoints = 1000
>>> noise = randgen.randn(npoints)
>>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
>>> (signal.detrend(x) - noise).max() < 0.01
True
"""
if type not in ['linear', 'l', 'constant', 'c']:
raise ValueError("Trend type must be 'linear' or 'constant'.")
data = asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant', 'c']:
ret = data - expand_dims(mean(data, axis), axis)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = sort(unique(r_[0, bp, N]))
if np.any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0:
axis = axis + rnk
newdims = r_[axis, 0:axis, axis + 1:rnk]
newdata = reshape(transpose(data, tuple(newdims)),
(N, _prod(dshape) // N))
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m + 1] - bp[m]
A = ones((Npts, 2), dtype)
A[:, 0] = cast[dtype](arange(1, Npts + 1) * 1.0 / Npts)
sl = slice(bp[m], bp[m + 1])
coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
newdata[sl] = newdata[sl] - dot(A, coef)
# Put data back in original shape.
tdshape = take(dshape, newdims, 0)
ret = reshape(newdata, tuple(tdshape))
vals = list(range(1, rnk))
olddims = vals[:axis] + [0] + vals[axis:]
ret = transpose(ret, tuple(olddims))
return ret
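# --- Editor's illustrative sketch (not part of scipy): type='constant' removes
# only the mean, while type='linear' removes a least-squares line, so a pure
# ramp detrends to (numerically) zero.  Assumes numpy and scipy are importable;
# the helper name is hypothetical.
def _demo_detrend_types():
    import numpy as np
    from scipy import signal
    x = 3.0 + 0.5 * np.arange(100)
    r_const = signal.detrend(x, type='constant')   # still a ramp, but zero mean
    r_lin = signal.detrend(x, type='linear')       # essentially zero everywhere
    assert abs(r_const.mean()) < 1e-8
    assert np.abs(r_lin).max() < 1e-8
    return r_const, r_lin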
def lfilter_zi(b, a):
"""
Construct initial conditions for lfilter for step response steady-state.
Compute an initial state `zi` for the `lfilter` function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
b, a : array_like (1-D)
The IIR filter coefficients. See `lfilter` for more
information.
Returns
-------
zi : 1-D ndarray
The initial state for the filter.
See Also
--------
lfilter, lfiltic, filtfilt
Notes
-----
A linear filter with order m has a state space representation (A, B, C, D),
for which the output y of the filter can be expressed as::
z(n+1) = A*z(n) + B*x(n)
y(n) = C*z(n) + D*x(n)
where z(n) is a vector of length m, A has shape (m, m), B has shape
(m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
a scalar). lfilter_zi solves::
zi = A*zi + B
In other words, it finds the initial condition for which the response
to an input of all ones is a constant.
Given the filter coefficients `a` and `b`, the state space matrices
for the transposed direct form II implementation of the linear filter,
which is the implementation used by scipy.signal.lfilter, are::
A = scipy.linalg.companion(a).T
B = b[1:] - a[1:]*b[0]
assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
divided by a[0].
Examples
--------
The following code creates a lowpass Butterworth filter. Then it
applies that filter to an array whose values are all 1.0; the
output is also all 1.0, as expected for a lowpass filter. If the
`zi` argument of `lfilter` had not been given, the output would have
shown the transient signal.
>>> from numpy import array, ones
>>> from scipy.signal import lfilter, lfilter_zi, butter
>>> b, a = butter(5, 0.25)
>>> zi = lfilter_zi(b, a)
>>> y, zo = lfilter(b, a, ones(10), zi=zi)
>>> y
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
Another example:
>>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
>>> y, zf = lfilter(b, a, x, zi=zi*x[0])
>>> y
array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,
0.44399389, 0.35505241])
Note that the `zi` argument to `lfilter` was computed using
`lfilter_zi` and scaled by `x[0]`. Then the output `y` has no
transient until the input drops from 0.5 to 0.0.
"""
# FIXME: Can this function be replaced with an appropriate
# use of lfiltic? For example, when b,a = butter(N,Wn),
# lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
#
# We could use scipy.signal.normalize, but it uses warnings in
# cases where a ValueError is more appropriate, and it allows
# b to be 2D.
b = np.atleast_1d(b)
if b.ndim != 1:
raise ValueError("Numerator b must be 1-D.")
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Denominator a must be 1-D.")
while len(a) > 1 and a[0] == 0.0:
a = a[1:]
if a.size < 1:
raise ValueError("There must be at least one nonzero `a` coefficient.")
if a[0] != 1.0:
# Normalize the coefficients so a[0] == 1.
b = b / a[0]
a = a / a[0]
n = max(len(a), len(b))
# Pad a or b with zeros so they are the same length.
if len(a) < n:
a = np.r_[a, np.zeros(n - len(a))]
elif len(b) < n:
b = np.r_[b, np.zeros(n - len(b))]
IminusA = np.eye(n - 1) - linalg.companion(a).T
B = b[1:] - a[1:] * b[0]
# Solve zi = A*zi + B
zi = np.linalg.solve(IminusA, B)
# For future reference: we could also use the following
# explicit formulas to solve the linear system:
#
# zi = np.zeros(n - 1)
# zi[0] = B.sum() / IminusA[:,0].sum()
# asum = 1.0
# csum = 0.0
# for k in range(1,n-1):
# asum += a[k]
# csum += b[k] - a[k]*b[0]
# zi[k] = asum*zi[0] - csum
return zi
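# --- Editor's illustrative sketch (not part of scipy): a direct check of the
# fixed-point relation zi = A*zi + B described in the Notes above, using the
# transposed direct form II state-space matrices.  Assumes numpy and scipy are
# importable; the helper name is hypothetical.
def _demo_lfilter_zi_fixed_point():
    import numpy as np
    from scipy import linalg, signal
    b, a = signal.butter(4, 0.2)
    zi = signal.lfilter_zi(b, a)
    A = linalg.companion(a).T
    B = b[1:] - a[1:] * b[0]
    assert np.allclose(zi, A.dot(zi) + B)
    return zi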
def sosfilt_zi(sos):
"""
Construct initial conditions for sosfilt for step response steady-state.
Compute an initial state `zi` for the `sosfilt` function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
zi : ndarray
Initial conditions suitable for use with ``sosfilt``, shape
``(n_sections, 2)``.
See Also
--------
sosfilt, zpk2sos
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
Filter a rectangular pulse that begins at time 0, with and without
the use of the `zi` argument of `scipy.signal.sosfilt`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sos = signal.butter(9, 0.125, output='sos')
>>> zi = signal.sosfilt_zi(sos)
>>> x = (np.arange(250) < 100).astype(int)
>>> f1 = signal.sosfilt(sos, x)
>>> f2, zo = signal.sosfilt(sos, x, zi=zi)
>>> plt.plot(x, 'k--', label='x')
>>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered')
>>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi')
>>> plt.legend(loc='best')
>>> plt.show()
"""
sos = np.asarray(sos)
if sos.ndim != 2 or sos.shape[1] != 6:
raise ValueError('sos must be shape (n_sections, 6)')
n_sections = sos.shape[0]
zi = np.empty((n_sections, 2))
scale = 1.0
for section in range(n_sections):
b = sos[section, :3]
a = sos[section, 3:]
zi[section] = scale * lfilter_zi(b, a)
# If H(z) = B(z)/A(z) is this section's transfer function, then
# b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady
# state value of this section's step response.
scale *= b.sum() / a.sum()
return zi
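# --- Editor's illustrative sketch (not part of scipy): with the initial state
# from sosfilt_zi, filtering a constant input yields a constant output from the
# very first sample, i.e. the step-response transient is removed.  Assumes
# numpy and scipy are importable; the helper name is hypothetical.
def _demo_sosfilt_zi_steady_state():
    import numpy as np
    from scipy import signal
    sos = signal.butter(6, 0.3, output='sos')
    zi = signal.sosfilt_zi(sos)
    y, _ = signal.sosfilt(sos, np.ones(20), zi=zi)
    assert np.allclose(y, 1.0)
    return y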
def _filtfilt_gust(b, a, x, axis=-1, irlen=None):
"""Forward-backward IIR filter that uses Gustafsson's method.
Apply the IIR filter defined by `(b,a)` to `x` twice, first forward
then backward, using Gustafsson's initial conditions [1]_.
Let ``y_fb`` be the result of filtering first forward and then backward,
and let ``y_bf`` be the result of filtering first backward then forward.
Gustafsson's method is to compute initial conditions for the forward
pass and the backward pass such that ``y_fb == y_bf``.
Parameters
----------
b : scalar or 1-D ndarray
Numerator coefficients of the filter.
a : scalar or 1-D ndarray
Denominator coefficients of the filter.
x : ndarray
Data to be filtered.
axis : int, optional
Axis of `x` to be filtered. Default is -1.
irlen : int or None, optional
The length of the nonnegligible part of the impulse response.
If `irlen` is None, or if the length of the signal is less than
``2 * irlen``, then no part of the impulse response is ignored.
Returns
-------
y : ndarray
The filtered data.
x0 : ndarray
Initial condition for the forward filter.
x1 : ndarray
Initial condition for the backward filter.
Notes
-----
Typically the return values `x0` and `x1` are not needed by the
caller. The intended use of these return values is in unit tests.
References
----------
.. [1] F. Gustafsson. Determining the initial states in forward-backward
filtering. IEEE Transactions on Signal Processing, 44(4):988-992, 1996.
"""
# In the comments, "Gustafsson's paper" and [1] refer to the
# paper referenced in the docstring.
b = np.atleast_1d(b)
a = np.atleast_1d(a)
order = max(len(b), len(a)) - 1
if order == 0:
# The filter is just scalar multiplication, with no state.
scale = (b[0] / a[0])**2
y = scale * x
return y, np.array([]), np.array([])
if axis != -1 and axis != x.ndim - 1:
# Move the axis containing the data to the end.
x = np.swapaxes(x, axis, x.ndim - 1)
# n is the number of samples in the data to be filtered.
n = x.shape[-1]
if irlen is None or n <= 2*irlen:
m = n
else:
m = irlen
# Create Obs, the observability matrix (called O in the paper).
# This matrix can be interpreted as the operator that propagates
# an arbitrary initial state to the output, assuming the input is
# zero.
# In Gustafsson's paper, the forward and backward filters are not
# necessarily the same, so he has both O_f and O_b. We use the same
# filter in both directions, so we only need O. The same comment
# applies to S below.
Obs = np.zeros((m, order))
zi = np.zeros(order)
zi[0] = 1
Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]
for k in range(1, order):
Obs[k:, k] = Obs[:-k, 0]
# Obsr is O^R (Gustafsson's notation for row-reversed O)
Obsr = Obs[::-1]
# Create S. S is the matrix that applies the filter to the reversed
# propagated initial conditions. That is,
# out = S.dot(zi)
# is the same as
# tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs.
# out = lfilter(b, a, tmp[::-1]) # Reverse and filter.
# Equations (5) & (6) of [1]
S = lfilter(b, a, Obs[::-1], axis=0)
# Sr is S^R (row-reversed S)
Sr = S[::-1]
# M is [(S^R - O), (O^R - S)]
if m == n:
M = np.hstack((Sr - Obs, Obsr - S))
else:
# Matrix described in section IV of [1].
M = np.zeros((2*m, 2*order))
M[:m, :order] = Sr - Obs
M[m:, order:] = Obsr - S
# Naive forward-backward and backward-forward filters.
# These have large transients because the filters use zero initial
# conditions.
y_f = lfilter(b, a, x)
y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]
y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]
y_bf = lfilter(b, a, y_b)
delta_y_bf_fb = y_bf - y_fb
if m == n:
delta = delta_y_bf_fb
else:
start_m = delta_y_bf_fb[..., :m]
end_m = delta_y_bf_fb[..., -m:]
delta = np.concatenate((start_m, end_m), axis=-1)
# ic_opt holds the "optimal" initial conditions.
# The following code computes the result shown in the formula
# of the paper between equations (6) and (7).
if delta.ndim == 1:
ic_opt = linalg.lstsq(M, delta)[0]
else:
# Reshape delta so it can be used as an array of multiple
# right-hand-sides in linalg.lstsq.
delta2d = delta.reshape(-1, delta.shape[-1]).T
ic_opt0 = linalg.lstsq(M, delta2d)[0].T
ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))
# Now compute the filtered signal using equation (7) of [1].
# First, form [S^R, O^R] and call it W.
if m == n:
W = np.hstack((Sr, Obsr))
else:
W = np.zeros((2*m, 2*order))
W[:m, :order] = Sr
W[m:, order:] = Obsr
# Equation (7) of [1] says
# Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]
# `wic` is (almost) the product on the right.
# W has shape (m, 2*order), and ic_opt has shape (..., 2*order),
# so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T,
# so wic has shape (..., m).
wic = ic_opt.dot(W.T)
# `wic` is "almost" the product of W and the optimal ICs in equation
# (7)--if we're using a truncated impulse response (m < n), `wic`
# contains only the adjustments required for the ends of the signal.
# Here we form y_opt, taking this into account if necessary.
y_opt = y_fb
if m == n:
y_opt += wic
else:
y_opt[..., :m] += wic[..., :m]
y_opt[..., -m:] += wic[..., -m:]
x0 = ic_opt[..., :order]
x1 = ic_opt[..., -order:]
if axis != -1 and axis != x.ndim - 1:
# Restore the data axis to its original position.
x0 = np.swapaxes(x0, axis, x.ndim - 1)
x1 = np.swapaxes(x1, axis, x.ndim - 1)
y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)
return y_opt, x0, x1
def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',
irlen=None):
"""
Apply a digital filter forward and backward to a signal.
This function applies a linear digital filter twice, once forward and
once backwards. The combined filter has zero phase and a filter order
twice that of the original.
The function provides options for handling the edges of the signal.
Parameters
----------
b : (N,) array_like
The numerator coefficient vector of the filter.
a : (N,) array_like
The denominator coefficient vector of the filter. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is ``3 * max(len(a), len(b))``.
method : str, optional
Determines the method for handling the edges of the signal, either
"pad" or "gust". When `method` is "pad", the signal is padded; the
type of padding is determined by `padtype` and `padlen`, and `irlen`
is ignored. When `method` is "gust", Gustafsson's method is used,
and `padtype` and `padlen` are ignored.
irlen : int or None, optional
When `method` is "gust", `irlen` specifies the length of the
impulse response of the filter. If `irlen` is None, no part
of the impulse response is ignored. For a long signal, specifying
`irlen` can significantly improve the performance of the filter.
Returns
-------
y : ndarray
The filtered output with the same shape as `x`.
See Also
--------
sosfiltfilt, lfilter_zi, lfilter, lfiltic, savgol_filter, sosfilt
Notes
-----
When `method` is "pad", the function pads the data along the given axis
in one of three ways: odd, even or constant. The odd and even extensions
have the corresponding symmetry about the end point of the data. The
constant extension extends the data with the values at the end points. On
both the forward and backward passes, the initial condition of the
filter is found by using `lfilter_zi` and scaling it by the end point of
the extended data.
When `method` is "gust", Gustafsson's method [1]_ is used. Initial
conditions are chosen for the forward and backward passes so that the
forward-backward filter gives the same result as the backward-forward
filter.
The option to use Gustafsson's method was added in scipy version 0.16.0.
References
----------
.. [1] F. Gustafsson, "Determining the initial states in forward-backward
filtering", IEEE Transactions on Signal Processing, Vol. 44, pp. 988-992,
1996.
Examples
--------
The examples will use several functions from `scipy.signal`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
First we create a one second signal that is the sum of two pure sine
waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
>>> t = np.linspace(0, 1.0, 2001)
>>> xlow = np.sin(2 * np.pi * 5 * t)
>>> xhigh = np.sin(2 * np.pi * 250 * t)
>>> x = xlow + xhigh
Now create a lowpass Butterworth filter with a cutoff of 0.125 times
the Nyquist frequency, or 125 Hz, and apply it to ``x`` with `filtfilt`.
The result should be approximately ``xlow``, with no phase shift.
>>> b, a = signal.butter(8, 0.125)
>>> y = signal.filtfilt(b, a, x, padlen=150)
>>> np.abs(y - xlow).max()
9.1086182074789912e-06
We get a fairly clean result for this artificial example because
the odd extension is exact, and with the moderately long padding,
the filter's transients have dissipated by the time the actual data
is reached. In general, transient effects at the edges are
unavoidable.
The following example demonstrates the option ``method="gust"``.
First, create a filter.
>>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied.
>>> np.random.seed(123456)
`sig` is a random input signal to be filtered.
>>> n = 60
>>> sig = np.random.randn(n)**3 + 3*np.random.randn(n).cumsum()
Apply `filtfilt` to `sig`, once using the Gustafsson method, and
once using padding, and plot the results for comparison.
>>> fgust = signal.filtfilt(b, a, sig, method="gust")
>>> fpad = signal.filtfilt(b, a, sig, padlen=50)
>>> plt.plot(sig, 'k-', label='input')
>>> plt.plot(fgust, 'b-', linewidth=4, label='gust')
>>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad')
>>> plt.legend(loc='best')
>>> plt.show()
The `irlen` argument can be used to improve the performance
of Gustafsson's method.
Estimate the impulse response length of the filter.
>>> z, p, k = signal.tf2zpk(b, a)
>>> eps = 1e-9
>>> r = np.max(np.abs(p))
>>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
>>> approx_impulse_len
137
Apply the filter to a longer signal, with and without the `irlen`
argument. The difference between `y1` and `y2` is small. For long
signals, using `irlen` gives a significant performance improvement.
>>> x = np.random.randn(5000)
>>> y1 = signal.filtfilt(b, a, x, method='gust')
>>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)
>>> print(np.max(np.abs(y1 - y2)))
1.80056858312e-10
"""
b = np.atleast_1d(b)
a = np.atleast_1d(a)
x = np.asarray(x)
if method not in ["pad", "gust"]:
raise ValueError("method must be 'pad' or 'gust'.")
if method == "gust":
y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
return y
# method == "pad"
edge, ext = _validate_pad(padtype, padlen, x, axis,
ntaps=max(len(a), len(b)))
# Get the steady state of the filter's step response.
zi = lfilter_zi(b, a)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
zi_shape = [1] * x.ndim
zi_shape[axis] = zi.size
zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
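# --- Editor's illustrative sketch (not part of scipy): zero phase in practice.
# Applying filtfilt to a centered impulse gives a response that is (nearly)
# symmetric about the impulse location, which a single forward lfilter pass is
# not.  Assumes numpy and scipy are importable; the helper name is hypothetical.
def _demo_filtfilt_zero_phase():
    import numpy as np
    from scipy import signal
    b, a = signal.butter(4, 0.2)
    x = np.zeros(201)
    x[100] = 1.0
    y = signal.filtfilt(b, a, x)
    # y[100 + k] should match y[100 - k] for modest k.
    assert np.allclose(y[101:151], y[99:49:-1], atol=1e-6)
    return y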
def _validate_pad(padtype, padlen, x, axis, ntaps):
"""Helper to validate padding for filtfilt"""
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype "
"must be 'even', 'odd', 'constant', or None.") %
padtype)
if padtype is None:
padlen = 0
if padlen is None:
# Original padding; preserved for backwards compatibility.
edge = ntaps * 3
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
return edge, ext
def sosfilt(sos, x, axis=-1, zi=None):
"""
Filter data along one dimension using cascaded second-order sections.
Filter a data sequence, `x`, using a digital IIR filter defined by
`sos`. This is implemented by performing `lfilter` for each
second-order section. See `lfilter` for details.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the cascaded filter delays. It is a (at
least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where
``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``
replaced by 2. If `zi` is None or is not given then initial rest
(i.e. all zeros) is assumed.
Note that these initial conditions are *not* the same as the initial
conditions given by `lfiltic` or `lfilter_zi`.
Returns
-------
y : ndarray
The output of the digital filter.
zf : ndarray, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
zpk2sos, sos2zpk, sosfilt_zi, sosfiltfilt, sosfreqz
Notes
-----
The filter function is implemented as a series of second-order filters
with direct-form II transposed structure. It is designed to minimize
numerical precision errors for high-order filters.
.. versionadded:: 0.16.0
Examples
--------
Plot a 13th-order filter's impulse response using both `lfilter` and
`sosfilt`, showing the instability that results from trying to do a
13th-order filter in a single stage (the numerical error pushes some poles
outside of the unit circle):
>>> import matplotlib.pyplot as plt
>>> from scipy import signal
>>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')
>>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos')
>>> x = signal.unit_impulse(700)
>>> y_tf = signal.lfilter(b, a, x)
>>> y_sos = signal.sosfilt(sos, x)
>>> plt.plot(y_tf, 'r', label='TF')
>>> plt.plot(y_sos, 'k', label='SOS')
>>> plt.legend(loc='best')
>>> plt.show()
"""
x = np.asarray(x)
sos, n_sections = _validate_sos(sos)
use_zi = zi is not None
if use_zi:
zi = np.asarray(zi)
x_zi_shape = list(x.shape)
x_zi_shape[axis] = 2
x_zi_shape = tuple([n_sections] + x_zi_shape)
if zi.shape != x_zi_shape:
raise ValueError('Invalid zi shape. With axis=%r, an input with '
'shape %r, and an sos array with %d sections, zi '
'must have shape %r, got %r.' %
(axis, x.shape, n_sections, x_zi_shape, zi.shape))
zf = zeros_like(zi)
for section in range(n_sections):
if use_zi:
x, zf[section] = lfilter(sos[section, :3], sos[section, 3:],
x, axis, zi=zi[section])
else:
x = lfilter(sos[section, :3], sos[section, 3:], x, axis)
out = (x, zf) if use_zi else x
return out
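# --- Editor's illustrative sketch (not part of scipy): for a well-conditioned,
# moderate-order filter the cascaded second-order sections give the same output
# as the single transfer-function form; the SOS form mainly matters at high
# order.  Assumes numpy and scipy are importable; the helper name is
# hypothetical.
def _demo_sosfilt_matches_lfilter():
    import numpy as np
    from scipy import signal
    rng = np.random.RandomState(0)
    x = rng.randn(128)
    b, a = signal.butter(4, 0.25)
    sos = signal.butter(4, 0.25, output='sos')
    assert np.allclose(signal.lfilter(b, a, x), signal.sosfilt(sos, x))
    return sos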
def sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None):
"""
A forward-backward digital filter using cascaded second-order sections.
See `filtfilt` for more complete information about this method.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is::
3 * (2 * len(sos) + 1 - min((sos[:, 2] == 0).sum(),
(sos[:, 5] == 0).sum()))
The extra subtraction at the end attempts to compensate for poles
and zeros at the origin (e.g. for odd-order filters) to yield
equivalent estimates of `padlen` to those of `filtfilt` for
second-order section filters built with `scipy.signal` functions.
Returns
-------
y : ndarray
The filtered output with the same shape as `x`.
See Also
--------
filtfilt, sosfilt, sosfilt_zi, sosfreqz
Notes
-----
.. versionadded:: 0.18.0
Examples
--------
>>> from scipy.signal import sosfiltfilt, butter
>>> import matplotlib.pyplot as plt
Create an interesting signal to filter.
>>> n = 201
>>> t = np.linspace(0, 1, n)
>>> np.random.seed(123)
>>> x = 1 + (t < 0.5) - 0.25*t**2 + 0.05*np.random.randn(n)
Create a lowpass Butterworth filter, and use it to filter `x`.
>>> sos = butter(4, 0.125, output='sos')
>>> y = sosfiltfilt(sos, x)
For comparison, apply an 8th order filter using `sosfilt`. The filter
is initialized using the mean of the first four values of `x`.
>>> from scipy.signal import sosfilt, sosfilt_zi
>>> sos8 = butter(8, 0.125, output='sos')
>>> zi = x[:4].mean() * sosfilt_zi(sos8)
>>> y2, zo = sosfilt(sos8, x, zi=zi)
Plot the results. Note that the phase of `y` matches the input, while
`y2` has a significant phase delay.
>>> plt.plot(t, x, alpha=0.5, label='x(t)')
>>> plt.plot(t, y, label='y(t)')
>>> plt.plot(t, y2, label='y2(t)')
>>> plt.legend(framealpha=1, shadow=True)
>>> plt.grid(alpha=0.25)
>>> plt.xlabel('t')
>>> plt.show()
"""
sos, n_sections = _validate_sos(sos)
# `method` is "pad"...
ntaps = 2 * n_sections + 1
ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum())
edge, ext = _validate_pad(padtype, padlen, x, axis,
ntaps=ntaps)
# These steps follow the same form as filtfilt with modifications
zi = sosfilt_zi(sos) # shape (n_sections, 2) --> (n_sections, ..., 2, ...)
zi_shape = [1] * x.ndim
zi_shape[axis] = 2
zi.shape = [n_sections] + zi_shape
x_0 = axis_slice(ext, stop=1, axis=axis)
(y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0)
y_0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0)
y = axis_reverse(y, axis=axis)
if edge > 0:
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def decimate(x, q, n=None, ftype='iir', axis=-1, zero_phase=True):
"""
Downsample the signal after applying an anti-aliasing filter.
By default, an order 8 Chebyshev type I filter is used. A 30 point FIR
filter with Hamming window is used if `ftype` is 'fir'.
Parameters
----------
x : array_like
The signal to be downsampled, as an N-dimensional array.
q : int
The downsampling factor. When using IIR downsampling, it is recommended
to call `decimate` multiple times for downsampling factors higher than
13.
n : int, optional
The order of the filter (1 less than the length for 'fir'). Defaults to
8 for 'iir' and 20 times the downsampling factor for 'fir'.
ftype : str {'iir', 'fir'} or ``dlti`` instance, optional
If 'iir' or 'fir', specifies the type of lowpass filter. If an instance
of an `dlti` object, uses that object to filter before downsampling.
axis : int, optional
The axis along which to decimate.
zero_phase : bool, optional
Prevent phase shift by filtering with `filtfilt` instead of `lfilter`
when using an IIR filter, and shifting the outputs back by the filter's
group delay when using an FIR filter. The default value of ``True`` is
recommended, since a phase shift is generally not desired.
.. versionadded:: 0.18.0
Returns
-------
y : ndarray
The down-sampled signal.
See Also
--------
resample : Resample up or down using the FFT method.
resample_poly : Resample using polyphase filtering and an FIR filter.
Notes
-----
The ``zero_phase`` keyword was added in 0.18.0.
The possibility to use instances of ``dlti`` as ``ftype`` was added in
0.18.0.
"""
x = asarray(x)
q = operator.index(q)
if n is not None:
n = operator.index(n)
if ftype == 'fir':
if n is None:
half_len = 10 * q # reasonable cutoff for our sinc-like function
n = 2 * half_len
b, a = firwin(n+1, 1. / q, window='hamming'), 1.
elif ftype == 'iir':
if n is None:
n = 8
system = dlti(*cheby1(n, 0.05, 0.8 / q))
b, a = system.num, system.den
elif isinstance(ftype, dlti):
system = ftype._as_tf() # Avoids copying if already in TF form
b, a = system.num, system.den
else:
raise ValueError('invalid ftype')
sl = [slice(None)] * x.ndim
a = np.asarray(a)
if a.size == 1: # FIR case
b = b / a
if zero_phase:
y = resample_poly(x, 1, q, axis=axis, window=b)
else:
# upfirdn is generally faster than lfilter by a factor equal to the
# downsampling factor, since it only calculates the needed outputs
n_out = x.shape[axis] // q + bool(x.shape[axis] % q)
y = upfirdn(b, x, up=1, down=q, axis=axis)
sl[axis] = slice(None, n_out, None)
else: # IIR case
if zero_phase:
y = filtfilt(b, a, x, axis=axis)
else:
y = lfilter(b, a, x, axis=axis)
sl[axis] = slice(None, None, q)
return y[sl]
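# --- Editor's illustrative sketch (not part of scipy): decimating by q keeps
# every q-th sample after low-pass filtering, so the output length is
# ceil(len(x) / q); with ftype='fir' and zero_phase=True the work is done by
# resample_poly(x, 1, q).  Assumes numpy and scipy are importable; the helper
# name is hypothetical.
def _demo_decimate_length():
    import numpy as np
    from scipy import signal
    x = np.sin(2 * np.pi * 0.01 * np.arange(1000))
    y = signal.decimate(x, 4, ftype='fir', zero_phase=True)
    assert y.shape[0] == int(np.ceil(1000 / 4.0))
    return y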
| kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/scipy/signal/signaltools.py | Python | gpl-3.0 | 117,882 | ["Gaussian"] | b79c72eb22e7ad053e569fd975681191fcc9b0ed587cd2697e3e1ac42e0b4b8d |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#
# Author: Matteo Giantomassi <matteo.giantomassiNOSPAM AT uclouvain.be>
# Date: October 11, 2016
from spack import *
class Abinit(AutotoolsPackage):
"""ABINIT is a package whose main program allows one to find the total
energy, charge density and electronic structure of systems made of
electrons and nuclei (molecules and periodic solids) within
Density Functional Theory (DFT), using pseudopotentials and a planewave
or wavelet basis.
ABINIT also includes options to optimize the geometry according to the
DFT forces and stresses, or to perform molecular dynamics
simulations using these forces, or to generate dynamical matrices,
Born effective charges, and dielectric tensors, based on Density-Functional
Perturbation Theory, and many more properties. Excited states can be
computed within the Many-Body Perturbation Theory (the GW approximation and
the Bethe-Salpeter equation), and Time-Dependent Density Functional Theory
(for molecules). In addition to the main ABINIT code, different utility
programs are provided.
"""
homepage = 'http://www.abinit.org'
url = 'https://www.abinit.org/sites/default/files/packages/abinit-8.6.3.tar.gz'
version('8.10.3', sha256='ed626424b4472b93256622fbb9c7645fa3ffb693d4b444b07d488771ea7eaa75')
version('8.8.2', sha256='15216703bd56a799a249a112b336d07d733627d3756487a4b1cb48ebb625c3e7')
version('8.6.3', sha256='82e8d071088ab8dc1b3a24380e30b68c544685678314df1213180b449c84ca65')
version('8.2.2', sha256='e43544a178d758b0deff3011c51ef7c957d7f2df2ce8543366d68016af9f3ea1')
# Versions before 8.0.8b are not supported.
version('8.0.8b', sha256='37ad5f0f215d2a36e596383cb6e54de3313842a0390ce8d6b48a423d3ee25af2')
variant('mpi', default=True,
description='Builds with MPI support. Requires MPI2+')
variant('openmp', default=False,
description='Enables OpenMP threads. Use threaded FFTW3')
variant('scalapack', default=False,
description='Enables scalapack support. Requires MPI')
# variant('elpa', default=False,
# description='Uses elpa instead of scalapack. Requires MPI')
# TODO: To be tested.
# It was working before the last `git pull` but now all tests crash.
# For the time being, the default is netcdf3 and the internal fallbacks
# FIXME: rename (trio?) and use multivalued variants to cover
# --with-trio-flavor={netcdf, none}
# Note that Abinit@8: does not support etsf_io anymore because it is not
# compatible with HDF5 and MPI-IO
variant('hdf5', default=False,
description='Enables HDF5+Netcdf4 with MPI. WARNING: experimental')
variant('wannier90', default=False,
description='Enables the Wannier90 library')
# Add dependencies
# currently one cannot forward options to virtual packages, see #1712.
# depends_on('blas', when='~openmp')
# depends_on('blas+openmp', when='+openmp')
depends_on('blas')
depends_on('lapack')
# Require MPI2+
depends_on('mpi@2:', when='+mpi')
depends_on('scalapack', when='+scalapack+mpi')
# depends_on('elpa~openmp', when='+elpa+mpi~openmp')
# depends_on('elpa+openmp', when='+elpa+mpi+openmp')
depends_on('fftw precision=float,double')
depends_on('fftw~openmp', when='~openmp')
depends_on('fftw+openmp', when='+openmp')
depends_on('netcdf-fortran', when='+hdf5')
depends_on('hdf5+mpi', when='+mpi+hdf5') # required for NetCDF-4 support
# pin libxc version
depends_on("libxc@2.2.2")
# Cannot ask for +scalapack if it does not depend on MPI
conflicts('+scalapack', when='~mpi')
depends_on("wannier90+shared", when='+wannier90')
# Elpa is a substitute for scalapack and needs mpi
# conflicts('+elpa', when='~mpi')
# conflicts('+elpa', when='+scalapack')
def configure_args(self):
spec = self.spec
options = []
oapp = options.append
if '+wannier90' in spec:
oapp('--with-wannier90-libs=-L{0}'
.format(spec['wannier90'].prefix.lib + ' -lwannier -lm'))
oapp('--with-wannier90-incs=-I{0}'
.format(spec['wannier90'].prefix.modules))
oapp('--with-wannier90-bins={0}'
.format(spec['wannier90'].prefix.bin))
oapp('--enable-connectors')
oapp('--with-dft-flavor=wannier90')
if '+mpi' in spec:
# MPI version:
# let the configure script auto-detect MPI support from mpi_prefix
oapp('--with-mpi-prefix={0}'.format(spec['mpi'].prefix))
oapp('--enable-mpi=yes')
oapp('--enable-mpi-io=yes')
oapp('MPIFC={0}/mpifc'.format(spec['mpi'].prefix.bin))
if '~wannier90' in spec:
oapp('--with-dft-flavor=atompaw+libxc')
# Activate OpenMP in Abinit Fortran code.
if '+openmp' in spec:
oapp('--enable-openmp=yes')
# BLAS/LAPACK/SCALAPACK-ELPA
linalg = spec['lapack'].libs + spec['blas'].libs
if '+scalapack' in spec:
oapp('--with-linalg-flavor=custom+scalapack')
linalg = spec['scalapack'].libs + linalg
# elif '+elpa' in spec:
else:
oapp('--with-linalg-flavor=custom')
oapp('--with-linalg-libs={0}'.format(linalg.ld_flags))
# FFTW3: use sequential or threaded version if +openmp
fftflavor, fftlibs = 'fftw3', '-lfftw3 -lfftw3f'
if '+openmp' in spec:
fftflavor = 'fftw3-threads'
fftlibs = '-lfftw3_omp -lfftw3 -lfftw3f'
options.extend([
'--with-fft-flavor=%s' % fftflavor,
'--with-fft-incs=-I%s' % spec['fftw'].prefix.include,
'--with-fft-libs=-L%s %s' % (spec['fftw'].prefix.lib, fftlibs),
])
# LibXC library
libxc = spec['libxc:fortran']
options.extend([
'with_libxc_incs={0}'.format(libxc.headers.cpp_flags),
'with_libxc_libs={0}'.format(libxc.libs.ld_flags + ' -lm')
])
# Netcdf4/HDF5
if '+hdf5' in spec:
oapp('--with-trio-flavor=netcdf')
# Since version 8, Abinit started to use netcdf4 + hdf5 and we have
# to link with the high level HDF5 library
hdf5 = spec['hdf5:hl']
netcdff = spec['netcdf-fortran:shared']
options.extend([
'--with-netcdf-incs={0}'.format(netcdff.headers.cpp_flags),
'--with-netcdf-libs={0}'.format(
netcdff.libs.ld_flags + ' ' + hdf5.libs.ld_flags
),
])
else:
# In Spack we do our best to avoid building any internally provided
# dependencies, such as netcdf3 in this case.
oapp('--with-trio-flavor=none')
return options
def check(self):
"""This method is called after the build phase if tests have been
explicitly activated by user.
"""
make('check')
make('tests_in')
| rspavel/spack | var/spack/repos/builtin/packages/abinit/package.py | Python | lgpl-2.1 | 7,283 | ["ABINIT", "NetCDF", "Wannier90"] | 0c1909e6ca70759ca3e0afd231cf7708f9655828f023611fabc52a246714eb18 |
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2015 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
#pylint: skip-file
import numpy as np
from glob import glob
from horton import *
__all__ = [
'get_proatomdb_cp2k', 'get_proatomdb_hf_sto3g',
'get_proatomdb_hf_lan', 'get_fake_co', 'get_fake_pseudo_oo',
'check_names', 'check_proatom_splines',
]
def get_proatomdb_cp2k():
'''Return a proatomdb of pseudo oxygens and one silicon for testing purposes'''
fns = glob(context.get_fn('test/atom_*.cp2k.out'))
return ProAtomDB.from_files(fns)
def get_proatomdb_hf_sto3g():
'''Return a proatomdb of H and O at hf/sto-3g for testing purposes'''
fns = glob(context.get_fn('test/atom_???_???_hf_sto3g.fchk'))
return ProAtomDB.from_files(fns)
def get_proatomdb_hf_lan():
'''Return a proatomdb of H, O, Si at hf/LANL2MB for testing purposes'''
fns = glob(context.get_fn('test/atom_???_???_hf_lan.fchk'))
return ProAtomDB.from_files(fns)
def get_fake_co():
# Define system
numbers = np.array([6, 8])
coordinates = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 2.132]])
# Load some pro-atoms
proatomdb = ProAtomDB.from_refatoms(numbers=[6, 8], max_kation=1, max_anion=1)
proatomdb.compact(0.02)
# Make fake cube data
origin = np.array([-3.0, -3.0, -3.0])
rvecs = np.identity(3, float)*0.2
shape = np.array([30, 30, 30+11])
ugrid = UniformGrid(origin, rvecs, shape, np.ones(3, int))
moldens = np.zeros(ugrid.shape)
setup = [
(0, {+1: 0.5, 0: 0.4, -1: 0.1}),
(1, {+1: 0.1, 0: 0.4, -1: 0.5}),
]
for i, lico in setup:
n = numbers[i]
c = coordinates[i]
spline = proatomdb.get_spline(n, lico)
ugrid.eval_spline(spline, c, moldens)
return coordinates, numbers, ugrid, moldens, proatomdb
def get_fake_pseudo_oo():
# Define system
numbers = np.array([8, 8])
coordinates = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 2.132]])
pseudo_numbers = np.array([6.0, 6.0])
# Load some pro-atoms
proatomdb = get_proatomdb_cp2k()
proatomdb.compact(0.02)
# Make fake cube data
origin = np.array([-3.0, -3.0, -3.0])
rvecs = np.identity(3, float)*0.2
shape = np.array([30, 30, 30+11])
ugrid = UniformGrid(origin, rvecs, shape, np.ones(3, int))
moldens = np.zeros(ugrid.shape)
setup = [
(0, {+1: 0.5, 0: 0.4, -1: 0.1}),
(1, {+1: 0.1, 0: 0.4, -1: 0.5}),
]
for i, lico in setup:
n = numbers[i]
c = coordinates[i]
spline = proatomdb.get_spline(n, lico)
ugrid.eval_spline(spline, c, moldens)
return coordinates, numbers, pseudo_numbers, ugrid, moldens, proatomdb
def check_names(names, part):
for name in names:
assert name in part.cache
def check_proatom_splines(part):
for index in xrange(part.natom):
spline = part.get_proatom_spline(index)
grid = part.get_grid(index)
array1 = grid.zeros()
part.eval_spline(index, spline, array1)
array2 = grid.zeros()
part.eval_proatom(index, array2)
assert abs(array1).max() != 0.0
assert abs(array1 - array2).max() < 1e-5
| eustislab/horton | horton/part/test/common.py | Python | gpl-3.0 | 3,891 | ["CP2K"] | 11713e4b833f78fdbdd0387e404a217ea0943ed453b4402c76d244e799613286 |
#!/usr/bin/env python3
from setuptools import setup
with open('README.rst') as fh:
long_description = fh.read()
setup(name='tacl',
version='2.2.0',
description='Text analyser for corpus linguistics',
long_description=long_description,
author='Jamie Norrish',
author_email='jamie@artefact.org.nz',
url='https://github.com/ajenhl/tacl',
packages=['tacl', 'tacl.command'],
entry_points = {
'console_scripts': [
'tacl=tacl.command.tacl_script:main',
'tacl-helper=tacl.command.tacl_helper_script:main',
],
},
package_data = {
'tacl': ['assets/templates/*.html'],
},
install_requires=['biopython', 'Jinja2', 'lxml', 'pandas>=0.17.0'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Programming Language :: Python :: 3',
'Topic :: Text Processing :: Linguistic',
],
test_suite = 'tests',
)
| cwittern/tacl | setup.py | Python | gpl-3.0 | 1,108 | ["Biopython"] | 7f173b0357cf565ce4f1c341ad83cd98b61e0650ece34175562fe490259e01b5 |
import sys
import numpy as np
lsun, pc = 3.846e33, 3.085677581467192e18
lightspeed = 2.998e18 #AA/s
#value to go from L_sun/AA to erg/s/cm^2/AA at 10pc
to_cgs = lsun/(4.0 * np.pi * (pc*10)**2 )
def burst_sfh(fwhm_burst=0.05, f_burst=0.5, contrast=5,
sfh=None, bin_res=20., **extras):
"""
Given a binned SFH as a numpy structured array, and burst
parameters, generate a realization of the SFH at high temporal
resolution. The output time resolution will be approximately
fwhm_burst/12 unless no bursts are generated, in which case the
output time resolution is the minimum bin width divided by
bin_res.
:param fwhm_burst: default 0.05
the fwhm of the bursts to add, in Gyr.
:param f_burst: default, 0.5
the fraction of stellar mass formed in each bin that is formed
in the bursts.
:param contrast: default, 5
the approximate maximum height or amplitude of the bursts
above the constant background SFR. This is only approximate
since it is altered to preserve f_burst and fwhm_burst even
though the number of bursts is quantized.
:param sfh: structured ndarray
A binned sfh in numpy structured array format. Usually the
result of sfhutils.load_angst_sfh()
:param bin_res: default 20
Factor by which to increase the time resolution of the output
grid, relative to the shortest bin width in the supplied SFH.
:returns times: ndarray of shape (nt)
The output linear, regular temporal grid of lookback times.
:returns sfr: ndarray of shape (nt)
The resulting SFR at each time.
:returns f_burst_actual:
In case f_burst changed due to burst number discretization.
Shouldn't happen though.
"""
a, tburst, A, sigma, f_burst_actual = [],[],[],[],[]
for i,abin in enumerate(sfh):
res = convert_burst_pars(fwhm_burst=fwhm_burst, f_burst=f_burst,
contrast=contrast,
bin_width=(abin['t2']-abin['t1']),
bin_sfr=abin['sfr'])
a += [res[0]]
if len(res[1]) > 0:
tburst += (res[1] + abin['t1']).tolist()
A += len(res[1]) * [res[2]]
sigma += len(res[1]) * [res[3]]
if len(sigma) == 0:
# If there were no bursts, set the time resolution to be
# 1/bin_res of the shortest bin width.
dt = (sfh['t2'] - sfh['t1']).min()/(1.0 * bin_res)
else:
dt = np.min(sigma)/5. #make sure you sample the bursts reasonably well
times = np.arange(np.round(sfh['t2'].max()/dt)) * dt
#times = np.arange(sfh[0]['t1'], sfh['t2'].max() + dt/2, dt)
sfr = gauss(times, tburst, A, sigma)
# Figure out which bin each time is in
bins = [sfh[0]['t1']] + sfh['t2'].tolist()
bin_num = np.digitize(times, bins) - 1
#if np.any(bin_num < 0):
# print('Warning - some times are not in the SFH')
has_bin = bin_num >= 0
sfr[has_bin] += np.array(a)[bin_num[has_bin]]
# Calculate SFR from all components
#sfr = np.array(a)[bin_num] + gauss(times, tburst, A, sigma)
return times, sfr, f_burst_actual
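# --- Editor's illustrative sketch (not part of the original module): build a
# minimal structured SFH with the 't1'/'t2'/'sfr' fields used above and run
# burst_sfh on it.  The bin values are arbitrary and the helper name is
# hypothetical; it relies only on numpy (already imported as np above).
def _demo_burst_sfh():
    sfh = np.zeros(2, dtype=[('t1', float), ('t2', float), ('sfr', float)])
    sfh['t1'] = [0.0, 1.0]
    sfh['t2'] = [1.0, 2.0]
    sfh['sfr'] = [1.0, 3.0]
    times, sfr, _ = burst_sfh(fwhm_burst=0.05, f_burst=0.5, contrast=5,
                              sfh=sfh, bin_res=20.)
    # The high-resolution SFR should integrate to roughly the binned mass (4).
    total_mass = (sfr[:-1] * np.diff(times)).sum()
    return times, sfr, total_mass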
def bursty_sps(lookback_time, lt, sfr, sps, **extras):
"""
Obtain the spectrum of a stellar population with arbitrary complex
SFH at a given lookback time. The SFH is provided in terms of SFR
vs t_lookback. Note that this is in contrast to the normal
specification in terms of time since the big bang. Interpolation
of the available SSPs to the time sequence of the SFH is
accomplished by linear interpolation in log t. Highly oscillatory
SFHs require dense sampling of the temporal axis to obtain
accurate results.
:param lookback_time: scalar or ndarray, shape (ntarg)
The lookback time(s) at which to obtain the spectrum. In yrs.
:param lt: ndarray, shape (ntime)
The lookback time sequence of the provided SFH. Assumed to
have equal linear time intervals.
:param sfr: ndarray, shape (ntime)
The SFR corresponding to each element of lt, in M_sun/yr.
:returns wave: ndarray, shape (nwave)
The wavelength array
:returns int_spec: ndarray, shape(ntarg, nwave)
The integrated spectrum at lookback_time, in L_sun/AA
:returns aw: ndarray, shape(ntarg, nage)
The total weights of each SSP spectrum for each requested
lookback_time. Useful for debugging.
"""
sps.params['sfh'] = 0 #make sure using SSPs
# Get *all* the ssps
zmet = sps.params['zmet']-1
spec, mass, _ = sps.all_ssp_spec(peraa =True, update = True)
spec = spec[:,:,zmet].T
mass = mass[:,zmet]
wave = sps.wavelengths
ssp_ages = 10**sps.ssp_ages #in yrs
target_lt = np.atleast_1d(lookback_time)
aw = sfh_weights(lt, sfr, ssp_ages, lookback_time=target_lt, **extras)
int_spec = (spec[None,:,:] * aw[:,:,None]).sum(axis=1)
mstar = (mass[None,:] * aw).sum(axis=-1)
return wave, int_spec, aw, mstar
def bursty_lf(lookback_time, lt, sfr, sps_lf, **extras):
"""
Obtain the luminosity function of stars for an arbitrary complex
SFH at a given lookback time. The SFH is provided in terms of SFR
vs t_lookback. Note that this in in contrast to the normal
specification in terms of time since the big bang.
:param lookback_time: scalar or ndarray, shape (ntarg)
The lookback time(s) at which to obtain the spectrum. In yrs.
:param lt: ndarray, shape (ntime)
The lookback time sequence of the provided SFH. Assumed to
have equal linear time intervals.
:param sfr: ndarray, shape (ntime)
The SFR corresponding to each element of lt, in M_sun/yr.
:param sps_lf:
Luminosity function information, as a dictionary. The keys of
the dictionary are 'bins', 'lf' and 'ssp_ages'
:returns bins:
The bins used to define the LF
:returns int_lf: ndarray, shape(ntarg, nbin)
The integrated LF at lookback_time, in L_sun/AA
:returns aw: ndarray, shape(ntarg, nage)
The total weights of each LF for each requested
lookback_time. Useful for debugging.
"""
bins, lf, ssp_ages = sps_lf['bins'], sps_lf['lf'], 10**sps_lf['ssp_ages']
target_lt = np.atleast_1d(lookback_time)
aw = sfh_weights(lt, sfr, ssp_ages, lookback_time=target_lt, **extras)
int_lf = (lf[None,:,:] * aw[:,:,None]).sum(axis=1)
return bins, int_lf, aw
def sfh_weights(lt, sfr, ssp_ages, lookback_time=0,
renormalize=True, **extras):
"""
:param lt: ndarray, shape (ntime)
The lookback time sequence of the provided SFH. Assumed to
have equal linear time intervals.
:param sfr: ndarray, shape (ntime)
The SFR corresponding to each element of lt, in M_sun/yr.
:param ssp_ages: ndarray, shape (nage)
The ages at which you want weights. Linear yrs.
:param lookback_time: scalar or ndarray, shape (ntarg)
The lookback time(s) at which to obtain the spectrum. In yrs.
:returns aw: ndarray, shape(ntarg, nage)
The total weights of each LF for each requested
lookback_time. Useful for debugging.
"""
dt = lt[1] - lt[0]
target_lt = np.atleast_1d(lookback_time)
aw = np.zeros( [ len(target_lt), len(ssp_ages) ] )
for i,tl in enumerate(target_lt):
valid = (lt >= tl) #only consider time points in the past of this lookback time.
#augment the t_lookback array of the SFH with the SSP ages
sfr_ssp = np.interp(ssp_ages, lt-tl, sfr, left=0.0, right=0.0)
tmp_t = np.concatenate([ssp_ages, lt[valid]-tl])
tmp_sfr = np.concatenate([sfr_ssp, sfr[valid]])
#sort the augmented array by lookback time
order = tmp_t.argsort()
tmp_t = tmp_t[order]
tmp_sfr = tmp_sfr[order]
# get weights to interpolate the log_t array
inds, weights = weights_1DLinear(ssp_ages, tmp_t, **extras)
# aggregate the weights for each ssp time index, after
# accounting for SFR *dt
tmp_dt = np.gradient(tmp_t)
agg_weights = np.bincount( inds.flatten(),
weights = (weights * tmp_sfr[:, None] *
tmp_dt[:, None]).flatten(),
minlength = len(ssp_ages) )
aw[i,:] = agg_weights
if renormalize:
aw[i,:] /= agg_weights.sum()
return aw
def gauss(x, mu, A, sigma):
"""
Project the sum of a sequence of gaussians onto the x vector,
using broadcasting.
:param x: ndarray
The array onto which the gaussians are to be projected.
:param mu:
Sequence of gaussian centers, same units as x.
:param A:
Sequence of gaussian normalization (that is, the area of the
gaussians), same length as mu.
:param sigma:
Sequence of gaussian standard deviations or dispersions, same
length as mu.
:returns value:
The value of the sum of the gaussians at positions x.
"""
mu, A, sigma = np.atleast_2d(mu), np.atleast_2d(A), np.atleast_2d(sigma)
val = A/(sigma * np.sqrt(np.pi * 2)) * np.exp(-(x[:,None] - mu)**2/(2 * sigma**2))
return val.sum(axis = -1)
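# Editorial sketch, not part of the original module: project two unit-mass
# gaussian bursts onto an arbitrary time grid with gauss (numpy assumed
# imported as np).
def _example_gauss():
    x = np.linspace(0.0, 1.0, 201)            # e.g. time within a bin, Gyr
    mu = np.array([0.25, 0.75])               # gaussian centers
    A = np.array([1.0, 1.0])                  # area (mass) of each gaussian
    sigma = np.array([0.02, 0.02])            # dispersions
    return gauss(x, mu, A, sigma)             # summed value of both gaussians at each x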
def convert_burst_pars(fwhm_burst = 0.05, f_burst=0.5, contrast=5,
bin_width=1.0, bin_sfr=1e9):
"""
Perform the conversion from a burst fraction, width, and
    'contrast' to a set of gaussian bursts stochastically
distributed in time, each characterized by a burst time, a width,
and an amplitude. Also returns the SFR in the non-bursting mode.
:param fwhm_burst: default 0.05
The fwhm of the bursts to add, in Gyr.
:param f_burst: default, 0.5
The fraction of stellar mass formed in each bin that is formed
in the bursts.
:param contrast: default, 5
The approximate maximum height or amplitude of the bursts
above the constant background SFR. This is only approximate
since it is altered to preserve f_burst and fwhm_burst even
        though the number of bursts is quantized.
:param bin_width: default, 1.0
The width of the bin in Gyr.
:param bin_sfr:
The average sfr for this time period. The total stellar mass
formed during this bin is just bin_sfr * bin_width.
:returns a:
The sfr of the non bursting constant component
:returns tburst:
A sequence of times, of length nburst, where the time gives
the time of the peak of the gaussian burst
:returns A:
A sequence of normalizations of length nburst. each A value
gives the stellar mass formed in that burst.
:returns sigma:
A sequence of burst widths. This is usually just
fwhm_burst/2.35 repeated nburst times.
"""
width, mstar = bin_width, bin_width * bin_sfr
if width < fwhm_burst * 2:
f_burst = 0.0 #no bursts if bin is short - they are resolved
# Constant SF component
a = mstar * (1 - f_burst) /width
# Determine burst_parameters
sigma = fwhm_burst / 2.355
maxsfr = contrast * a
A = maxsfr * (sigma * np.sqrt(np.pi * 2))
tburst = []
if A > 0:
        nburst = int(np.round(mstar * f_burst / A))  # integer number of bursts
# Recalculate A to preserve total mass formed in the face of
# burst number quantization
if nburst > 0:
A = mstar * f_burst / nburst
tburst = np.random.uniform(0,width, nburst)
else:
A = 0
a = mstar/width
else:
nburst = 0
a = mstar/width
return [a, tburst, A, sigma]
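# Editorial sketch, not part of the original module: convert one SFH bin into a
# constant component plus gaussian bursts and evaluate the resulting SFR with
# gauss. The bin parameters are arbitrary; numpy is assumed imported as np.
def _example_bursty_bin():
    a, tburst, A, sigma = convert_burst_pars(fwhm_burst=0.05, f_burst=0.5,
                                             contrast=5, bin_width=1.0,
                                             bin_sfr=1.0)
    t = np.linspace(0.0, 1.0, 500)            # time within the bin, Gyr
    # Constant component plus the bursts (gauss returns zeros if no bursts
    # were drawn for this bin).
    return t, a + gauss(t, tburst, A, sigma)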
def weights_1DLinear(model_points, target_points,
extrapolate = False, left=0., right=0.,
**extras):
"""The interpolation weights are determined from 1D linear
interpolation.
:param model_points: ndarray, shape(nmod)
        The parameter coordinate of the available models. Assumed to
        be sorted ascending.
:param target_points: ndarray, shape(ntarg)
The coordinate to which you wish to interpolate
:returns inds: ndarray, shape(ntarg,2)
The model indices of the interpolates
    :returns weights: ndarray, shape (ntarg,2)
The weights of each model given by ind in the interpolates.
"""
#well this is ugly.
mod_sorted = model_points
x_new_indices = np.searchsorted(mod_sorted, target_points)
x_new_indices = x_new_indices.clip(1, len(mod_sorted)-1).astype(int)
lo = x_new_indices - 1
hi = x_new_indices
x_lo = mod_sorted[lo]
x_hi = mod_sorted[hi]
width = x_hi - x_lo
w_lo = (x_hi - target_points)/width
w_hi = (target_points - x_lo)/width
if extrapolate is False:
#and of course I have these labels backwards
        above_scale = w_lo < 0 #find places where target is above or below the model range
below_scale = w_hi < 0
        lo[above_scale] = hi[above_scale] #set the indices to be identical in these cases
hi[below_scale] = lo[below_scale]
w_lo[above_scale] = 0 #make the combined weights sum to one
w_hi[above_scale] = left
w_hi[below_scale] = 0
w_lo[below_scale] = right
inds = np.vstack([lo,hi]).T
weights = np.vstack([w_lo, w_hi]).T
#inds = order[inds]
return inds, weights
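# Editorial sketch, not part of the original module: weights_1DLinear on a
# small, hypothetical grid (numpy assumed imported as np).
def _example_weights_1DLinear():
    model_points = np.array([1.0, 2.0, 4.0])       # sorted ascending
    target_points = np.array([1.5, 3.0, 5.0])      # the last target lies above the grid
    inds, weights = weights_1DLinear(model_points, target_points)
    # inds[0] is [0, 1] with weights [0.5, 0.5]; the out-of-range target gets
    # zero total weight because extrapolate defaults to False and the edge
    # fill values default to 0.
    return inds, weights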
|
bd-j/magellanic
|
magellanic/sfhs/sputils.py
|
Python
|
gpl-2.0
| 13,757
|
[
"Gaussian"
] |
e6cc3a6a9bbb52834eadbf82d40b0006a8c243760a7226a3f99bf84360baa0e2
|
"""
NetCDF reader/writer module.
This module is used to read and create NetCDF files. NetCDF files are
accessed through the `netcdf_file` object. Data written to and from NetCDF
files are contained in `netcdf_variable` objects. Attributes are given
as member variables of the `netcdf_file` and `netcdf_variable` objects.
This module implements the Scientific.IO.NetCDF API to read and create
NetCDF files. The same API is also used in the PyNIO and pynetcdf
modules, allowing these modules to be used interchangeably when working
with NetCDF files.
Only NetCDF3 is supported here; for NetCDF4 see
`netCDF4-python <http://unidata.github.io/netcdf4-python/>`__,
which has a similar API.
"""
from __future__ import division, print_function, absolute_import
# TODO:
# * properly implement ``_FillValue``.
# * fix character variables.
# * implement PAGESIZE for Python 2.6?
# The Scientific.IO.NetCDF API allows attributes to be added directly to
# instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate
# between user-set attributes and instance attributes, user-set attributes
# are automatically stored in the ``_attributes`` attribute by overloading
#``__setattr__``. This is the reason why the code sometimes uses
#``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``;
# otherwise the key would be inserted into userspace attributes.
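# Editorial sketch (not part of the original module) of the distinction
# described above, using a hypothetical file name:
#
#     f = netcdf_file('example.nc', 'w')
#     f.history = 'demo'          # routed through __setattr__, saved to the file
#     f.__dict__['mode'] = 'w'    # bypasses __setattr__, internal state only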
__all__ = ['netcdf_file', 'netcdf_variable']
import sys
import warnings
import weakref
from operator import mul
from collections import OrderedDict
import mmap as mm
import numpy as np
from numpy.compat import asbytes, asstr
from numpy import frombuffer, dtype, empty, array, asarray
from numpy import little_endian as LITTLE_ENDIAN
from functools import reduce
from scipy._lib.six import integer_types, text_type, binary_type
IS_PYPY = ('__pypy__' in sys.modules)
ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00'
ZERO = b'\x00\x00\x00\x00'
NC_BYTE = b'\x00\x00\x00\x01'
NC_CHAR = b'\x00\x00\x00\x02'
NC_SHORT = b'\x00\x00\x00\x03'
NC_INT = b'\x00\x00\x00\x04'
NC_FLOAT = b'\x00\x00\x00\x05'
NC_DOUBLE = b'\x00\x00\x00\x06'
NC_DIMENSION = b'\x00\x00\x00\n'
NC_VARIABLE = b'\x00\x00\x00\x0b'
NC_ATTRIBUTE = b'\x00\x00\x00\x0c'
FILL_BYTE = b'\x81'
FILL_CHAR = b'\x00'
FILL_SHORT = b'\x80\x01'
FILL_INT = b'\x80\x00\x00\x01'
FILL_FLOAT = b'\x7C\xF0\x00\x00'
FILL_DOUBLE = b'\x47\x9E\x00\x00\x00\x00\x00\x00'
TYPEMAP = {NC_BYTE: ('b', 1),
NC_CHAR: ('c', 1),
NC_SHORT: ('h', 2),
NC_INT: ('i', 4),
NC_FLOAT: ('f', 4),
NC_DOUBLE: ('d', 8)}
FILLMAP = {NC_BYTE: FILL_BYTE,
NC_CHAR: FILL_CHAR,
NC_SHORT: FILL_SHORT,
NC_INT: FILL_INT,
NC_FLOAT: FILL_FLOAT,
NC_DOUBLE: FILL_DOUBLE}
REVERSE = {('b', 1): NC_BYTE,
('B', 1): NC_CHAR,
('c', 1): NC_CHAR,
('h', 2): NC_SHORT,
('i', 4): NC_INT,
('f', 4): NC_FLOAT,
('d', 8): NC_DOUBLE,
# these come from asarray(1).dtype.char and asarray('foo').dtype.char,
# used when getting the types from generic attributes.
('l', 4): NC_INT,
('S', 1): NC_CHAR}
class netcdf_file(object):
"""
A file object for NetCDF data.
A `netcdf_file` object has two standard attributes: `dimensions` and
`variables`. The values of both are dictionaries, mapping dimension
names to their associated lengths and variable names to variables,
respectively. Application programs should never modify these
dictionaries.
All other attributes correspond to global attributes defined in the
NetCDF file. Global file attributes are created by assigning to an
attribute of the `netcdf_file` object.
Parameters
----------
filename : string or file-like
string -> filename
mode : {'r', 'w', 'a'}, optional
read-write-append mode, default is 'r'
mmap : None or bool, optional
Whether to mmap `filename` when reading. Default is True
when `filename` is a file name, False when `filename` is a
file-like object. Note that when mmap is in use, data arrays
returned refer directly to the mmapped data on disk, and the
file cannot be closed as long as references to it exist.
version : {1, 2}, optional
version of netcdf to read / write, where 1 means *Classic
format* and 2 means *64-bit offset format*. Default is 1. See
`here <https://www.unidata.ucar.edu/software/netcdf/docs/netcdf_introduction.html#select_format>`__
for more info.
maskandscale : bool, optional
Whether to automatically scale and/or mask data based on attributes.
Default is False.
Notes
-----
The major advantage of this module over other modules is that it doesn't
require the code to be linked to the NetCDF libraries. This module is
derived from `pupynere <https://bitbucket.org/robertodealmeida/pupynere/>`_.
NetCDF files are a self-describing binary data format. The file contains
metadata that describes the dimensions and variables in the file. More
details about NetCDF files can be found `here
<https://www.unidata.ucar.edu/software/netcdf/docs/user_guide.html>`__. There
are three main sections to a NetCDF data structure:
1. Dimensions
2. Variables
3. Attributes
The dimensions section records the name and length of each dimension used
    by the variables. Each variable then indicates which dimensions it
    uses and any attributes such as data units, and contains the data
    values for that variable. It is good practice to include a
    variable with the same name as a dimension to provide the values for
    that axis. Lastly, the attributes section would contain additional
information such as the name of the file creator or the instrument used to
collect the data.
When writing data to a NetCDF file, there is often the need to indicate the
'record dimension'. A record dimension is the unbounded dimension for a
variable. For example, a temperature variable may have dimensions of
latitude, longitude and time. If one wants to add more temperature data to
the NetCDF file as time progresses, then the temperature variable should
have the time dimension flagged as the record dimension.
In addition, the NetCDF file header contains the position of the data in
the file, so access can be done in an efficient manner without loading
unnecessary data into memory. It uses the ``mmap`` module to create
Numpy arrays mapped to the data on disk, for the same purpose.
Note that when `netcdf_file` is used to open a file with mmap=True
(default for read-only), arrays returned by it refer to data
directly on the disk. The file should not be closed, and cannot be cleanly
closed when asked, if such arrays are alive. You may want to copy data arrays
    obtained from a mmapped NetCDF file if they are to be processed after the file
is closed, see the example below.
Examples
--------
To create a NetCDF file:
>>> from scipy.io import netcdf
>>> f = netcdf.netcdf_file('simple.nc', 'w')
>>> f.history = 'Created for a test'
>>> f.createDimension('time', 10)
>>> time = f.createVariable('time', 'i', ('time',))
>>> time[:] = np.arange(10)
>>> time.units = 'days since 2008-01-01'
>>> f.close()
Note the assignment of ``arange(10)`` to ``time[:]``. Exposing the slice
of the time variable allows for the data to be set in the object, rather
than letting ``arange(10)`` overwrite the ``time`` variable.
To read the NetCDF file we just created:
>>> from scipy.io import netcdf
>>> f = netcdf.netcdf_file('simple.nc', 'r')
>>> print(f.history)
b'Created for a test'
>>> time = f.variables['time']
>>> print(time.units)
b'days since 2008-01-01'
>>> print(time.shape)
(10,)
>>> print(time[-1])
9
NetCDF files, when opened read-only, return arrays that refer
directly to memory-mapped data on disk:
>>> data = time[:]
>>> data.base.base
<mmap.mmap object at 0x7fe753763180>
If the data is to be processed after the file is closed, it needs
to be copied to main memory:
>>> data = time[:].copy()
>>> f.close()
>>> data.mean()
4.5
A NetCDF file can also be used as context manager:
>>> from scipy.io import netcdf
>>> with netcdf.netcdf_file('simple.nc', 'r') as f:
... print(f.history)
b'Created for a test'
"""
def __init__(self, filename, mode='r', mmap=None, version=1,
maskandscale=False):
"""Initialize netcdf_file from fileobj (str or file-like)."""
if mode not in 'rwa':
raise ValueError("Mode must be either 'r', 'w' or 'a'.")
if hasattr(filename, 'seek'): # file-like
self.fp = filename
self.filename = 'None'
if mmap is None:
mmap = False
elif mmap and not hasattr(filename, 'fileno'):
raise ValueError('Cannot use file object for mmap')
else: # maybe it's a string
self.filename = filename
omode = 'r+' if mode == 'a' else mode
self.fp = open(self.filename, '%sb' % omode)
if mmap is None:
                # Mmapped files on PyPy cannot usually be closed
# before the GC runs, so it's better to use mmap=False
# as the default.
mmap = (not IS_PYPY)
if mode != 'r':
# Cannot read write-only files
mmap = False
self.use_mmap = mmap
self.mode = mode
self.version_byte = version
self.maskandscale = maskandscale
self.dimensions = OrderedDict()
self.variables = OrderedDict()
self._dims = []
self._recs = 0
self._recsize = 0
self._mm = None
self._mm_buf = None
if self.use_mmap:
self._mm = mm.mmap(self.fp.fileno(), 0, access=mm.ACCESS_READ)
self._mm_buf = np.frombuffer(self._mm, dtype=np.int8)
self._attributes = OrderedDict()
if mode in 'ra':
self._read()
def __setattr__(self, attr, value):
# Store user defined attributes in a separate dict,
# so we can save them to file later.
try:
self._attributes[attr] = value
except AttributeError:
pass
self.__dict__[attr] = value
def close(self):
"""Closes the NetCDF file."""
if hasattr(self, 'fp') and not self.fp.closed:
try:
self.flush()
finally:
self.variables = OrderedDict()
if self._mm_buf is not None:
ref = weakref.ref(self._mm_buf)
self._mm_buf = None
if ref() is None:
# self._mm_buf is gc'd, and we can close the mmap
self._mm.close()
else:
# we cannot close self._mm, since self._mm_buf is
# alive and there may still be arrays referring to it
warnings.warn((
"Cannot close a netcdf_file opened with mmap=True, when "
"netcdf_variables or arrays referring to its data still exist. "
"All data arrays obtained from such files refer directly to "
"data on disk, and must be copied before the file can be cleanly "
"closed. (See netcdf_file docstring for more information on mmap.)"
), category=RuntimeWarning)
self._mm = None
self.fp.close()
__del__ = close
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def createDimension(self, name, length):
"""
Adds a dimension to the Dimension section of the NetCDF data structure.
Note that this function merely adds a new dimension that the variables can
reference. The values for the dimension, if desired, should be added as
a variable using `createVariable`, referring to this dimension.
Parameters
----------
name : str
Name of the dimension (Eg, 'lat' or 'time').
length : int
Length of the dimension.
See Also
--------
createVariable
"""
if length is None and self._dims:
raise ValueError("Only first dimension may be unlimited!")
self.dimensions[name] = length
self._dims.append(name)
def createVariable(self, name, type, dimensions):
"""
Create an empty variable for the `netcdf_file` object, specifying its data
type and the dimensions it uses.
Parameters
----------
name : str
Name of the new variable.
type : dtype or str
Data type of the variable.
dimensions : sequence of str
List of the dimension names used by the variable, in the desired order.
Returns
-------
variable : netcdf_variable
The newly created ``netcdf_variable`` object.
        This object has also been added to the `netcdf_file` object.
See Also
--------
createDimension
Notes
-----
Any dimensions to be used by the variable should already exist in the
NetCDF data structure or should be created by `createDimension` prior to
creating the NetCDF variable.
"""
shape = tuple([self.dimensions[dim] for dim in dimensions])
shape_ = tuple([dim or 0 for dim in shape]) # replace None with 0 for numpy
type = dtype(type)
typecode, size = type.char, type.itemsize
if (typecode, size) not in REVERSE:
raise ValueError("NetCDF 3 does not support type %s" % type)
data = empty(shape_, dtype=type.newbyteorder("B")) # convert to big endian always for NetCDF 3
self.variables[name] = netcdf_variable(
data, typecode, size, shape, dimensions,
maskandscale=self.maskandscale)
return self.variables[name]
def flush(self):
"""
Perform a sync-to-disk flush if the `netcdf_file` object is in write mode.
See Also
--------
sync : Identical function
"""
if hasattr(self, 'mode') and self.mode in 'wa':
self._write()
sync = flush
def _write(self):
self.fp.seek(0)
self.fp.write(b'CDF')
self.fp.write(array(self.version_byte, '>b').tostring())
# Write headers and data.
self._write_numrecs()
self._write_dim_array()
self._write_gatt_array()
self._write_var_array()
def _write_numrecs(self):
# Get highest record count from all record variables.
for var in self.variables.values():
if var.isrec and len(var.data) > self._recs:
self.__dict__['_recs'] = len(var.data)
self._pack_int(self._recs)
def _write_dim_array(self):
if self.dimensions:
self.fp.write(NC_DIMENSION)
self._pack_int(len(self.dimensions))
for name in self._dims:
self._pack_string(name)
length = self.dimensions[name]
self._pack_int(length or 0) # replace None with 0 for record dimension
else:
self.fp.write(ABSENT)
def _write_gatt_array(self):
self._write_att_array(self._attributes)
def _write_att_array(self, attributes):
if attributes:
self.fp.write(NC_ATTRIBUTE)
self._pack_int(len(attributes))
for name, values in attributes.items():
self._pack_string(name)
self._write_att_values(values)
else:
self.fp.write(ABSENT)
def _write_var_array(self):
if self.variables:
self.fp.write(NC_VARIABLE)
self._pack_int(len(self.variables))
# Sort variable names non-recs first, then recs.
def sortkey(n):
v = self.variables[n]
if v.isrec:
return (-1,)
return v._shape
variables = sorted(self.variables, key=sortkey, reverse=True)
# Set the metadata for all variables.
for name in variables:
self._write_var_metadata(name)
# Now that we have the metadata, we know the vsize of
# each record variable, so we can calculate recsize.
self.__dict__['_recsize'] = sum([
var._vsize for var in self.variables.values()
if var.isrec])
# Set the data for all variables.
for name in variables:
self._write_var_data(name)
else:
self.fp.write(ABSENT)
def _write_var_metadata(self, name):
var = self.variables[name]
self._pack_string(name)
self._pack_int(len(var.dimensions))
for dimname in var.dimensions:
dimid = self._dims.index(dimname)
self._pack_int(dimid)
self._write_att_array(var._attributes)
nc_type = REVERSE[var.typecode(), var.itemsize()]
self.fp.write(asbytes(nc_type))
if not var.isrec:
vsize = var.data.size * var.data.itemsize
vsize += -vsize % 4
else: # record variable
try:
vsize = var.data[0].size * var.data.itemsize
except IndexError:
vsize = 0
rec_vars = len([v for v in self.variables.values()
if v.isrec])
if rec_vars > 1:
vsize += -vsize % 4
self.variables[name].__dict__['_vsize'] = vsize
self._pack_int(vsize)
# Pack a bogus begin, and set the real value later.
self.variables[name].__dict__['_begin'] = self.fp.tell()
self._pack_begin(0)
def _write_var_data(self, name):
var = self.variables[name]
# Set begin in file header.
the_beguine = self.fp.tell()
self.fp.seek(var._begin)
self._pack_begin(the_beguine)
self.fp.seek(the_beguine)
# Write data.
if not var.isrec:
self.fp.write(var.data.tostring())
count = var.data.size * var.data.itemsize
self._write_var_padding(var, var._vsize - count)
else: # record variable
# Handle rec vars with shape[0] < nrecs.
if self._recs > len(var.data):
shape = (self._recs,) + var.data.shape[1:]
# Resize in-place does not always work since
# the array might not be single-segment
try:
var.data.resize(shape)
except ValueError:
var.__dict__['data'] = np.resize(var.data, shape).astype(var.data.dtype)
pos0 = pos = self.fp.tell()
for rec in var.data:
# Apparently scalars cannot be converted to big endian. If we
# try to convert a ``=i4`` scalar to, say, '>i4' the dtype
# will remain as ``=i4``.
if not rec.shape and (rec.dtype.byteorder == '<' or
(rec.dtype.byteorder == '=' and LITTLE_ENDIAN)):
rec = rec.byteswap()
self.fp.write(rec.tostring())
# Padding
count = rec.size * rec.itemsize
self._write_var_padding(var, var._vsize - count)
pos += self._recsize
self.fp.seek(pos)
self.fp.seek(pos0 + var._vsize)
def _write_var_padding(self, var, size):
encoded_fill_value = var._get_encoded_fill_value()
num_fills = size // len(encoded_fill_value)
self.fp.write(encoded_fill_value * num_fills)
def _write_att_values(self, values):
if hasattr(values, 'dtype'):
nc_type = REVERSE[values.dtype.char, values.dtype.itemsize]
else:
types = [(t, NC_INT) for t in integer_types]
types += [
(float, NC_FLOAT),
(str, NC_CHAR)
]
# bytes index into scalars in py3k. Check for "string" types
if isinstance(values, text_type) or isinstance(values, binary_type):
sample = values
else:
try:
sample = values[0] # subscriptable?
except TypeError:
sample = values # scalar
for class_, nc_type in types:
if isinstance(sample, class_):
break
typecode, size = TYPEMAP[nc_type]
dtype_ = '>%s' % typecode
# asarray() dies with bytes and '>c' in py3k. Change to 'S'
dtype_ = 'S' if dtype_ == '>c' else dtype_
values = asarray(values, dtype=dtype_)
self.fp.write(asbytes(nc_type))
if values.dtype.char == 'S':
nelems = values.itemsize
else:
nelems = values.size
self._pack_int(nelems)
if not values.shape and (values.dtype.byteorder == '<' or
(values.dtype.byteorder == '=' and LITTLE_ENDIAN)):
values = values.byteswap()
self.fp.write(values.tostring())
count = values.size * values.itemsize
self.fp.write(b'\x00' * (-count % 4)) # pad
def _read(self):
# Check magic bytes and version
magic = self.fp.read(3)
if not magic == b'CDF':
raise TypeError("Error: %s is not a valid NetCDF 3 file" %
self.filename)
self.__dict__['version_byte'] = frombuffer(self.fp.read(1), '>b')[0]
# Read file headers and set data.
self._read_numrecs()
self._read_dim_array()
self._read_gatt_array()
self._read_var_array()
def _read_numrecs(self):
self.__dict__['_recs'] = self._unpack_int()
def _read_dim_array(self):
header = self.fp.read(4)
if header not in [ZERO, NC_DIMENSION]:
raise ValueError("Unexpected header.")
count = self._unpack_int()
for dim in range(count):
name = asstr(self._unpack_string())
length = self._unpack_int() or None # None for record dimension
self.dimensions[name] = length
self._dims.append(name) # preserve order
def _read_gatt_array(self):
for k, v in self._read_att_array().items():
self.__setattr__(k, v)
def _read_att_array(self):
header = self.fp.read(4)
if header not in [ZERO, NC_ATTRIBUTE]:
raise ValueError("Unexpected header.")
count = self._unpack_int()
attributes = OrderedDict()
for attr in range(count):
name = asstr(self._unpack_string())
attributes[name] = self._read_att_values()
return attributes
def _read_var_array(self):
header = self.fp.read(4)
if header not in [ZERO, NC_VARIABLE]:
raise ValueError("Unexpected header.")
begin = 0
dtypes = {'names': [], 'formats': []}
rec_vars = []
count = self._unpack_int()
for var in range(count):
(name, dimensions, shape, attributes,
typecode, size, dtype_, begin_, vsize) = self._read_var()
# https://www.unidata.ucar.edu/software/netcdf/docs/user_guide.html
# Note that vsize is the product of the dimension lengths
# (omitting the record dimension) and the number of bytes
# per value (determined from the type), increased to the
# next multiple of 4, for each variable. If a record
# variable, this is the amount of space per record. The
# netCDF "record size" is calculated as the sum of the
# vsize's of all the record variables.
#
# The vsize field is actually redundant, because its value
# may be computed from other information in the header. The
# 32-bit vsize field is not large enough to contain the size
# of variables that require more than 2^32 - 4 bytes, so
# 2^32 - 1 is used in the vsize field for such variables.
if shape and shape[0] is None: # record variable
rec_vars.append(name)
# The netCDF "record size" is calculated as the sum of
# the vsize's of all the record variables.
self.__dict__['_recsize'] += vsize
if begin == 0:
begin = begin_
dtypes['names'].append(name)
dtypes['formats'].append(str(shape[1:]) + dtype_)
# Handle padding with a virtual variable.
if typecode in 'bch':
actual_size = reduce(mul, (1,) + shape[1:]) * size
padding = -actual_size % 4
if padding:
dtypes['names'].append('_padding_%d' % var)
dtypes['formats'].append('(%d,)>b' % padding)
# Data will be set later.
data = None
else: # not a record variable
# Calculate size to avoid problems with vsize (above)
a_size = reduce(mul, shape, 1) * size
if self.use_mmap:
data = self._mm_buf[begin_:begin_+a_size].view(dtype=dtype_)
data.shape = shape
else:
pos = self.fp.tell()
self.fp.seek(begin_)
data = frombuffer(self.fp.read(a_size), dtype=dtype_
).copy()
data.shape = shape
self.fp.seek(pos)
# Add variable.
self.variables[name] = netcdf_variable(
data, typecode, size, shape, dimensions, attributes,
maskandscale=self.maskandscale)
if rec_vars:
# Remove padding when only one record variable.
if len(rec_vars) == 1:
dtypes['names'] = dtypes['names'][:1]
dtypes['formats'] = dtypes['formats'][:1]
# Build rec array.
if self.use_mmap:
rec_array = self._mm_buf[begin:begin+self._recs*self._recsize].view(dtype=dtypes)
rec_array.shape = (self._recs,)
else:
pos = self.fp.tell()
self.fp.seek(begin)
rec_array = frombuffer(self.fp.read(self._recs*self._recsize),
dtype=dtypes).copy()
rec_array.shape = (self._recs,)
self.fp.seek(pos)
for var in rec_vars:
self.variables[var].__dict__['data'] = rec_array[var]
def _read_var(self):
name = asstr(self._unpack_string())
dimensions = []
shape = []
dims = self._unpack_int()
for i in range(dims):
dimid = self._unpack_int()
dimname = self._dims[dimid]
dimensions.append(dimname)
dim = self.dimensions[dimname]
shape.append(dim)
dimensions = tuple(dimensions)
shape = tuple(shape)
attributes = self._read_att_array()
nc_type = self.fp.read(4)
vsize = self._unpack_int()
begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]()
typecode, size = TYPEMAP[nc_type]
dtype_ = '>%s' % typecode
return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize
def _read_att_values(self):
nc_type = self.fp.read(4)
n = self._unpack_int()
typecode, size = TYPEMAP[nc_type]
count = n*size
values = self.fp.read(int(count))
self.fp.read(-count % 4) # read padding
        if typecode != 'c':
values = frombuffer(values, dtype='>%s' % typecode).copy()
if values.shape == (1,):
values = values[0]
else:
values = values.rstrip(b'\x00')
return values
def _pack_begin(self, begin):
if self.version_byte == 1:
self._pack_int(begin)
elif self.version_byte == 2:
self._pack_int64(begin)
def _pack_int(self, value):
self.fp.write(array(value, '>i').tostring())
_pack_int32 = _pack_int
def _unpack_int(self):
return int(frombuffer(self.fp.read(4), '>i')[0])
_unpack_int32 = _unpack_int
def _pack_int64(self, value):
self.fp.write(array(value, '>q').tostring())
def _unpack_int64(self):
return frombuffer(self.fp.read(8), '>q')[0]
def _pack_string(self, s):
count = len(s)
self._pack_int(count)
self.fp.write(asbytes(s))
self.fp.write(b'\x00' * (-count % 4)) # pad
def _unpack_string(self):
count = self._unpack_int()
s = self.fp.read(count).rstrip(b'\x00')
self.fp.read(-count % 4) # read padding
return s
class netcdf_variable(object):
"""
A data object for the `netcdf` module.
`netcdf_variable` objects are constructed by calling the method
`netcdf_file.createVariable` on the `netcdf_file` object. `netcdf_variable`
objects behave much like array objects defined in numpy, except that their
data resides in a file. Data is read by indexing and written by assigning
to an indexed subset; the entire array can be accessed by the index ``[:]``
or (for scalars) by using the methods `getValue` and `assignValue`.
`netcdf_variable` objects also have attribute `shape` with the same meaning
as for arrays, but the shape cannot be modified. There is another read-only
attribute `dimensions`, whose value is the tuple of dimension names.
All other attributes correspond to variable attributes defined in
the NetCDF file. Variable attributes are created by assigning to an
attribute of the `netcdf_variable` object.
Parameters
----------
data : array_like
The data array that holds the values for the variable.
Typically, this is initialized as empty, but with the proper shape.
typecode : dtype character code
Desired data-type for the data array.
size : int
Desired element size for the data array.
shape : sequence of ints
The shape of the array. This should match the lengths of the
variable's dimensions.
dimensions : sequence of strings
The names of the dimensions used by the variable. Must be in the
same order of the dimension lengths given by `shape`.
attributes : dict, optional
Attribute values (any type) keyed by string names. These attributes
become attributes for the netcdf_variable object.
maskandscale : bool, optional
Whether to automatically scale and/or mask data based on attributes.
Default is False.
Attributes
----------
dimensions : list of str
List of names of dimensions used by the variable object.
isrec, shape
Properties
See also
--------
isrec, shape
"""
def __init__(self, data, typecode, size, shape, dimensions,
attributes=None,
maskandscale=False):
self.data = data
self._typecode = typecode
self._size = size
self._shape = shape
self.dimensions = dimensions
self.maskandscale = maskandscale
self._attributes = attributes or OrderedDict()
for k, v in self._attributes.items():
self.__dict__[k] = v
def __setattr__(self, attr, value):
# Store user defined attributes in a separate dict,
# so we can save them to file later.
try:
self._attributes[attr] = value
except AttributeError:
pass
self.__dict__[attr] = value
def isrec(self):
"""Returns whether the variable has a record dimension or not.
A record dimension is a dimension along which additional data could be
easily appended in the netcdf data structure without much rewriting of
the data file. This attribute is a read-only property of the
`netcdf_variable`.
"""
return bool(self.data.shape) and not self._shape[0]
isrec = property(isrec)
def shape(self):
"""Returns the shape tuple of the data variable.
        This is a read-only attribute and cannot be modified in the
        same manner as other numpy arrays.
"""
return self.data.shape
shape = property(shape)
def getValue(self):
"""
Retrieve a scalar value from a `netcdf_variable` of length one.
Raises
------
ValueError
If the netcdf variable is an array of length greater than one,
this exception will be raised.
"""
return self.data.item()
def assignValue(self, value):
"""
Assign a scalar value to a `netcdf_variable` of length one.
Parameters
----------
value : scalar
Scalar value (of compatible type) to assign to a length-one netcdf
variable. This value will be written to file.
Raises
------
ValueError
If the input is not a scalar, or if the destination is not a length-one
netcdf variable.
"""
if not self.data.flags.writeable:
# Work-around for a bug in NumPy. Calling itemset() on a read-only
# memory-mapped array causes a seg. fault.
# See NumPy ticket #1622, and SciPy ticket #1202.
# This check for `writeable` can be removed when the oldest version
# of numpy still supported by scipy contains the fix for #1622.
raise RuntimeError("variable is not writeable")
self.data.itemset(value)
def typecode(self):
"""
Return the typecode of the variable.
Returns
-------
typecode : char
The character typecode of the variable (eg, 'i' for int).
"""
return self._typecode
def itemsize(self):
"""
Return the itemsize of the variable.
Returns
-------
itemsize : int
The element size of the variable (eg, 8 for float64).
"""
return self._size
def __getitem__(self, index):
if not self.maskandscale:
return self.data[index]
data = self.data[index].copy()
missing_value = self._get_missing_value()
data = self._apply_missing_value(data, missing_value)
scale_factor = self._attributes.get('scale_factor')
add_offset = self._attributes.get('add_offset')
if add_offset is not None or scale_factor is not None:
data = data.astype(np.float64)
if scale_factor is not None:
data = data * scale_factor
if add_offset is not None:
data += add_offset
return data
def __setitem__(self, index, data):
if self.maskandscale:
missing_value = (
self._get_missing_value() or
getattr(data, 'fill_value', 999999))
self._attributes.setdefault('missing_value', missing_value)
self._attributes.setdefault('_FillValue', missing_value)
data = ((data - self._attributes.get('add_offset', 0.0)) /
self._attributes.get('scale_factor', 1.0))
data = np.ma.asarray(data).filled(missing_value)
if self._typecode not in 'fd' and data.dtype.kind == 'f':
data = np.round(data)
# Expand data for record vars?
if self.isrec:
if isinstance(index, tuple):
rec_index = index[0]
else:
rec_index = index
if isinstance(rec_index, slice):
recs = (rec_index.start or 0) + len(data)
else:
recs = rec_index + 1
if recs > len(self.data):
shape = (recs,) + self._shape[1:]
# Resize in-place does not always work since
# the array might not be single-segment
try:
self.data.resize(shape)
except ValueError:
self.__dict__['data'] = np.resize(self.data, shape).astype(self.data.dtype)
self.data[index] = data
def _default_encoded_fill_value(self):
"""
The default encoded fill-value for this Variable's data type.
"""
nc_type = REVERSE[self.typecode(), self.itemsize()]
return FILLMAP[nc_type]
def _get_encoded_fill_value(self):
"""
Returns the encoded fill value for this variable as bytes.
This is taken from either the _FillValue attribute, or the default fill
value for this variable's data type.
"""
if '_FillValue' in self._attributes:
fill_value = np.array(self._attributes['_FillValue'],
dtype=self.data.dtype).tostring()
if len(fill_value) == self.itemsize():
return fill_value
else:
return self._default_encoded_fill_value()
else:
return self._default_encoded_fill_value()
def _get_missing_value(self):
"""
Returns the value denoting "no data" for this variable.
If this variable does not have a missing/fill value, returns None.
If both _FillValue and missing_value are given, give precedence to
_FillValue. The netCDF standard gives special meaning to _FillValue;
missing_value is just used for compatibility with old datasets.
"""
if '_FillValue' in self._attributes:
missing_value = self._attributes['_FillValue']
elif 'missing_value' in self._attributes:
missing_value = self._attributes['missing_value']
else:
missing_value = None
return missing_value
@staticmethod
def _apply_missing_value(data, missing_value):
"""
Applies the given missing value to the data array.
Returns a numpy.ma array, with any value equal to missing_value masked
out (unless missing_value is None, in which case the original array is
returned).
"""
if missing_value is None:
newdata = data
else:
try:
missing_value_isnan = np.isnan(missing_value)
except (TypeError, NotImplementedError):
# some data types (e.g., characters) cannot be tested for NaN
missing_value_isnan = False
if missing_value_isnan:
mymask = np.isnan(data)
else:
mymask = (data == missing_value)
newdata = np.ma.masked_where(mymask, data)
return newdata
NetCDFFile = netcdf_file
NetCDFVariable = netcdf_variable
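# Editorial sketch, not part of the original module: a minimal round trip
# demonstrating the maskandscale behaviour described in the docstrings above.
# The file name and packed values are hypothetical.
def _example_maskandscale():
    with netcdf_file('packed.nc', 'w') as f:
        f.createDimension('x', 3)
        v = f.createVariable('T', 'h', ('x',))
        v[:] = [100, 200, 300]           # packed int16 values
        v.scale_factor = 0.1             # stored as variable attributes
        v.add_offset = 273.0
    with netcdf_file('packed.nc', 'r', maskandscale=True, mmap=False) as g:
        # Unpacked on read as data * scale_factor + add_offset,
        # i.e. approximately [283., 293., 303.] here.
        data = g.variables['T'][:]
    return data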
|
gfyoung/scipy
|
scipy/io/netcdf.py
|
Python
|
bsd-3-clause
| 39,528
|
[
"NetCDF"
] |
626167d2c971a30cba626ac1e2caa8f616b8f957459a7d798a5c1f2b3465d019
|
# zh_TW.po
val = {" days." : " 天。",
"(all)" : "",
"(any)" : "",
"(anyone)" : "",
"(available)" : "",
"(blank)" : "",
"(both)" : "",
"(everyone)" : "",
"(master user, not editable)" : "",
"(no change)" : "",
"(no deduction)" : "",
"(none)" : "",
"(unknown)" : "",
"(use system)" : "",
"({0} given, {1} remaining)" : "",
"1 treatment" : "",
"1 week" : "",
"1 year" : "一年",
"2 weeks" : "",
"3 months" : "三個月",
"4 weeks" : "",
"5 Year" : "五年",
"6 months" : "六個月",
"6 weeks" : "",
"8 weeks" : "",
"9 months" : "九個月",
"A (Stray Dog)" : "A (流浪狗)",
"A description or other information about the animal" : "",
"A list of areas this person will homecheck - eg: S60 S61" : "",
"A movement must have a reservation date or type." : "",
"A person is required for this movement type." : "",
"A publish job is already running." : "",
"A short version of the reference number" : "",
"A task is already running." : "",
"A unique number to identify this movement" : "",
"A unique reference for this litter" : "",
"A4" : "",
"ACO" : "",
"AM" : "",
"ASM" : "",
"ASM 3 is compatible with your iPad and other tablets." : "",
"ASM News" : "",
"ASM can track detailed monthly and annual figures for your shelter. Install the Monthly Figures and Annual Figures reports from Settings-Reports-Browse sheltermanager.com" : "",
"ASM comes with a dictionary of 4,000 animal names. Just click the generate random name button when adding an animal." : "",
"ASM will remove this animal from the waiting list after a set number of weeks since the last owner contact date." : "",
"Abandoned" : "",
"Abuse" : "",
"Abyssinian" : "",
"Access System Menu" : "",
"Account" : "",
"Account Types" : "",
"Account code '{0}' has already been used." : "",
"Account code '{0}' is not valid." : "",
"Account code cannot be blank." : "",
"Account disabled." : "",
"Accountant" : "",
"Accounts" : "",
"Accounts need a code." : "",
"Active" : "",
"Active Incidents" : "",
"Active Trap Loans" : "",
"Active users: {0}" : "",
"Add" : "",
"Add Accounts" : "",
"Add Animal" : "",
"Add Animals" : "",
"Add Appointment" : "",
"Add Call" : "",
"Add Citations" : "",
"Add Clinic Appointment" : "",
"Add Cost" : "",
"Add Diary" : "",
"Add Diets" : "",
"Add Document to Repository" : "",
"Add Flag" : "",
"Add Found Animal" : "",
"Add Incidents" : "",
"Add Investigation" : "",
"Add Invoice Item" : "",
"Add Licenses" : "",
"Add Litter" : "",
"Add Log" : "",
"Add Log to Animal" : "",
"Add Lost Animal" : "",
"Add Media" : "",
"Add Medical Records" : "",
"Add Message" : "",
"Add Movement" : "",
"Add Payments" : "",
"Add Person" : "",
"Add Report" : "",
"Add Rota" : "",
"Add Stock" : "",
"Add Tests" : "",
"Add Transport" : "",
"Add Trap Loans" : "",
"Add Users" : "",
"Add Vaccinations" : "",
"Add Vouchers" : "",
"Add Waiting List" : "",
"Add a diary note" : "",
"Add a found animal" : "",
"Add a log entry" : "",
"Add a lost animal" : "",
"Add a medical regimen" : "",
"Add a new animal" : "",
"Add a new log" : "",
"Add a new person" : "",
"Add a person" : "",
"Add a photo" : "",
"Add a test" : "",
"Add a vaccination" : "",
"Add account" : "",
"Add additional field" : "",
"Add an animal to the waiting list" : "",
"Add citation" : "",
"Add cost" : "",
"Add details of this email to the log after sending" : "",
"Add diary" : "",
"Add diary task" : "",
"Add diet" : "",
"Add extra images for use in reports and documents" : "",
"Add form field" : "",
"Add found animal" : "",
"Add investigation" : "",
"Add license" : "",
"Add litter" : "",
"Add log" : "",
"Add lost animal" : "",
"Add medical profile" : "",
"Add medical regimen" : "",
"Add message" : "",
"Add movement" : "",
"Add online form" : "",
"Add payment" : "",
"Add person" : "",
"Add report" : "",
"Add role" : "",
"Add rota item" : "",
"Add stock" : "",
"Add template" : "",
"Add test" : "",
"Add this text to all animal descriptions" : "",
"Add to log" : "",
"Add transport" : "",
"Add trap loan" : "",
"Add user" : "",
"Add vaccination" : "",
"Add voucher" : "",
"Add waiting list" : "",
"Add {0}" : "",
"Added" : "",
"Added by {0} on {1}" : "",
"Additional" : "",
"Additional Fields" : "",
"Additional date field '{0}' contains an invalid date." : "",
"Additional fields" : "",
"Additional fields need a name, label and type." : "",
"Address" : "地址",
"Address Contains" : "",
"Address contains" : "",
"Administered" : "",
"Administering Vet" : "",
"Adopt" : "",
"Adopt an animal" : "",
"Adoptable" : "",
"Adoptable Animal" : "",
"Adoptable and published for the first time" : "",
"Adopted" : "",
"Adopted Animals" : "",
"Adopted Transferred In {0}" : "",
"Adoption" : "",
"Adoption Coordinator" : "",
"Adoption Coordinator and Fosterer" : "",
"Adoption Event" : "",
"Adoption Fee" : "",
"Adoption Number" : "",
"Adoption fee donations" : "",
"Adoption movements must have a valid adoption date." : "",
"Adoption successfully created." : "",
"Adoptions {0}" : "",
"Adult" : "",
"Advanced" : "",
"Advanced find animal screen defaults to on shelter" : "",
"Affenpinscher" : "",
"Afghan Hound" : "阿富汗獵犬",
"African Grey" : "非洲灰",
"After the user presses submit and ASM has accepted the form, redirect the user to this URL" : "",
"Age" : "",
"Age Group" : "",
"Age Group 1" : "",
"Age Group 2" : "",
"Age Group 3" : "",
"Age Group 4" : "",
"Age Group 5" : "",
"Age Group 6" : "",
"Age Group 7" : "",
"Age Group 8" : "",
"Age Groups" : "",
"Age groups are assigned based on the age of an animal. The figure in the left column is the upper limit in years for that group." : "",
"Aged Between" : "",
"Aged From" : "",
"Aged To" : "",
"Aggression" : "",
"Airedale Terrier" : "萬能梗",
"Akbash" : "",
"Akita" : "秋田",
"Alaskan Malamute" : "阿拉斯加雪橇犬",
"Alerts" : "",
"All Animals" : "",
"All On-Shelter Animals" : "",
"All Publishers" : "",
"All accounts" : "",
"All animal care officers on file." : "",
"All animal shelters on file." : "",
"All animals matching current publishing options." : "",
"All animals on the shelter." : "",
"All animals where the hold ends today." : "",
"All animals who are currently held in case of reclaim." : "",
"All animals who are currently quarantined." : "",
"All animals who are flagged as not for adoption." : "",
"All animals who have been on the shelter longer than {0} months." : "",
"All animals who have not been microchipped" : "",
"All banned owners on file." : "",
"All diary notes" : "",
"All donors on file." : "",
"All drivers on file." : "",
"All existing data in your database will be REMOVED before importing the CSV file. This removal cannot be reversed." : "",
"All fields should be completed." : "",
"All fosterers on file." : "",
"All homechecked owners on file." : "",
"All homecheckers on file." : "",
"All members on file." : "",
"All notes upto today" : "",
"All people on file." : "",
"All retailers on file." : "",
"All staff on file." : "",
"All time" : "",
"All vets on file." : "",
"All volunteers on file." : "",
"Allergies" : "過敏",
"Allow a fosterer to be selected" : "",
"Allow an adoption coordinator to be selected" : "",
"Allow creation of payments on the Move-Reserve screen" : "",
"Allow drag and drop to move animals between locations" : "",
"Allow duplicate license numbers" : "",
"Allow duplicate microchip numbers" : "",
"Allow overriding of the movement number on the Move menu screens" : "",
"Allow use of OpenOffice document templates" : "",
"Alphabetically A-Z" : "",
"Alphabetically Z-A" : "",
"Already Signed" : "",
"Already fostered to this person." : "",
"Altered" : "",
"Altered Date" : "",
"Altered Dog - 1 year" : "",
"Altered Dog - 3 year" : "",
"Altering Vet" : "",
"Always show an emblem to indicate the current location" : "",
"Amazon" : "亞馬遜",
"Amber" : "",
"American" : "",
"American Bulldog" : "美國鬥牛犬",
"American Curl" : "美國捲曲",
"American Eskimo Dog" : "美國愛斯基摩犬",
"American Fuzzy Lop" : "",
"American Sable" : "",
"American Shorthair" : "",
"American Staffordshire Terrier" : "",
"American Water Spaniel" : "",
"American Wirehair" : "",
"Amount" : "",
"An age in years, eg: 1, 0.5" : "",
"An animal cannot have multiple open movements." : "",
"An optional comma separated list of email addresses to send the output of this report to" : "",
"Anatolian Shepherd" : "",
"Angora Rabbit" : "",
"Animal" : "",
"Animal '{0}' created with code {1}" : "",
"Animal '{0}' successfully marked deceased." : "",
"Animal (optional)" : "",
"Animal (via animalname field)" : "",
"Animal - Additional" : "",
"Animal - Death" : "",
"Animal - Details" : "",
"Animal - Entry" : "",
"Animal - Health and Identification" : "",
"Animal - Notes" : "",
"Animal Codes" : "",
"Animal Control" : "",
"Animal Control Caller" : "",
"Animal Control Incident" : "",
"Animal Control Officer" : "",
"Animal Control Victim" : "",
"Animal Emblems" : "",
"Animal Flags" : "",
"Animal Links" : "",
"Animal Name" : "",
"Animal Selection" : "",
"Animal Shelter Manager" : "",
"Animal Shelter Manager Login" : "",
"Animal Sponsorship" : "",
"Animal Type" : "",
"Animal Types" : "",
"Animal board costs" : "",
"Animal cannot be deceased before it was brought to the shelter" : "",
"Animal code format" : "",
"Animal comments MUST contain this phrase in order to match." : "",
"Animal control calendar" : "",
"Animal control incidents matching '{0}'." : "",
"Animal defecation" : "",
"Animal descriptions" : "",
"Animal destroyed" : "",
"Animal emblems are the little icons that appear next to animal names in shelter view, the home page and search results." : "",
"Animal food costs" : "",
"Animal picked up" : "",
"Animal shortcode format" : "",
"Animals" : "",
"Animals at large" : "",
"Animals left in vehicle" : "",
"Animals matching '{0}'." : "",
"Animals per page" : "",
"Annual" : "",
"Annually" : "",
"Anonymize" : "",
"Anonymize personal data after this many years" : "",
"Any animal types, species, breeds, colors, locations, etc. in the CSV file that aren't already in the database will be created during the import." : "",
"Any health problems the animal has" : "",
"Any information about the animal" : "",
"Any markings or distinguishing features the animal has" : "",
"Appaloosa" : "",
"Appenzell Mountain Dog" : "",
"Applehead Siamese" : "",
"Appointment" : "",
"Appointment date must be a valid date" : "",
"Appointment {0}. {1} on {2} for {3}" : "",
"Appointments need a date and time." : "",
"Approved" : "",
"Apr" : "",
"April" : "",
"Arabian" : "",
"Area" : "",
"Area Found" : "",
"Area Lost" : "",
"Area Postcode" : "",
"Area where the animal was found" : "",
"Area where the animal was lost" : "",
"Areas" : "",
"Arrived" : "",
"Asset" : "",
"Asset::Premises" : "",
"At least the last name should be completed." : "",
"Attach" : "",
"Attach File" : "",
"Attach Link" : "",
"Attach a file" : "",
"Attach a link to a web resource" : "",
"Attach link" : "",
"Audit Trail" : "",
"Aug" : "",
"August" : "",
"Australian Cattle Dog/Blue Heeler" : "",
"Australian Kelpie" : "",
"Australian Shepherd" : "",
"Australian Terrier" : "",
"Auto log users out after this many minutes of inactivity" : "",
"Auto removed due to lack of owner contact." : "",
"Automatically cancel any outstanding reservations on an animal when it is adopted" : "",
"Automatically remove" : "",
"Automatically return any outstanding foster movements on an animal when it is adopted" : "",
"Automatically return any outstanding foster movements on an animal when it is transferred" : "",
"Available for adoption" : "",
"Available sheltermanager.com reports" : "",
"B (Boarding Animal)" : "",
"Baby" : "",
"Balance" : "",
"Balinese" : "",
"Bank" : "",
"Bank account interest" : "",
"Bank current account" : "",
"Bank deposit account" : "",
"Bank savings account" : "",
"Bank::Current" : "",
"Bank::Deposit" : "",
"Bank::Savings" : "",
"Banned" : "",
"Base Color" : "",
"Basenji" : "",
"Basset Hound" : "",
"Batch" : "",
"Batch Number" : "",
"Beagle" : "",
"Bearded Collie" : "",
"Beauceron" : "",
"Bedlington Terrier" : "",
"Beginning of month" : "",
"Belgian Hare" : "",
"Belgian Shepherd Dog Sheepdog" : "",
"Belgian Shepherd Laekenois" : "",
"Belgian Shepherd Malinois" : "",
"Belgian Shepherd Tervuren" : "",
"Bengal" : "",
"Bernese Mountain Dog" : "",
"Beveren" : "",
"Bichon Frise" : "",
"Bird" : "",
"Birman" : "",
"Bite" : "",
"Biting" : "",
"Black" : "",
"Black Labrador Retriever" : "",
"Black Mouth Cur" : "",
"Black Tortie" : "",
"Black and Brindle" : "",
"Black and Brown" : "",
"Black and Tan" : "",
"Black and Tan Coonhound" : "",
"Black and White" : "",
"Bloodhound" : "",
"Blue" : "",
"Blue Tortie" : "",
"Bluetick Coonhound" : "",
"Board and Food" : "",
"Boarding" : "",
"Boarding Cost" : "",
"Boarding cost type" : "",
"Bobtail" : "",
"Body" : "",
"Bombay" : "",
"Bonded" : "",
"Bonded With" : "",
"Books" : "",
"Border Collie" : "",
"Border Terrier" : "",
"Bordetella" : "",
"Born in Shelter" : "",
"Born on Foster {0}" : "",
"Born on Shelter {0}" : "",
"Borzoi" : "",
"Boston Terrier" : "",
"Both" : "",
"Bouvier des Flanders" : "",
"Boxer" : "",
"Boykin Spaniel" : "",
"Breed" : "",
"Breed to use when publishing to third party services and adoption sites" : "",
"Breeds" : "",
"Briard" : "",
"Brindle" : "",
"Brindle and Black" : "",
"Brindle and White" : "",
"Britannia Petite" : "",
"British Shorthair" : "",
"Brittany Spaniel" : "",
"Brotogeris" : "",
"Brought In" : "",
"Brought In By" : "",
"Brown" : "",
"Brown and Black" : "",
"Brown and White" : "",
"Browse sheltermanager.com" : "",
"Browse sheltermanager.com and install some reports, charts and mail merges into your new system." : "",
"Brussels Griffon" : "",
"Budgie/Budgerigar" : "",
"Bulk Complete Diary" : "",
"Bulk Complete Medical Records" : "",
"Bulk Complete Vaccinations" : "",
"Bulk Complete Waiting List" : "",
"Bulk Regimen" : "",
"Bulk Test" : "",
"Bulk Transport" : "",
"Bulk Vaccination" : "",
"Bulk change animals" : "",
"Bull Terrier" : "",
"Bullmastiff" : "",
"Bunny Rabbit" : "",
"Burmese" : "",
"Burmilla" : "",
"By" : "",
"CC" : "",
"CSV of animal/adopter data" : "",
"CSV of animal/medical data" : "",
"CSV of incident data" : "",
"CSV of license data" : "",
"CSV of payment data" : "",
"CSV of person data" : "",
"Caique" : "",
"Cairn Terrier" : "",
"Calendar View" : "",
"Calendar view" : "",
"Calico" : "",
"Californian" : "",
"Call" : "",
"Call Date/Time" : "",
"Caller" : "",
"Caller Name" : "",
"Caller Phone" : "",
"Camel" : "",
"Can Login" : "",
"Can afford donation?" : "",
"Can't reserve an animal that has an active movement." : "",
"Canaan Dog" : "",
"Canadian Hairless" : "",
"Canary" : "",
"Cancel" : "",
"Cancel holds on animals this many days after the brought in date, or 0 to never cancel" : "",
"Cancel unadopted reservations after" : "",
"Cancel unadopted reservations after this many days, or 0 to never cancel" : "",
"Cancelled" : "",
"Cancelled Reservation" : "",
"Cane Corso Mastiff" : "",
"Carolina Dog" : "",
"Cash" : "",
"Cat" : "",
"Catahoula Leopard Dog" : "",
"Category" : "",
"Cats" : "",
"Cattery" : "",
"Cattle Dog" : "",
"Cavalier King Charles Spaniel" : "",
"Cell" : "",
"Cell Phone" : "",
"Champagne D'Argent" : "",
"Change" : "",
"Change Accounts" : "",
"Change Animals" : "",
"Change Citations" : "",
"Change Clinic Apointment" : "",
"Change Cost" : "",
"Change Date Required" : "",
"Change Diets" : "",
"Change Found Animal" : "",
"Change Incidents" : "",
"Change Investigation" : "",
"Change Licenses" : "",
"Change Litter" : "",
"Change Log" : "",
"Change Lost Animal" : "",
"Change Media" : "",
"Change Medical Records" : "",
"Change Movement" : "",
"Change Password" : "",
"Change Payments" : "",
"Change Person" : "",
"Change Publishing Options" : "",
"Change Report" : "",
"Change Rota" : "",
"Change Stock" : "",
"Change System Options" : "",
"Change Tests" : "",
"Change Transactions" : "",
"Change Transport" : "",
"Change Trap Loans" : "",
"Change User Settings" : "",
"Change Vaccinations" : "",
"Change Vouchers" : "",
"Change Waiting List" : "",
"Change date required on selected treatments" : "",
"Changed Mind" : "",
"Chart" : "",
"Chart (Bar)" : "",
"Chart (Line)" : "",
"Chart (Pie)" : "",
"Chart (Point)" : "",
"Chart (Steps)" : "",
"Chartreux" : "",
"Check" : "",
"Check License" : "",
"Check No" : "",
"Checkbox" : "",
"Checked By" : "",
"Checkered Giant" : "",
"Cheque" : "",
"Chesapeake Bay Retriever" : "",
"Chicken" : "",
"Chihuahua" : "",
"Children" : "",
"Chinchilla" : "",
"Chinese Crested Dog" : "",
"Chinese Foo Dog" : "",
"Chlamydophila" : "",
"Chocolate" : "",
"Chocolate Labrador Retriever" : "",
"Chocolate Tortie" : "",
"Chow Chow" : "",
"Cinnamon" : "",
"Cinnamon Tortoiseshell" : "",
"Citation Type" : "",
"Citation Types" : "",
"Citations" : "",
"City" : "",
"City contains" : "",
"Class" : "",
"Clear" : "",
"Clear and sign again" : "",
"Clear tables before importing" : "",
"Clinic" : "",
"Clinic Calendar" : "",
"Clinic Invoice - {0}" : "",
"Clinic Statuses" : "",
"Clone" : "",
"Clone Animals" : "",
"Clone Rota" : "",
"Clone the rota this week to another week" : "",
"Cloning..." : "",
"Close" : "",
"Clumber Spaniel" : "",
"Clydesdale" : "",
"Coat" : "",
"Coat Type" : "",
"Coat Types" : "",
"Cockapoo" : "",
"Cockatiel" : "",
"Cockatoo" : "",
"Cocker Spaniel" : "",
"Code" : "",
"Code contains" : "",
"Code format tokens:" : "",
"Collie" : "",
"Color" : "",
"Color to use when publishing to third party services and adoption sites" : "",
"Colors" : "",
"Columns" : "",
"Columns displayed" : "",
"Comma separated list of units for this location, eg: 1,2,3,4,Isolation,Pen 5" : "",
"Comments" : "",
"Comments Contain" : "",
"Comments contain" : "",
"Comments copied to web preferred media." : "",
"Complaint" : "",
"Complete" : "",
"Complete Tasks" : "",
"Completed" : "",
"Completed Between" : "",
"Completed Type" : "",
"Completed notes upto today" : "",
"Completion Date" : "",
"Completion Type" : "",
"Configuration" : "",
"Confirm" : "",
"Confirm Password" : "",
"Confirmation message" : "",
"Confirmed" : "",
"Consulting Room" : "",
"Consulting Room - {0}" : "",
"Consumed" : "",
"Contact" : "",
"Contact Contains" : "",
"Conure" : "",
"Convert this reservation to an adoption" : "",
"Coonhound" : "",
"Copy animal comments to the notes field of the web preferred media for this animal" : "",
"Copy from animal comments" : "",
"Copy of {0}" : "",
"Corded" : "",
"Corgi" : "",
"Cornish Rex" : "",
"Cost" : "",
"Cost For" : "",
"Cost Type" : "",
"Cost Types" : "",
"Cost date must be a valid date" : "",
"Cost record" : "",
"Costs" : "",
"Costs need a date and amount." : "",
"Coton de Tulear" : "",
"Could not find animal with name '{0}'" : "",
"Country" : "",
"Courtesy Listing" : "",
"Cow" : "",
"Cream" : "",
"Create" : "",
"Create Animal" : "",
"Create Log" : "",
"Create Payment" : "",
"Create Waiting List" : "",
"Create a cost record" : "",
"Create a due or received payment record from this appointment" : "",
"Create a new animal by copying this one" : "",
"Create a new animal from this found animal record" : "",
"Create a new animal from this incident" : "",
"Create a new animal from this waiting list entry" : "",
"Create a new document" : "",
"Create a new template" : "",
"Create a new template by copying the selected template" : "",
"Create a new waiting list entry from this found animal record" : "",
"Create and edit" : "",
"Create boarding cost record when animal is adopted" : "",
"Create diary notes from a task" : "",
"Create missing lookup values" : "",
"Create note this many days from today, or 9999 to ask" : "",
"Create this message" : "",
"Create this person" : "",
"Created By" : "",
"Creating cost and cost types creates matching accounts and transactions" : "",
"Creating payments and payments types creates matching accounts and transactions" : "",
"Creating..." : "",
"Credit Card" : "",
"Creme D'Argent" : "",
"Criteria:" : "",
"Crossbreed" : "",
"Cruelty Case" : "",
"Culling" : "",
"Curly" : "",
"Current" : "",
"Current Vet" : "",
"Cymric" : "",
"D (Dog)" : "",
"DD = current day" : "",
"DDL dump (DB2)" : "",
"DDL dump (MySQL)" : "",
"DDL dump (PostgreSQL)" : "",
"DHLPP" : "",
"DO NOT use this field to store notes about what the person is looking for." : "",
"DOA {0}" : "",
"DOB" : "",
"Dachshund" : "",
"Daily Boarding Cost" : "",
"Dalmatian" : "",
"Dandi Dinmont Terrier" : "",
"Data" : "",
"Data Protection" : "",
"Database" : "",
"Date" : "",
"Date '{0}' is not valid." : "",
"Date Brought In" : "",
"Date Found" : "",
"Date Lost" : "",
"Date Of Birth" : "",
"Date Put On" : "",
"Date Removed" : "",
"Date Reported" : "",
"Date and notes are mandatory." : "",
"Date brought in cannot be blank" : "",
"Date brought in cannot be in the future." : "",
"Date brought in is not valid" : "",
"Date found cannot be blank" : "",
"Date found cannot be blank." : "",
"Date lost cannot be blank" : "",
"Date lost cannot be blank." : "",
"Date of Birth" : "",
"Date of birth cannot be blank" : "",
"Date of birth cannot be in the future." : "",
"Date of birth is not valid" : "",
"Date of last owner contact" : "",
"Date put on" : "",
"Date put on cannot be blank" : "",
"Date put on list" : "",
"Date removed" : "",
"Date reported cannot be blank" : "",
"Date reported cannot be blank." : "",
"Date/Time" : "",
"Day" : "",
"Day Pivot" : "",
"Days On Shelter" : "",
"Dead On Arrival" : "",
"Dead animal" : "",
"Dead on arrival" : "",
"Death" : "",
"Death Comments" : "",
"Death Reason" : "",
"Death Reasons" : "",
"Debit Card" : "",
"Dec" : "",
"Deceased" : "",
"Deceased Date" : "",
"December" : "",
"Declawed" : "",
"Declined" : "",
"Default Breed" : "",
"Default Brought In By" : "",
"Default Coat Type" : "",
"Default Color" : "",
"Default Cost" : "",
"Default Death Reason" : "",
"Default Diary Person" : "",
"Default Entry Reason" : "",
"Default Incident Type" : "",
"Default Location" : "",
"Default Log Filter" : "",
"Default Log Type" : "",
"Default Payment Method" : "",
"Default Payment Type" : "",
"Default Reservation Status" : "",
"Default Return Reason" : "",
"Default Rota Shift" : "",
"Default Size" : "",
"Default Species" : "",
"Default Test Type" : "",
"Default Type" : "",
"Default Vaccination Type" : "",
"Default Value" : "",
"Default daily boarding cost" : "",
"Default destination account for payments" : "",
"Default image for documents" : "",
"Default image for this record and the web" : "",
"Default source account for costs" : "",
"Default to advanced find animal screen" : "",
"Default to advanced find person screen" : "",
"Default transaction view" : "",
"Default urgency" : "",
"Default video for publishing" : "",
"Default view" : "",
"Defaults" : "",
"Defaults formats for code and shortcode are TYYYYNNN and NNT" : "",
"Delete" : "",
"Delete Accounts" : "",
"Delete Animals" : "",
"Delete Citations" : "",
"Delete Clinic Appointment" : "",
"Delete Cost" : "",
"Delete Diary" : "",
"Delete Diets" : "",
"Delete Document from Repository" : "",
"Delete Found Animal" : "",
"Delete Incidents" : "",
"Delete Incoming Forms" : "",
"Delete Investigation" : "",
"Delete Licenses" : "",
"Delete Litter" : "",
"Delete Log" : "",
"Delete Lost Animal" : "",
"Delete Media" : "",
"Delete Medical Records" : "",
"Delete Movement" : "",
"Delete Payments" : "",
"Delete Person" : "",
"Delete Regimen" : "",
"Delete Report" : "",
"Delete Rota" : "",
"Delete Stock" : "",
"Delete Tests" : "",
"Delete Transport" : "",
"Delete Trap Loans" : "",
"Delete Treatments" : "",
"Delete Vaccinations" : "",
"Delete Vouchers" : "",
"Delete Waiting List" : "",
"Delete all rota entries for this week" : "",
"Delete this animal" : "",
"Delete this incident" : "",
"Delete this person" : "",
"Delete this record" : "",
"Delete this waiting list entry" : "",
"Denied" : "",
"Deposit" : "",
"Deposit Account" : "",
"Deposit Returned" : "",
"Description" : "",
"Description Contains" : "",
"Description cannot be blank" : "",
"Deselect" : "",
"Details" : "",
"Devon Rex" : "",
"Dialog title" : "",
"Diary" : "",
"Diary Task" : "",
"Diary Task: {0}" : "",
"Diary Tasks" : "",
"Diary and Messages" : "",
"Diary calendar" : "",
"Diary date cannot be blank" : "",
"Diary date is not valid" : "",
"Diary for {0}" : "",
"Diary note cannot be blank" : "",
"Diary note {0} marked completed" : "",
"Diary note {0} rediarised for {1}" : "",
"Diary notes for: {0}" : "",
"Diary notes need a date and subject." : "",
"Diary subject cannot be blank" : "",
"Diary task items need a pivot, subject and note." : "",
"Diary tasks need a name." : "",
"Did not ask" : "",
"Did you know?" : "",
"Died" : "",
"Died off shelter" : "",
"Died {0}" : "",
"Diet" : "",
"Diets" : "",
"Diets need a start date." : "",
"Dispatch" : "",
"Dispatch Address" : "",
"Dispatch Between" : "",
"Dispatch Date/Time" : "",
"Dispatch {0}: {1}" : "",
"Dispatched ACO" : "",
"Display" : "",
"Display Index" : "",
"Display a search button at the right side of the search box" : "",
"Distemper" : "",
"Do Not Publish" : "",
"Do Not Register Microchip" : "",
"Do not show" : "",
"Doberman Pinscher" : "",
"Document" : "",
"Document Link" : "",
"Document Repository" : "",
"Document Templates" : "",
"Document file" : "",
"Document signed" : "",
"Document signing request" : "",
"Document templates" : "",
"Documents" : "",
"Dog" : "",
"Dogo Argentino" : "",
"Dogs" : "",
"Dogue de Bordeaux" : "",
"Domestic Long Hair" : "",
"Domestic Medium Hair" : "",
"Domestic Short Hair" : "",
"Don't create a cost record" : "",
"Don't scale" : "",
"Donated" : "",
"Donation" : "",
"Donation?" : "",
"Donations for animals entering the shelter" : "",
"Done" : "",
"Donkey" : "",
"Donkey/Mule" : "",
"Donor" : "",
"Dosage" : "",
"Dove" : "",
"Download" : "",
"Draft" : "",
"Driver" : "",
"Drop files here..." : "",
"Dropoff" : "",
"Duck" : "",
"Due" : "",
"Due in next month" : "",
"Due in next week" : "",
"Due in next year" : "",
"Due today" : "",
"Duration" : "",
"Dutch" : "",
"Dutch Shepherd" : "",
"Dwarf" : "",
"Dwarf Eared" : "",
"E = first letter of animal entry category" : "",
"EE = first and second letter of animal entry category" : "",
"Eclectus" : "",
"Edit" : "",
"Edit All Diary Notes" : "",
"Edit Appointment" : "",
"Edit Diary Tasks" : "",
"Edit HTML publishing templates" : "",
"Edit Header/Footer" : "",
"Edit Invoice Item" : "",
"Edit Lookups" : "",
"Edit My Diary Notes" : "",
"Edit Online Forms" : "",
"Edit Reports" : "",
"Edit Roles" : "",
"Edit Users" : "",
"Edit account" : "",
"Edit additional field" : "",
"Edit citation" : "",
"Edit cost" : "",
"Edit diary" : "",
"Edit diary notes" : "",
"Edit diary task" : "",
"Edit diary tasks" : "",
"Edit diet" : "",
"Edit document" : "",
"Edit form field" : "",
"Edit investigation" : "",
"Edit invoice" : "",
"Edit license" : "",
"Edit litter" : "",
"Edit litters" : "",
"Edit log" : "",
"Edit media notes" : "",
"Edit medical profile" : "",
"Edit medical regimen" : "",
"Edit movement" : "",
"Edit my diary notes" : "",
"Edit my diary notes" : "",
"Edit notes" : "",
"Edit online form" : "",
"Edit online form HTML header/footer" : "",
"Edit payment" : "",
"Edit report" : "",
"Edit report template HTML header/footer" : "",
"Edit role" : "",
"Edit roles" : "",
"Edit rota item" : "",
"Edit stock" : "",
"Edit system users" : "",
"Edit template" : "",
"Edit test" : "",
"Edit the current waiting list" : "",
"Edit transaction" : "",
"Edit transport" : "",
"Edit trap loan" : "",
"Edit user" : "",
"Edit vaccination" : "",
"Edit voucher" : "",
"Edit {0}" : "",
"Egyptian Mau" : "",
"Electricity Bills" : "",
"Email" : "",
"Email Address" : "",
"Email PDF" : "",
"Email Person" : "",
"Email To" : "",
"Email a copy of the selected HTML documents as PDFs" : "",
"Email a copy of the selected media files" : "",
"Email address" : "",
"Email document for electronic signature" : "",
"Email incident notes to ACO" : "",
"Email incoming form submissions to this comma separated list of email addresses" : "",
"Email media" : "",
"Email person" : "",
"Email signature" : "",
"Email submissions to" : "",
"Email this message to all matching users" : "",
"Email this person" : "",
"Email users their diary notes each day" : "",
"Emu" : "",
"Enable FTP uploading" : "",
"Enable accounts functionality" : "",
"Enable location filters" : "",
"Enable lost and found functionality" : "",
"Enable multiple sites" : "",
"Enable the waiting list functionality" : "",
"Enable visual effects" : "",
"Enabled" : "",
"End Of Day" : "",
"End Time" : "",
"End at" : "",
"End of month" : "",
"End of year" : "",
"Ends" : "",
"Ends after" : "",
"English Bulldog" : "",
"English Cocker Spaniel" : "",
"English Coonhound" : "",
"English Lop" : "",
"English Pointer" : "",
"English Setter" : "",
"English Shepherd" : "",
"English Spot" : "",
"English Springer Spaniel" : "",
"English Toy Spaniel" : "",
"Entered (newest first)" : "",
"Entered (oldest first)" : "",
"Entered From" : "",
"Entered To" : "",
"Entered shelter" : "",
"Entering 'activelost' or 'activefound' in the search box will show you lost and found animals reported in the last 30 days." : "",
"Entering 'deceased' in the search box will show you recently deceased animals." : "",
"Entering 'fosterers', 'homecheckers', 'staff', 'volunteers', 'aco' or 'members' in the search box will show you those groups of people." : "",
"Entering 'notforadoption' in the search box will show you all shelter animals with the not for adoption flag set." : "",
"Entering 'os' in the search box will show you all shelter animals." : "",
"Entlebucher" : "",
"Entry" : "",
"Entry Category" : "",
"Entry Donation" : "",
"Entry Reason" : "",
"Entry Reason Category" : "",
"Entry Reasons" : "",
"Entry reason" : "",
"Error contacting server." : "",
"Escaped" : "",
"Escaped {0}" : "",
"Eskimo Dog" : "",
"Estimate" : "",
"Euthanized" : "",
"Euthanized {0}" : "",
"Every day" : "",
"Exclude animals who are aged under" : "",
"Exclude from bulk email" : "",
"Exclude new animal photos from publishing" : "",
"Exclude this image when publishing" : "",
"Execute" : "",
"Execute Script" : "",
"Execute the SQL in the box below" : "",
"Executing Task" : "",
"Executing..." : "",
"Exotic Shorthair" : "",
"Expense" : "",
"Expense::" : "",
"Expenses::Board" : "",
"Expenses::Electricity" : "",
"Expenses::Food" : "",
"Expenses::Gas" : "",
"Expenses::Phone" : "",
"Expenses::Postage" : "",
"Expenses::Stationary" : "",
"Expenses::Water" : "",
"Expire in next month" : "",
"Expired" : "",
"Expired in the last month" : "",
"Expired in the last week" : "",
"Expires" : "",
"Expiry" : "",
"Expiry date" : "",
"Export" : "",
"Export Animals as CSV" : "",
"Export Report" : "",
"Export Reports as CSV" : "",
"Export a CSV file of animal records that ASM can import into another database." : "",
"Export this database in various formats" : "",
"Exporting the complete database can take some time and generate a very large file, are you sure?" : "",
"Extra Images" : "",
"Extra images" : "",
"Extra-Toes Cat (Hemingway Polydactyl)" : "",
"F (Feral Cat)" : "",
"FECV/FeCoV" : "",
"FIPV" : "",
"FIV" : "",
"FIV Result" : "",
"FIV+" : "",
"FIV/L Test Date" : "",
"FIV/L Tested" : "",
"FLV" : "",
"FLV Result" : "",
"FLV+" : "",
"FTP hostname" : "",
"FTP password" : "",
"FTP username" : "",
"FVRCP" : "",
"Facebook" : "",
"Failed sending email" : "",
"Failed to create payment." : "",
"Failed to renew license." : "",
"Fawn" : "",
"Fawn Tortoiseshell" : "",
"FeLV" : "",
"Features" : "",
"Feb" : "",
"February" : "",
"Fee" : "",
"Female" : "",
"Feral" : "",
"Ferret" : "",
"Field Spaniel" : "",
"Field names should not contain spaces." : "",
"Fila Brasileiro" : "",
"File" : "",
"Filter" : "",
"Financial" : "",
"Finch" : "",
"Find Animal" : "",
"Find Animal/Person" : "",
"Find Found Animal" : "",
"Find Incident" : "",
"Find Lost Animal" : "",
"Find Person" : "",
"Find a found animal" : "",
"Find a lost animal" : "",
"Find aco" : "",
"Find an incident" : "",
"Find animal" : "",
"Find animal columns" : "",
"Find animal control incidents returned {0} results." : "",
"Find animals matching the looking for criteria of this person" : "",
"Find donor" : "",
"Find driver" : "",
"Find fosterer" : "",
"Find found animal returned {0} results." : "",
"Find homechecked" : "",
"Find homechecker" : "",
"Find incident" : "",
"Find lost animal returned {0} results." : "",
"Find member" : "",
"Find person" : "",
"Find person columns" : "",
"Find retailer" : "",
"Find shelter" : "",
"Find staff" : "",
"Find staff/volunteer" : "",
"Find this address on a map" : "",
"Find vet" : "",
"Find volunteer" : "",
"Fine Amount" : "",
"Finnish Lapphund" : "",
"Finnish Spitz" : "",
"First Last" : "",
"First Names" : "",
"First name(s)" : "",
"First offence" : "",
"Fish" : "",
"Flag" : "",
"Flags" : "",
"Flat-coated Retriever" : "",
"Flemish Giant" : "",
"Florida White" : "",
"Followup" : "",
"Followup Between" : "",
"Followup Date/Time" : "",
"Footer" : "",
"For" : "",
"Forbidden" : "",
"Forenames" : "",
"Forget" : "",
"Form URL" : "",
"Forms need a name." : "",
"Foster" : "",
"Foster Book" : "",
"Foster Capacity" : "",
"Foster Transfer" : "",
"Foster an animal" : "",
"Foster book" : "",
"Foster movements must have a valid foster date." : "",
"Foster successfully created." : "",
"Fostered" : "",
"Fostered Animals" : "",
"Fostered to {0} since {1}" : "",
"Fosterer" : "",
"Fosterer (Active Only)" : "",
"Fosterer Medical Report" : "",
"Found" : "",
"Found Animal" : "",
"Found Animal - Additional" : "",
"Found Animal - Details" : "",
"Found Animal Contact" : "",
"Found Animal {0}" : "",
"Found Animal: {0}" : "",
"Found animal - {0} {1} [{2}]" : "",
"Found animal entries matching '{0}'." : "",
"Found animals must have a contact" : "",
"Found animals reported in the last 30 days." : "",
"Found from" : "",
"Found to" : "",
"FoundLost animal entry {0} successfully created." : "",
"Fox Terrier" : "",
"Foxhound" : "",
"Fr" : "",
"French Bulldog" : "",
"French-Lop" : "",
"Frequency" : "",
"Frequently Asked Questions" : "",
"Fri" : "",
"Friday" : "",
"From" : "",
"From Fostering" : "",
"From Other" : "",
"From retailer is only valid on adoption movements." : "",
"Future notes" : "",
"GDPR Contact Opt-In" : "",
"Gaited" : "",
"Gas Bills" : "",
"Gecko" : "",
"General" : "",
"Generate" : "",
"Generate Documents" : "",
"Generate HTML from this SQL" : "",
"Generate Report" : "",
"Generate a document from this animal" : "",
"Generate a document from this incident" : "",
"Generate a document from this movement" : "",
"Generate a document from this person" : "",
"Generate a document from this record" : "",
"Generate a javascript database for the search page" : "",
"Generate a new animal code" : "",
"Generate a random name for this animal" : "",
"Generate document from this appointment" : "",
"Generate document from this license" : "",
"Generate document from this payment" : "",
"Generate document from this transport" : "",
"Generate documentation" : "",
"Generate documents" : "",
"Generate image thumbnails as tn_$$IMAGE$$" : "",
"Generated document '{0}'" : "",
"Gerbil" : "",
"German Pinscher" : "",
"German Shepherd Dog" : "",
"German Shorthaired Pointer" : "",
"German Wirehaired Pointer" : "",
"Get more reports from sheltermanager.com" : "",
"Gift Aid" : "",
"GiftAid" : "",
"Giftaid" : "",
"Ginger" : "",
"Ginger and White" : "",
"Give" : "",
"Give Treatments" : "",
"Give Vaccination" : "",
"Given" : "",
"Glen of Imaal Terrier" : "",
"Go" : "",
"Go the lookup data screen and add/remove breeds, species and animal types according to the animals your shelter deals with." : "",
"Go the options screen and set your shelter's contact details and other settings." : "",
"Go the system users screen and add user accounts for your staff." : "",
"Goat" : "",
"Golden" : "",
"Golden Retriever" : "",
"Goldfish" : "",
"Good With Cats" : "",
"Good With Children" : "",
"Good With Dogs" : "",
"Good with Cats" : "",
"Good with Children" : "",
"Good with Dogs" : "",
"Good with cats" : "",
"Good with children" : "",
"Good with dogs" : "",
"Good with kids" : "",
"Google+" : "",
"Goose" : "",
"Gordon Setter" : "",
"Grade" : "",
"Great Dane" : "",
"Great Pyrenees" : "",
"Greater Swiss Mountain Dog" : "",
"Green" : "",
"Grey" : "",
"Grey and White" : "",
"Greyhound" : "",
"Guinea Pig" : "",
"Guinea fowl" : "",
"HMRC Gift Aid Spreadsheet" : "",
"HTML" : "",
"HTML Publishing Templates" : "",
"HTML/FTP Publisher" : "",
"Hairless" : "",
"Half-Yearly" : "",
"Hamster" : "",
"Harlequin" : "",
"Havana" : "",
"Havanese" : "",
"Header" : "",
"Health Problems" : "",
"Health and Identification" : "",
"Healthy" : "",
"Heartworm" : "",
"Heartworm Test Date" : "",
"Heartworm Test Result" : "",
"Heartworm Tested" : "",
"Heartworm+" : "",
"Hedgehog" : "",
"Held" : "",
"Help" : "",
"Hepatitis" : "",
"Here are some things you should do before you start adding animals and people to your database." : "",
"Hidden" : "",
"Hidden Comments" : "",
"Hidden comments about the animal" : "",
"Hide deceased animals from the home page" : "",
"High" : "",
"Highlight" : "",
"Himalayan" : "",
"History" : "",
"Hold" : "",
"Hold the animal until this date or blank to hold indefinitely" : "",
"Hold until" : "",
"Hold until {0}" : "",
"Holland Lop" : "",
"Home" : "",
"Home Phone" : "",
"Home page" : "",
"Homecheck Areas" : "",
"Homecheck Date" : "",
"Homecheck History" : "",
"Homecheck areas" : "",
"Homechecked" : "",
"Homechecked By" : "",
"Homechecked by" : "",
"Homechecker" : "",
"Horizontal Pitch" : "",
"Horse" : "",
"Hotot" : "",
"Hound" : "",
"Hours" : "",
"Housetrained" : "",
"Hovawart" : "",
"How urgent is it that we take this animal?" : "",
"Husky" : "",
"I've finished, Don't show me this popup again." : "",
"IP Restriction" : "",
"IP restriction is a space-separated list of IP netblocks in CIDR notation that this user is *only* permitted to login from (eg: 192.168.0.0/24 127.0.0.0/8). If left blank, the user can login from any address." : "",
"Ibizan Hound" : "",
"If the shelter provides initial insurance cover to new adopters, the policy number" : "",
"If this form has a populated emailaddress field during submission, send a confirmation email to it" : "",
"If this is the web preferred image, web publishers will use these notes as the animal description" : "",
"If this person is a fosterer, the maximum number of animals they can care for." : "",
"If this person is a member, the date that membership expires." : "",
"If this person is a member, their membership number" : "",
"If this person is a member, their membership number." : "",
"If this stock record is for a drug, the batch number from the container" : "",
"If this stock record is for a perishable good, the expiry date on the container" : "",
"If you assign view or edit roles, only users within those roles will be able to view and edit this account." : "",
"If you don't select any locations, publishers will include animals in all locations." : "",
"Iguana" : "",
"Illyrian Sheepdog" : "",
"Image" : "",
"Image file" : "",
"Import" : "",
"Import a CSV file" : "",
"Import a PayPal CSV file" : "",
"Import from file" : "",
"Important" : "",
"In" : "",
"In SubTotal" : "",
"In the last month" : "",
"In the last quarter" : "",
"In the last week" : "",
"In the last year" : "",
"In-Kind Donation" : "",
"Inactive" : "",
"Inactive - do not include" : "",
"Incident" : "",
"Incident - Additional" : "",
"Incident - Citation" : "",
"Incident - Details" : "",
"Incident - Dispatch" : "",
"Incident - Owner" : "",
"Incident Between" : "",
"Incident Completed Types" : "",
"Incident Date/Time" : "",
"Incident Type" : "",
"Incident Types" : "",
"Incident date cannot be blank" : "",
"Incident followup" : "",
"Incident {0} successfully created." : "",
"Incident {0}, {1}: {2}" : "",
"Incidents" : "",
"Incidents Requiring Followup" : "",
"Include CSV header line" : "",
"Include Removed" : "",
"Include animals in the following locations" : "",
"Include animals on trial adoption" : "",
"Include animals who don't have a description" : "",
"Include animals who don't have a picture" : "",
"Include cruelty case animals" : "",
"Include deceased animals" : "",
"Include fostered animals" : "",
"Include found" : "",
"Include held animals" : "",
"Include incomplete medical records when generating document templates" : "",
"Include incomplete vaccination and test records when generating document templates" : "",
"Include non-shelter animals" : "",
"Include off-shelter animals in medical calendar and books" : "",
"Include preferred photo" : "",
"Include quarantined animals" : "",
"Include reserved animals" : "",
"Include retailer animals" : "",
"Include returned" : "",
"Include this image when publishing" : "",
"Include unaltered animals" : "",
"Income" : "",
"Income from an on-site shop" : "",
"Income::" : "",
"Income::Adoption" : "",
"Income::Donation" : "",
"Income::EntryDonation" : "",
"Income::Interest" : "",
"Income::OpeningBalances" : "",
"Income::Shop" : "",
"Income::Sponsorship" : "",
"Income::WaitingList" : "",
"Incoming" : "",
"Incoming Forms" : "",
"Incoming donations (misc)" : "",
"Incoming forms are online forms that have been completed and submitted by people on the web." : "",
"Incomplete incidents" : "",
"Incomplete notes upto today" : "",
"Index" : "",
"Individual/Couple" : "",
"Induct a new animal" : "",
"Information" : "",
"Initials" : "",
"Install" : "",
"Install the selected reports to your database" : "",
"Insurance" : "",
"Insurance No" : "",
"Intake" : "",
"Intakes {0}" : "",
"Internal Location" : "",
"Internal Locations" : "",
"Invalid email address" : "",
"Invalid email address '{0}'" : "",
"Invalid microchip number length" : "",
"Invalid time '{0}', times should be in 00:00 format" : "",
"Invalid time, times should be in HH:MM format" : "",
"Invalid username or password." : "",
"Investigation" : "",
"Investigations" : "",
"Investigator" : "",
"Invoice Only" : "",
"Invoice items need a description and amount." : "",
"Irish Setter" : "",
"Irish Terrier" : "",
"Irish Water Spaniel" : "",
"Irish Wolfhound" : "",
"Is this a permanent foster?" : "",
"Is this a trial adoption?" : "",
"Issue a new insurance number for this animal/adoption" : "",
"Issue date and expiry date must be valid dates." : "",
"Issued" : "",
"Issued in the last month" : "",
"Issued in the last week" : "",
"Italian Greyhound" : "",
"Italian Spinone" : "",
"Item" : "",
"Jack Russell Terrier" : "",
"Jan" : "",
"January" : "",
"Japanese Bobtail" : "",
"Japanese Chin" : "",
"Javanese" : "",
"Jersey Wooly" : "",
"Jindo" : "",
"Jul" : "",
"July" : "",
"Jump to diary" : "",
"Jump to donations" : "",
"Jump to media" : "",
"Jump to movements" : "",
"Jun" : "",
"June" : "",
"Jurisdiction" : "",
"Jurisdictions" : "",
"Kai Dog" : "",
"Kakariki" : "",
"Karelian Bear Dog" : "",
"Keep table headers visible when scrolling" : "",
"Keeshond" : "",
"Kennel" : "",
"Kerry Blue Terrier" : "",
"Kishu" : "",
"Kittens (under {0} months)" : "",
"Km" : "",
"Komondor" : "",
"Korat" : "",
"Kuvasz" : "",
"Kyi Leo" : "",
"Label" : "",
"Labrador Retriever" : "",
"Lakeland Terrier" : "",
"Lancashire Heeler" : "",
"Large" : "",
"Last First" : "",
"Last Location" : "",
"Last Month" : "",
"Last Name" : "",
"Last Week" : "",
"Last changed by {0} on {1}" : "",
"Last name" : "",
"Last, First" : "",
"Latency" : "",
"Latency Tester" : "",
"Least recently changed" : "",
"Leave" : "",
"Leave of absence" : "",
"Left Margin" : "",
"Left shelter" : "",
"Leonberger" : "",
"Leptospirosis" : "",
"Letter" : "",
"Lhasa Apso" : "",
"Liability" : "",
"Licence for {0} successfully renewed {1} - {2}" : "",
"License" : "",
"License Number" : "",
"License Types" : "",
"License number '{0}' has already been issued." : "",
"License numbers matching '{0}'." : "",
"License requires a number" : "",
"License requires a person" : "",
"License requires issued and expiry dates" : "",
"Licenses" : "",
"Licensing" : "",
"Lifetime" : "",
"Light Amber" : "",
"Lilac" : "",
"Lilac Tortie" : "",
"Limited to {0} matches" : "",
"Link" : "",
"Link an animal" : "",
"Link to an external web resource" : "",
"Link to this animal" : "",
"Links" : "",
"List" : "",
"Litter" : "",
"Litter Ref" : "",
"Litter Reference" : "",
"Littermates" : "",
"Litters" : "",
"Litters need at least a required date and number." : "",
"Live Releases {0}" : "",
"Liver" : "",
"Liver and White" : "",
"Lizard" : "",
"Llama" : "",
"Loading..." : "",
"Loan" : "",
"Local" : "",
"Locale" : "",
"Location" : "",
"Location Filter" : "",
"Location and Species" : "",
"Location and Type" : "",
"Location and Unit" : "",
"Locations" : "",
"Log" : "",
"Log Text" : "",
"Log Type" : "",
"Log Types" : "",
"Log date must be a valid date" : "",
"Log entries need a date and text." : "",
"Log requires a date." : "",
"Log requires a person." : "",
"Log requires an animal." : "",
"Log successfully added." : "",
"Login" : "",
"Logout" : "",
"Long" : "",
"Long term" : "",
"Longest On Shelter" : "",
"Looking For" : "",
"Looking for" : "",
"Lookup" : "",
"Lookup (Multiple Select)" : "",
"Lookup Values" : "",
"Lookup data" : "",
"Lookups" : "",
"Lop Eared" : "",
"Lory/Lorikeet" : "",
"Lost" : "",
"Lost Animal" : "",
"Lost Animal - Additional" : "",
"Lost Animal - Details" : "",
"Lost Animal Contact" : "",
"Lost Animal: {0}" : "",
"Lost and Found" : "",
"Lost and found entries must have a contact" : "",
"Lost animal - {0} {1} [{2}]" : "",
"Lost animal entries matching '{0}'." : "",
"Lost animal entry {0} successfully created." : "",
"Lost animals must have a contact" : "",
"Lost animals reported in the last 30 days." : "",
"Lost from" : "",
"Lost to" : "",
"Lost/Found" : "",
"Lots of reports installed? Clean up the Reports menu with Settings-Options- Display-Show report menu items in collapsed categories." : "",
"Lovebird" : "",
"Low" : "",
"Lowchen" : "",
"Lowest" : "",
"M (Miscellaneous)" : "",
"MM = current month" : "",
"Macaw" : "",
"Mail" : "",
"Mail Merge" : "",
"Mail Merge - {0}" : "",
"Maine Coon" : "",
"Make this the default image when creating documents" : "",
"Make this the default image when viewing this record and publishing to the web" : "",
"Make this the default video link when publishing to the web" : "",
"Male" : "",
"Maltese" : "",
"Manchester Terrier" : "",
"Mandatory" : "",
"Manual" : "",
"Manually enter codes (do not generate)" : "",
"Manufacturer" : "",
"Manx" : "",
"Map" : "",
"Map of active incidents" : "",
"Mar" : "",
"March" : "",
"Maremma Sheepdog" : "",
"Mark Deceased" : "",
"Mark an animal deceased" : "",
"Mark dispatched now" : "",
"Mark new animals as not for adoption" : "",
"Mark responded now" : "",
"Mark selected payments received" : "",
"Mark this owner homechecked" : "",
"Mark treatments given" : "",
"Marketer" : "",
"Markings" : "",
"Markup" : "",
"Marriage/Relationship split" : "",
"Mastiff" : "",
"Match" : "",
"Match Lost and Found" : "",
"Match against other lost/found animals" : "",
"Match lost and found animals" : "",
"Match this animal with the lost and found database" : "",
"Maternity" : "",
"May" : "",
"McNab" : "",
"Media" : "",
"Media Notes" : "",
"Media notes contain" : "",
"Medical" : "",
"Medical Book" : "",
"Medical Profiles" : "",
"Medical book" : "",
"Medical calendar" : "",
"Medical profiles" : "",
"Medical profiles need a profile name, treatment, dosage and frequencies." : "",
"Medical regimens need an animal, name, dosage, a start date and frequencies." : "",
"Medicate" : "",
"Medicate Animal" : "",
"Medium" : "",
"Member" : "",
"Membership Expiry" : "",
"Membership Number" : "",
"Merge" : "",
"Merge Person" : "",
"Merge another animal into this one" : "",
"Merge another person into this one" : "",
"Merge bonded animals into a single record" : "",
"Merge duplicate records" : "",
"Message" : "",
"Message Board" : "",
"Message from {0}" : "",
"Message successfully sent to {0}" : "",
"Messages" : "",
"Messages successfully sent" : "",
"Method" : "",
"Microchip" : "",
"Microchip Date" : "",
"Microchip Number" : "",
"Microchip number {0} has already been allocated to another animal." : "",
"Microchipped" : "",
"Miles" : "",
"Mini Rex" : "",
"Mini-Lop" : "",
"Miniature Pinscher" : "",
"Minutes" : "",
"Missouri Foxtrotter" : "",
"Mixed Breed" : "",
"Mo" : "",
"Mobile signing pad" : "",
"Modify Additional Fields" : "",
"Modify Document Templates" : "",
"Modify Lookups" : "",
"Mon" : "",
"Monday" : "",
"Money" : "",
"Month" : "",
"Monthly" : "",
"More Info Needed" : "",
"More Medications" : "",
"More Tests" : "",
"More Vaccinations" : "",
"More diary notes" : "",
"Morgan" : "",
"Most browsers let you search in dropdowns by typing the first few letters of the item you want." : "",
"Most browsers will let you visit a record you have been to in this session by typing part of its name in the address bar." : "",
"Most recently changed" : "",
"Most relevant" : "",
"Mother" : "",
"Mountain Cur" : "",
"Mountain Dog" : "",
"Mouse" : "",
"Move" : "",
"Move an animal to a retailer" : "",
"Moved to animal record {0}" : "",
"Movement" : "",
"Movement Date" : "",
"Movement Number" : "",
"Movement Type" : "",
"Movement Types" : "",
"Movement dates clash with an existing movement." : "",
"Movement numbers must be unique." : "",
"Movements" : "",
"Movements require an animal" : "",
"Movements require an animal." : "",
"Moving..." : "",
"Multi-Lookup" : "",
"Multiple Treatments" : "",
"Munchkin" : "",
"Munsterlander" : "",
"Mustang" : "",
"My Fosters" : "",
"My Incidents" : "",
"My Undispatched Incidents" : "",
"My diary notes" : "",
"My sheltermanager.com account" : "",
"Mynah" : "",
"N (Non-Shelter Animal)" : "",
"NNN or NN = number unique for this type of animal for this year" : "",
"Name" : "",
"Name Contains" : "",
"Name and Address" : "",
"Name cannot be blank" : "",
"Name contains" : "",
"Neapolitan Mastiff" : "",
"Negative" : "",
"Neglect" : "",
"Netherland Dwarf" : "",
"Neuter/Spay" : "",
"Neutered" : "",
"Neutered/Spayed Non-Shelter Animals In {0}" : "",
"Neutered/Spayed Shelter Animals In {0}" : "",
"New" : "",
"New Account" : "",
"New Appointment" : "",
"New Citation" : "",
"New Cost" : "",
"New Diary" : "",
"New Diet" : "",
"New Document" : "",
"New Field" : "",
"New Fosterer" : "",
"New Guinea Singing Dog" : "",
"New Item" : "",
"New License" : "",
"New Litter" : "",
"New Log" : "",
"New Movement" : "",
"New Owner" : "",
"New Password" : "",
"New Payment" : "",
"New Profile" : "",
"New Record" : "",
"New Regimen" : "",
"New Report" : "",
"New Role" : "",
"New Stock" : "",
"New Task" : "",
"New Template" : "",
"New Test" : "",
"New Transport" : "",
"New Trap Loan" : "",
"New User" : "",
"New Vaccination" : "",
"New Voucher" : "",
"New Waiting List Entry" : "",
"New Zealand" : "",
"New diary task" : "",
"New form field" : "",
"New name" : "",
"New online form" : "",
"New password and confirmation password don't match." : "",
"New task detail" : "",
"New template" : "",
"Newfoundland Dog" : "",
"Next" : "",
"No" : "",
"No adjustment" : "",
"No data to show on the report." : "",
"No data." : "",
"No description" : "",
"No longer retained" : "",
"No matches found." : "",
"No picture" : "",
"No publishers are running." : "",
"No results found." : "",
"No results." : "",
"No tasks are running." : "",
"No view permission for this report" : "",
"Noise" : "",
"Non-Shelter" : "",
"Non-Shelter Animal" : "",
"Non-Shelter Animals" : "",
"Non-shelter Animals" : "",
"None" : "",
"Norfolk Terrier" : "",
"Normal user" : "",
"Norwegian Buhund" : "",
"Norwegian Elkhound" : "",
"Norwegian Forest Cat" : "",
"Norwegian Lundehund" : "",
"Norwich Terrier" : "",
"Not Arrived" : "",
"Not Available For Adoption" : "",
"Not Available for Adoption" : "",
"Not For Adoption" : "",
"Not Microchipped" : "",
"Not Reconciled" : "",
"Not available for adoption" : "",
"Not dispatched" : "",
"Not for adoption" : "",
"Not for adoption flag set" : "",
"Not in chosen publisher location" : "",
"Not reconciled" : "",
"Note" : "",
"Notes" : "",
"Notes about the death of the animal" : "",
"Nov" : "",
"Nova Scotia Duck-Tolling Retriever" : "",
"November" : "",
"Now" : "",
"Number" : "",
"Number in litter" : "",
"Number of Tasks" : "",
"Number of animal links to show" : "",
"Number of fields" : "",
"Number of pets" : "",
"Ocicat" : "",
"Oct" : "",
"October" : "",
"Office" : "",
"Old English Sheepdog" : "",
"Old Password" : "",
"Omit criteria" : "",
"Omit header/footer" : "",
"On Foster (in figures)" : "",
"On Shelter" : "",
"On shelter for {0} days, daily cost {1}, cost record total <b>{2}</b>" : "",
"On shelter for {0} days. Total cost: {1}" : "",
"Once assigned, codes cannot be changed" : "",
"Once signed, this document cannot be edited or tampered with." : "",
"One Off" : "",
"One-Off" : "",
"Online Form: {0}" : "",
"Online Forms" : "",
"Online form fields need a name and label." : "",
"Online forms can be linked to from your website and used to take information from visitors for applications, etc." : "",
"Only PDF, HTML and JPG image files can be attached." : "",
"Only active accounts" : "",
"Only allow users with one of these roles to view this incident" : "",
"Only show account totals for the current period, which starts on " : "",
"Only show declawed" : "",
"Only show pickups" : "",
"Only show special needs" : "",
"Only show transfers" : "",
"Open Incidents" : "",
"Open records in a new browser tab" : "",
"Open reports in a new browser tab" : "",
"Opening balances" : "",
"Optional, the date the vaccination \"wears off\" and needs to be administered again" : "",
"Options" : "",
"Or move this diary on to" : "",
"Order published animals by" : "",
"Organisation" : "",
"Organization" : "",
"Organization name" : "",
"Oriental Long Hair" : "",
"Oriental Short Hair" : "",
"Oriental Tabby" : "",
"Original Owner" : "",
"Ostrich" : "",
"Other Account" : "",
"Other Organisation" : "",
"Other Shelter" : "",
"Otterhound" : "",
"Our shelter does trial adoptions, allow us to mark these on movement screens" : "",
"Out" : "",
"Out Between" : "",
"Out SubTotal" : "",
"Output a deceased animals page" : "",
"Output a page with links to available online forms" : "",
"Output a separate page for each animal type" : "",
"Output a separate page for each species" : "",
"Output an adopted animals page" : "",
"Output an rss.xml page" : "",
"Overdue" : "",
"Overdue medical items" : "",
"Overtime" : "",
"Owl" : "",
"Owner" : "",
"Owner Vet" : "",
"Owner given citation" : "",
"Owners Vet" : "",
"PM" : "",
"Page extension" : "",
"Paid" : "",
"Paint/Pinto" : "",
"Palomino" : "",
"Paper Size" : "",
"Papillon" : "",
"Parainfluenza" : "",
"Parakeet (Other)" : "",
"Parent" : "",
"Parrot (Other)" : "",
"Parrotlet" : "",
"Parvovirus" : "",
"Paso Fino" : "",
"Pass Homecheck" : "",
"Password" : "",
"Password for '{0}' has been reset." : "",
"Password is incorrect." : "",
"Password successfully changed." : "",
"Passwords cannot be blank." : "",
"Path" : "",
"Patterdale Terrier (Fell Terrier)" : "",
"PayPal" : "",
"Payment" : "",
"Payment Book" : "",
"Payment From" : "",
"Payment Methods" : "",
"Payment Type" : "",
"Payment Types" : "",
"Payment book" : "",
"Payment calendar" : "",
"Payment of {0} successfully received ({1})." : "",
"Payments" : "",
"Payments need at least one date, an amount and a person." : "",
"Payments of type" : "",
"Payments require a person" : "",
"Payments require a received date" : "",
"Peacock/Pea fowl" : "",
"Pekingese" : "",
"Pending Adoption" : "",
"Pending Apartment Verification" : "",
"Pending Home Visit" : "",
"Pending Vet Check" : "",
"Pension" : "",
"People" : "",
"People Looking For" : "",
"People matching '{0}'." : "",
"People or animal records that already exist in the database will not be imported again and movement/payment data will be attached to the existing records instead." : "",
"People with active reservations, but no homecheck has been done." : "",
"People with overdue donations." : "",
"Percheron" : "",
"Perform" : "",
"Perform Homecheck" : "",
"Perform Test" : "",
"Performed" : "",
"Permanent Foster" : "",
"Persian" : "",
"Person" : "",
"Person - Additional" : "",
"Person - Name and Address" : "",
"Person - Type" : "",
"Person Flags" : "",
"Person looking for report" : "",
"Person successfully created" : "",
"Personal" : "",
"Peruvian Inca Orchid" : "",
"Peruvian Paso" : "",
"Petit Basset Griffon Vendeen" : "",
"Pharaoh Hound" : "",
"Pheasant" : "",
"Phone" : "",
"Phone contains" : "",
"Photo successfully uploaded." : "",
"Picked Up" : "",
"Picked Up By" : "",
"Pickup" : "",
"Pickup Address" : "",
"Pickup Location" : "",
"Pickup Locations" : "",
"Pig" : "",
"Pig (Farm)" : "",
"Pigeon" : "",
"Pinterest" : "",
"Pionus" : "",
"Pit Bull Terrier" : "",
"Pixie-Bob" : "",
"Please click the Sign button when you are finished." : "",
"Please see the manual for more information." : "",
"Please select a PDF, HTML or JPG image file to attach" : "",
"Please tighten the scope of your email campaign to {0} emails or less." : "",
"Please use the links below to electronically sign these documents." : "",
"Plott Hound" : "",
"Poicephalus/Senegal" : "",
"Pointer" : "",
"Points for being found within 2 weeks of being lost" : "",
"Points for matching age group" : "",
"Points for matching breed" : "",
"Points for matching color" : "",
"Points for matching features" : "",
"Points for matching lost/found area" : "",
"Points for matching sex" : "",
"Points for matching species" : "",
"Points for matching zipcode" : "",
"Points required to appear on match report" : "",
"Polish" : "",
"Polish Lowland Sheepdog" : "",
"Pomeranian" : "",
"Pony" : "",
"Poodle" : "",
"Portugese Podengo" : "",
"Portuguese Water Dog" : "",
"Positive" : "",
"Positive for Heartworm, FIV or FLV" : "",
"Positive/Negative" : "",
"Post" : "",
"Postage costs" : "",
"Pot Bellied" : "",
"Prairie Dog" : "",
"Prefill new media notes for animal images with animal comments if left blank" : "",
"Prefill new media notes with the filename if left blank" : "",
"Premises" : "",
"Presa Canario" : "",
"Press F11 in HTML or SQL code editing boxes to edit in fullscreen mode" : "",
"Preview" : "",
"Previous" : "",
"Previous Adopter" : "",
"Print" : "",
"Print Preview" : "",
"Print selected forms" : "",
"Printable Manual" : "",
"Printing word processor documents uses hidden iframe and window.print" : "",
"Priority" : "",
"Priority Floor" : "",
"Produce a CSV File" : "",
"Produce a PDF of printable labels" : "",
"Profile" : "",
"Profile name cannot be blank" : "",
"Public Holiday" : "",
"Publish Animals to the Internet" : "",
"Publish HTML via FTP" : "",
"Publish now" : "",
"Publish to folder" : "",
"Published to Website" : "",
"Publisher" : "",
"Publisher Breed" : "",
"Publisher Color" : "",
"Publisher Logs" : "",
"Publisher Species" : "",
"Publishing" : "",
"Publishing History" : "",
"Publishing Logs" : "",
"Publishing Options" : "",
"Publishing complete." : "",
"Publishing template" : "",
"Pug" : "",
"Puli" : "",
"Pumi" : "",
"Puppies (under {0} months)" : "",
"Purchased" : "",
"Qty" : "",
"Quaker Parakeet" : "",
"Quantity" : "",
"Quarantine" : "",
"Quarterhorse" : "",
"Quarterly" : "",
"Quick Links" : "",
"Quicklinks" : "",
"Quicklinks are shown on the home page and allow quick access to areas of the system." : "",
"R" : "",
"Rabbit" : "",
"Rabies" : "",
"Rabies Tag" : "",
"RabiesTag" : "",
"Radio Buttons" : "",
"Ragamuffin" : "",
"Ragdoll" : "",
"Rank" : "",
"Rat" : "",
"Rat Terrier" : "",
"Raw Markup" : "",
"Read the manual for more information about Animal Shelter Manager." : "",
"Real name" : "",
"Reason" : "",
"Reason For Appointment" : "",
"Reason Not From Owner" : "",
"Reason for Entry" : "",
"Reason for entry" : "",
"Reason not from Owner" : "",
"Reason the owner did not bring in the animal themselves" : "",
"Recalculate ALL animal ages/times" : "",
"Recalculate ALL animal locations" : "",
"Recalculate on-shelter animal locations" : "",
"Receipt No" : "",
"Receipt/Invoice" : "",
"Receive" : "",
"Receive a donation" : "",
"Receive a payment" : "",
"Received" : "",
"Received in last day" : "",
"Received in last month" : "",
"Received in last week" : "",
"Received in last year" : "",
"Received today" : "",
"Recently Adopted" : "",
"Recently Changed" : "",
"Recently Entered Shelter" : "",
"Recently Fostered" : "",
"Recently deceased" : "",
"Recently deceased shelter animals (last 30 days)." : "",
"Reception" : "",
"Reclaim" : "",
"Reclaim an animal" : "",
"Reclaim movements must have a valid reclaim date." : "",
"Reclaim successfully created." : "",
"Reclaimed" : "",
"Reconcile" : "",
"Reconciled" : "",
"Redbone Coonhound" : "",
"Rediarised" : "",
"Redirect to URL after POST" : "",
"Reference" : "",
"Refresh" : "",
"Regenerate 'Match lost and found animals' report" : "",
"Regenerate 'Person looking for' report" : "",
"Regenerate annual animal figures for" : "",
"Regenerate monthly animal figures for" : "",
"Regenerate person names in selected format" : "",
"Register Microchip" : "",
"Register microchips after" : "",
"Released To Wild" : "",
"Released To Wild {0}" : "",
"Reload" : "",
"Remaining" : "",
"Remember me on this computer" : "",
"Removal" : "",
"Removal Reason" : "",
"Removal reason" : "",
"Remove" : "",
"Remove HTML and PDF document media after this many years" : "",
"Remove clinic functionality from screens and menus" : "",
"Remove fine-grained animal control incident permissions" : "",
"Remove holds after" : "",
"Remove move menu and the movements tab from animal and person screens" : "",
"Remove personally identifiable data" : "",
"Remove previously published files before uploading" : "",
"Remove retailer functionality from the movement screens and menus" : "",
"Remove short shelter code box from the animal details screen" : "",
"Remove the FIV/L test fields from animal health details" : "",
"Remove the Litter ID field from animal details" : "",
"Remove the Rabies Tag field from animal health details" : "",
"Remove the adoption coordinator field from animal entry details" : "",
"Remove the adoption fee field from animal details" : "",
"Remove the animal control functionality from menus and screens" : "",
"Remove the bonded with fields from animal entry details" : "",
"Remove the city/state fields from person details" : "",
"Remove the coat type field from animal details" : "",
"Remove the declawed box from animal health details" : "",
"Remove the document repository functionality from menus" : "",
"Remove the good with fields from animal notes" : "",
"Remove the heartworm test fields from animal health details" : "",
"Remove the insurance number field from the movement screens" : "",
"Remove the location unit field from animal details" : "",
"Remove the microchip fields from animal identification details" : "",
"Remove the neutered fields from animal health details" : "",
"Remove the online form functionality from menus" : "",
"Remove the picked up fields from animal entry details" : "",
"Remove the rota functionality from menus and screens" : "",
"Remove the size field from animal details" : "",
"Remove the stock control functionality from menus and screens" : "",
"Remove the tattoo fields from animal identification details" : "",
"Remove the transport functionality from menus and screens" : "",
"Remove the trap loan functionality from menus and screens" : "",
"Remove the weight field from animal details" : "",
"Removed" : "",
"Rename" : "",
"Renew License" : "",
"Renew licence" : "",
"Renew license" : "",
"Report" : "",
"Report Title" : "",
"Report a new incident" : "",
"Reports" : "",
"Request signature by email" : "",
"Requested" : "",
"Require followup" : "",
"Required" : "",
"Required date must be a valid date" : "",
"Reschedule" : "",
"Reservation" : "",
"Reservation Book" : "",
"Reservation Cancelled" : "",
"Reservation Date" : "",
"Reservation For" : "",
"Reservation Status" : "",
"Reservation Statuses" : "",
"Reservation book" : "",
"Reservation date cannot be after cancellation date." : "",
"Reservation successfully created." : "",
"Reservations must have a valid reservation date." : "",
"Reserve" : "",
"Reserve an animal" : "",
"Reserved" : "",
"Reset" : "",
"Reset Password" : "",
"Respond" : "",
"Responded" : "",
"Responded Between" : "",
"Responded Date/Time" : "",
"Result" : "",
"Results" : "",
"Results for '{0}'." : "",
"Retailer" : "",
"Retailer Animals" : "",
"Retailer Book" : "",
"Retailer book" : "",
"Retailer movement successfully created." : "",
"Retailer movements must have a valid movement date." : "",
"Retriever" : "",
"Return" : "",
"Return Category" : "",
"Return Date" : "",
"Return a transferred animal" : "",
"Return an animal from adoption" : "",
"Return an animal from another movement" : "",
"Return an animal from transfer" : "",
"Return date cannot be before the movement date." : "",
"Return this movement and bring the animal back to the shelter" : "",
"Returned" : "",
"Returned By" : "",
"Returned To Owner" : "",
"Returned from" : "",
"Returned to" : "",
"Returned to Owner {0}" : "",
"Returning" : "",
"Returns {0}" : "",
"Reupload animal images every time" : "",
"Rex" : "",
"Rhea" : "",
"Rhinelander" : "",
"Rhodesian Ridgeback" : "",
"Ringneck/Psittacula" : "",
"Role is in use and cannot be deleted." : "",
"Roles" : "",
"Roles need a name." : "",
"Rosella" : "",
"Rostered day off" : "",
"Rota" : "",
"Rota Types" : "",
"Rota cloned successfully." : "",
"Rotate image 90 degrees anticlockwis" : "",
"Rotate image 90 degrees clockwise" : "",
"Rottweiler" : "",
"Rough" : "",
"Rows" : "",
"Ruddy" : "",
"Russian Blue" : "",
"S (Stray Cat)" : "",
"S = first letter of animal species" : "",
"SM Account" : "",
"SMS" : "",
"SQL" : "",
"SQL Interface" : "",
"SQL dump" : "",
"SQL dump (ASM2 HSQLDB Format)" : "",
"SQL editor: Press F11 to go full screen and press CTRL+SPACE to autocomplete table and column names" : "",
"SQL interface" : "",
"SQL is syntactically correct." : "",
"SS = first and second letter of animal species" : "",
"Sa" : "",
"Saddlebred" : "",
"Saint Bernard St. Bernard" : "",
"Sales Tax" : "",
"Saluki" : "",
"Samoyed" : "",
"Sat" : "",
"Satin" : "",
"Saturday" : "",
"Save" : "",
"Save and leave" : "",
"Save this incident" : "",
"Save this person" : "",
"Save this record" : "",
"Save this waiting list entry" : "",
"Saving..." : "",
"Scale published animal images to" : "",
"Scheduled" : "",
"Schipperke" : "",
"Schnauzer" : "",
"Scottish Deerhound" : "",
"Scottish Fold" : "",
"Scottish Terrier Scottie" : "",
"Script" : "",
"Seal" : "",
"Sealyham Terrier" : "",
"Search" : "",
"Search Results for '{0}'" : "",
"Search returned {0} results." : "",
"Search sort order" : "",
"Searchable" : "",
"Second offence" : "",
"Select" : "",
"Select a person" : "",
"Select a person to attach this form to." : "",
"Select a person to merge into this record. The selected person will be removed, and their movements, diary notes, log entries, etc. will be reattached to this record." : "",
"Select all" : "",
"Select an animal" : "",
"Select an animal to attach this form to." : "",
"Select an animal to merge into this record. The selected animal will be removed, and their movements, diary notes, log entries, etc. will be reattached to this record." : "",
"Select animal to merge" : "",
"Select animals" : "",
"Select date for diary task" : "",
"Select person to merge" : "",
"Select recommended" : "",
"Selected On-Shelter Animals" : "",
"Selkirk Rex" : "",
"Send" : "",
"Send Emails" : "",
"Send a weekly email to fosterers with medical information about their animals" : "",
"Send confirmation email to form submitter" : "",
"Send emails" : "",
"Send mass emails and perform mail merges" : "",
"Send via email" : "",
"Sending {0} emails is considered abusive and will damage the reputation of the email server." : "",
"Sending..." : "",
"Senior" : "",
"Sent to mobile signing pad." : "",
"Sep" : "",
"Separate waiting list rank by species" : "",
"September" : "",
"Server clock adjustment" : "",
"Set publishing options" : "",
"Set this to 0 to never automatically remove." : "",
"Set to 0 to never update urgencies." : "",
"Set wether or not this user account can log in to the user interface." : "",
"Setter" : "",
"Setting a location filter will prevent this user seeing animals who are not in these locations on shelterview, find animal and search." : "",
"Settings" : "",
"Settings, Lookup data" : "",
"Settings, Options" : "",
"Settings, Reports" : "",
"Settings, System user accounts" : "",
"Sex" : "",
"Sex and Species" : "",
"Sexes" : "",
"Shar Pei" : "",
"Share" : "",
"Shared weblink" : "",
"Shares" : "",
"Sheep" : "",
"Sheep Dog" : "",
"Shelter" : "",
"Shelter Animal" : "",
"Shelter Animals" : "",
"Shelter Details" : "",
"Shelter animal {0} '{1}'" : "",
"Shelter animals" : "",
"Shelter code cannot be blank" : "",
"Shelter code {0} has already been allocated to another animal." : "",
"Shelter stats (all time)" : "",
"Shelter stats (this month)" : "",
"Shelter stats (this week)" : "",
"Shelter stats (this year)" : "",
"Shelter stats (today)" : "",
"Shelter view" : "",
"Shepherd" : "",
"Shetland Sheepdog Sheltie" : "",
"Shiba Inu" : "",
"Shift" : "",
"Shih Tzu" : "",
"Short" : "",
"Show GDPR Contact Opt-In field on person screens" : "",
"Show PDF files inline instead of sending them as attachments" : "",
"Show a cost field on medical/test/vaccination screens" : "",
"Show a minimap of the address on person screens" : "",
"Show a separate paid date field with costs" : "",
"Show alerts on the home page" : "",
"Show animal thumbnails in movement and medical books" : "",
"Show animals adopted" : "",
"Show codes on the shelter view screen" : "",
"Show complete comments in table views" : "",
"Show empty locations" : "",
"Show on new record screens" : "",
"Show quick links on all pages" : "",
"Show quick links on the home page" : "",
"Show report menu items in collapsed categories" : "",
"Show short shelter codes on screens" : "",
"Show the adoption fee field" : "",
"Show the altered fields" : "",
"Show the breed fields" : "",
"Show the brought in by field" : "",
"Show the color field" : "",
"Show the date brought in field" : "",
"Show the entry category field" : "",
"Show the full diary (instead of just my notes) on the home page" : "",
"Show the hold fields" : "",
"Show the internal location field" : "",
"Show the litter ID field" : "",
"Show the location unit field" : "",
"Show the microchip fields" : "",
"Show the original owner field" : "",
"Show the size field" : "",
"Show the tattoo fields" : "",
"Show the time brought in field" : "",
"Show the transfer in field" : "",
"Show the weight field" : "",
"Show timeline on the home page" : "",
"Show tips on the home page" : "",
"Show transactions from" : "",
"Show weight as lb rather than kg" : "",
"Showing {0} timeline events." : "",
"Siamese" : "",
"Siberian" : "",
"Siberian Husky" : "",
"Sick leave" : "",
"Sick/Injured" : "",
"Sick/injured animal" : "",
"Sign" : "",
"Sign document" : "",
"Sign on screen" : "",
"Signature" : "",
"Signed" : "",
"Signing" : "",
"Signing Pad" : "",
"Signup" : "",
"Silky Terrier" : "",
"Silver" : "",
"Silver Fox" : "",
"Silver Marten" : "",
"Similar Animal" : "",
"Similar Person" : "",
"Simple" : "",
"Singapura" : "",
"Single Treatment" : "",
"Site" : "",
"Sites" : "",
"Size" : "",
"Sizes" : "",
"Skunk" : "",
"Skye Terrier" : "",
"Sloughi" : "",
"Small" : "",
"SmartTag PETID" : "",
"Smooth Fox Terrier" : "",
"Snake" : "",
"Snowshoe" : "",
"Social" : "",
"Softbill (Other)" : "",
"Sold" : "",
"Somali" : "",
"Some batch processes may take a few minutes to run and could prevent other users being able to use the system for a short time." : "",
"Some browsers allow shortcut keys, press SHIFT+ALT+A in Chrome or Firefox to jump to the animal adoption screen." : "",
"Some info text" : "",
"Sorrel" : "",
"Sorrel Tortoiseshell" : "",
"Sorry, this document has already been signed" : "",
"South Russian Ovcharka" : "",
"Spaniel" : "",
"Special Needs" : "",
"Species" : "",
"Species A-Z" : "",
"Species Z-A" : "",
"Species to use when publishing to third party services and adoption sites" : "",
"Specifying a reschedule date will make copies of the selected vaccinations and mark them to be given on the reschedule date. Example: If this vaccination needs to be given every year, set the reschedule date to be 1 year from today." : "",
"Sphynx (hairless cat)" : "",
"Spitz" : "",
"Split baby/adult age at" : "",
"Split species pages with a baby/adult prefix" : "",
"Sponsorship donations" : "",
"Staff" : "",
"Staff Rota" : "",
"Staff record" : "",
"Staff rota" : "",
"Staffordshire Bull Terrier" : "",
"Standard" : "",
"Standardbred" : "",
"Start Date" : "",
"Start Of Day" : "",
"Start Time" : "",
"Start at" : "",
"Start date" : "",
"Start date must be a valid date" : "",
"Start of year" : "",
"Started" : "",
"Starts" : "",
"State" : "",
"State contains" : "",
"Stationary costs" : "",
"Stats" : "",
"Stats period" : "",
"Stats show running figures for the selected period of animals entering and leaving the shelter on the home page." : "",
"Status" : "",
"Status and Species" : "",
"Stay" : "",
"Stock" : "",
"Stock Control" : "",
"Stock Levels" : "",
"Stock Locations" : "",
"Stock Take" : "",
"Stock Usage Type" : "",
"Stock level must have a name" : "",
"Stock level must have a unit" : "",
"Stock needs a name and unit." : "",
"Stocktake" : "",
"Stolen" : "",
"Stolen {0}" : "",
"Stop" : "",
"Stop Publishing" : "",
"Stores" : "",
"Stray" : "",
"Su" : "",
"SubTotal" : "",
"Subject" : "",
"Submission received: {0}" : "",
"Success" : "",
"Successfully attached to {0}" : "",
"Sugar Glider" : "",
"Sun" : "",
"Sunday" : "",
"Super user" : "",
"Superuser" : "",
"Surname" : "",
"Surrender" : "",
"Surrender Pickup" : "",
"Suspect" : "",
"Suspect 1" : "",
"Suspect 2" : "",
"Suspect 3" : "",
"Suspect/Animal" : "",
"Swan" : "",
"Swedish Vallhund" : "",
"Syntax check this SQL" : "",
"System" : "",
"System Admin" : "",
"System Options" : "",
"System user accounts" : "",
"T = first letter of animal type" : "",
"TNR" : "",
"TNR - Trap/Neuter/Release" : "",
"TT = first and second letter of animal type" : "",
"Tabby" : "",
"Tabby and White" : "",
"Take another payment" : "",
"Taken By" : "",
"Tan" : "",
"Tan and Black" : "",
"Tan and White" : "",
"Task complete." : "",
"Task items are executed in order of index, lowest to highest" : "",
"Tattoo" : "",
"Tattoo Date" : "",
"Tattoo Number" : "",
"Tax" : "",
"Tax Amount" : "",
"Tax Rate %" : "",
"Telephone" : "",
"Telephone Bills" : "",
"Template" : "",
"Template Name" : "",
"Template names can include a path portion with /, eg: Vets/Rabies Certificate" : "",
"Tennessee Walker" : "",
"Terrapin" : "",
"Terrier" : "",
"Test" : "",
"Test Animal" : "",
"Test Book" : "",
"Test Performed" : "",
"Test Results" : "",
"Test Types" : "",
"Test book" : "",
"Test marked as performed for {0} - {1}" : "",
"Tests" : "",
"Tests need an animal and at least a required date." : "",
"Text" : "",
"Text Encoding" : "",
"Th" : "",
"Thai Ridgeback" : "",
"Thank you for choosing Animal Shelter Manager for your shelter!" : "",
"Thank you, the document is now signed." : "",
"That animal is already linked to the incident" : "",
"The CSV file should be created by PayPal's \"All Activity\" report." : "",
"The SmartTag PETID number" : "",
"The SmartTag type" : "",
"The URL is the address of a web resource, eg: www.youtube.com/watch?v=xxxxxx" : "",
"The animal name" : "",
"The animal record to merge must be different from the original." : "",
"The animal sex" : "",
"The base color of this animal" : "",
"The coat type of this animal" : "",
"The confirmation email message to send to the form submitter. Leave blank to send a copy of the completed form." : "",
"The database will be inaccessible to all users while the export is in progress." : "",
"The date reported to the shelter" : "",
"The date the animal died" : "",
"The date the animal was FIV/L tested" : "",
"The date the animal was adopted" : "",
"The date the animal was altered" : "",
"The date the animal was born" : "",
"The date the animal was brought into the shelter" : "",
"The date the animal was heartworm tested" : "",
"The date the animal was microchipped" : "",
"The date the animal was reclaimed" : "",
"The date the animal was tattooed" : "",
"The date the foster animal will be returned if known" : "",
"The date the foster is effective from" : "",
"The date the litter entered the shelter" : "",
"The date the owner last contacted the shelter" : "",
"The date the payment was received" : "",
"The date the reservation is effective from" : "",
"The date the retailer movement is effective from" : "",
"The date the transfer is effective from" : "",
"The date the trial adoption is over" : "",
"The date the vaccination is required/due to be administered" : "",
"The date the vaccination was administered" : "",
"The date this animal was found" : "",
"The date this animal was lost" : "",
"The date this animal was put on the waiting list" : "",
"The date this animal was removed from the waiting list" : "",
"The date this animal was reserved" : "",
"The date this animal was returned to its owner" : "",
"The date this person was homechecked." : "",
"The default username is 'user' with the password 'letmein'" : "",
"The entry reason for this animal" : "",
"The litter this animal belongs to" : "",
"The locale determines the language ASM will use when displaying text, dates and currencies." : "",
"The location where the animal was picked up" : "",
"The microchip number" : "",
"The movement number '{0}' is not unique." : "",
"The number of stock records to create" : "",
"The period in days before waiting list urgency is increased" : "",
"The person record to merge must be different from the original." : "",
"The primary breed of this animal" : "",
"The reason the owner wants to part with the animal" : "",
"The reason this animal was removed from the waiting list" : "",
"The remaining units in the container" : "",
"The result of the FIV test" : "",
"The result of the FLV test" : "",
"The result of the heartworm test" : "",
"The retail/resale price per unit" : "",
"The secondary breed of this animal" : "",
"The selected file is not an image." : "",
"The shelter category for this animal" : "",
"The shelter reference number" : "",
"The sheltermanager.com admin account password cannot be changed here, please visit {0}" : "",
"The size of this animal" : "",
"The species of this animal" : "",
"The tattoo number" : "",
"The type of unit in the container, eg: tablet, vial, etc." : "",
"The veterinary license number." : "",
"The wholesale/trade price the container was bought for" : "",
"There is not enough information in the form to attach to a shelter animal record (need an animal name)." : "",
"There is not enough information in the form to create a found animal record (need a description and area found)." : "",
"There is not enough information in the form to create a lost animal record (need a description and area lost)." : "",
"There is not enough information in the form to create a person record (need a surname)." : "",
"There is not enough information in the form to create a transport record (need animalname)." : "",
"There is not enough information in the form to create a transport record (need pickupdate and dropoffdate)." : "",
"There is not enough information in the form to create a waiting list record (need a description)." : "",
"There is not enough information in the form to create an incident record (need call notes and dispatch address)." : "",
"These are the HTML headers and footers used when displaying online forms." : "",
"These are the HTML headers and footers used when generating reports." : "",
"These are the default values for these fields when creating new records." : "",
"These batch processes are run each night by the system and should not need to be run manually." : "",
"These fields allow you to deduct stock for the test(s) given. This single deduction should cover the selected tests being performed." : "",
"These fields allow you to deduct stock for the treatment(s) given. This single deduction should cover the selected treatments being administered." : "",
"These fields allow you to deduct stock for the vaccination(s) given. This single deduction should cover the selected vaccinations being administered." : "",
"These fields determine which columns are shown on the find animal and find person screens." : "",
"These numbers are for shelters who have agreements with insurance companies and are given blocks of policy numbers to allocate." : "",
"These options change the behaviour of the search box at the top of the page." : "",
"These values are required for correct operation of the system. ONLY change them if you are translating to another language." : "",
"Third offence" : "",
"This Month" : "",
"This Week" : "",
"This Year" : "",
"This animal already has an active reservation." : "",
"This animal has a SmartTag PETID" : "",
"This animal has a tattoo" : "",
"This animal has active reservations, they will be cancelled." : "",
"This animal has an adoption fee of {0}" : "",
"This animal has been FIV/L tested" : "",
"This animal has been altered" : "",
"This animal has been declawed" : "",
"This animal has been heartworm tested" : "",
"This animal has movements and cannot be removed." : "",
"This animal has not been altered." : "",
"This animal has not been microchipped." : "",
"This animal has special needs" : "",
"This animal has the same name as another animal recently added to the system." : "",
"This animal is a crossbreed" : "",
"This animal is bonded with {0}" : "",
"This animal is bonded with {0}. Adoption movement records will be created for all bonded animals." : "",
"This animal is currently at a retailer, it will be automatically returned first." : "",
"This animal is currently fostered, it will be automatically returned first." : "",
"This animal is currently held and cannot be adopted." : "",
"This animal is currently quarantined and should not leave the shelter." : "",
"This animal is marked not for adoption." : "",
"This animal is microchipped" : "",
"This animal is not on the shelter." : "",
"This animal is part of a cruelty case and should not leave the shelter." : "",
"This animal should be held in case it is reclaimed" : "",
"This animal should not be shown in figures and is not in the custody of the shelter" : "",
"This animal was dead on arrival to the shelter" : "",
"This animal was euthanized" : "",
"This animal was picked up" : "",
"This animal was transferred from another shelter" : "",
"This code has already been used." : "",
"This database is locked and in read-only mode. You cannot add, change or delete records." : "",
"This database is locked." : "",
"This date of birth is an estimate" : "",
"This expense account is the source for costs of this type" : "",
"This income account is the source for payments received of this type" : "",
"This item is referred to in the database ({0}) and cannot be deleted until it is no longer in use." : "",
"This many years after creation of a person record, the name, address and telephone data will be anonymized." : "",
"This month" : "",
"This movement cannot be from a retailer when the animal has no prior retailer movements." : "",
"This person has an animal control incident against them" : "",
"This person has an animal control incident against them." : "",
"This person has been banned from adopting animals" : "",
"This person has been banned from adopting animals." : "",
"This person has been under investigation" : "",
"This person has been under investigation." : "",
"This person has movements and cannot be removed." : "",
"This person has not passed a homecheck" : "",
"This person has not passed a homecheck." : "",
"This person has payments and cannot be removed." : "",
"This person has previously surrendered an animal." : "",
"This person is linked to a waiting list record and cannot be removed." : "",
"This person is linked to an animal and cannot be removed." : "",
"This person is linked to an investigation and cannot be removed." : "",
"This person is linked to animal control and cannot be removed." : "",
"This person is linked to animal licenses and cannot be removed." : "",
"This person is linked to animal transportation and cannot be removed." : "",
"This person is linked to citations and cannot be removed." : "",
"This person is linked to found animals and cannot be removed." : "",
"This person is linked to lost animals and cannot be removed." : "",
"This person is linked to trap loans and cannot be removed." : "",
"This person is not flagged as a fosterer and cannot foster animals." : "",
"This person is not flagged as a retailer and cannot handle retailer movements." : "",
"This person is very similar to another person on file, carry on creating this record?" : "",
"This person lives in the same area as the person who brought the animal to the shelter." : "",
"This record has been changed by another user, please reload." : "",
"This report cannot be sent by email as it requires criteria to run." : "",
"This screen allows you to add extra documents to your database, for staff training, reference materials, etc." : "",
"This screen allows you to add extra images to your database, for use in reports and documents." : "",
"This type of movement requires a date." : "",
"This type of movement requires a person." : "",
"This week" : "",
"This will permanently remove the selected records, are you sure?" : "",
"This will permanently remove the selected roles, are you sure?" : "",
"This will permanently remove the selected user accounts. Are you sure?" : "",
"This will permanently remove this account and ALL TRANSACTIONS HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "",
"This will permanently remove this additional field and ALL DATA CURRENTLY HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "",
"This will permanently remove this animal, are you sure?" : "",
"This will permanently remove this incident, are you sure?" : "",
"This will permanently remove this person, are you sure?" : "",
"This will permanently remove this record, are you sure?" : "",
"This will permanently remove this waiting list entry, are you sure?" : "",
"This will remove ALL rota entries for the week beginning {0}. This action is irreversible, are you sure?" : "",
"This year" : "",
"Thoroughbred" : "",
"Thu" : "",
"Thumbnail size" : "",
"Thursday" : "",
"Tibetan Mastiff" : "",
"Tibetan Spaniel" : "",
"Tibetan Terrier" : "",
"Tiger" : "",
"Time" : "",
"Time Brought In" : "",
"Time On List" : "",
"Time On Shelter" : "",
"Time on list" : "",
"Time on shelter" : "",
"Timeline" : "",
"Timeline ({0})" : "",
"Times should be in HH:MM format, eg: 09:00, 16:30" : "",
"Title" : "",
"Title First Last" : "",
"Title Initials Last" : "",
"To" : "",
"To Adoption" : "",
"To Fostering" : "",
"To Other" : "",
"To Retailer" : "",
"To add people to the rota, create new person records with the staff or volunteer flag." : "",
"To continue using ASM, please renew {0}" : "",
"To week beginning" : "",
"Today" : "",
"Tonkinese" : "",
"Too Many Animals" : "",
"Tooltip" : "",
"Top Margin" : "",
"Tortie" : "",
"Tortie and White" : "",
"Tortoise" : "",
"Tosa Inu" : "",
"Total" : "",
"Total number of units in the container" : "",
"Total payments" : "",
"Toucan" : "",
"Toy Fox Terrier" : "",
"Training" : "",
"Transactions" : "",
"Transactions need a date and description." : "",
"Transfer" : "",
"Transfer In" : "",
"Transfer To" : "",
"Transfer an animal" : "",
"Transfer from Municipal Shelter" : "",
"Transfer from Other Shelter" : "",
"Transfer successfully created." : "",
"Transfer?" : "",
"Transferred" : "",
"Transferred From" : "",
"Transferred In" : "",
"Transferred In {0}" : "",
"Transferred Out" : "",
"Transferred Out {0}" : "",
"Transfers must have a valid transfer date." : "",
"Transport" : "",
"Transport Book" : "",
"Transport Types" : "",
"Transport book" : "",
"Transport requires an animal" : "",
"Transports must have valid pickup and dropoff dates and times." : "",
"Trap Loans" : "",
"Trap Number" : "",
"Trap Types" : "",
"Trap loan" : "",
"Trap loans" : "",
"Treat animals at retailers as part of the shelter inventory" : "",
"Treat foster animals as part of the shelter inventory" : "",
"Treat trial adoptions as part of the shelter inventory" : "",
"Treatment" : "",
"Treatment Given" : "",
"Treatment marked as given for {0} - {1}" : "",
"Treatment name cannot be blank" : "",
"Treatments" : "",
"Treeing Walker Coonhound" : "",
"Trial Adoption" : "",
"Trial adoption" : "",
"Trial adoption book" : "",
"Trial ends on" : "",
"Tricolour" : "",
"Trigger Batch Processes" : "",
"Tu" : "",
"Tue" : "",
"Tuesday" : "",
"Tumblr" : "",
"Turkey" : "",
"Turkish Angora" : "",
"Turkish Van" : "",
"Turtle" : "",
"Twitter" : "",
"Type" : "",
"Type of animal links to show" : "",
"U (Unwanted Cat)" : "",
"UK Giftaid" : "",
"URL" : "",
"UUUUUUUUUU or UUUU = unique number" : "",
"Unable to Afford" : "",
"Unable to Cope" : "",
"Unaltered" : "",
"Unaltered Adopted Animals" : "",
"Unaltered Dog - 1 year" : "",
"Unaltered Dog - 3 year" : "",
"Unavailable" : "",
"Under {0} weeks old" : "",
"Unit" : "",
"Unit Price" : "",
"Unit within the location, eg: pen or cage number" : "",
"Units" : "",
"Unknown" : "",
"Unknown microchip brand" : "",
"Unpaid Fines" : "",
"Unreserved" : "",
"Unsaved Changes" : "",
"Unspecified" : "",
"Unsuitable Accomodation" : "",
"Up for adoption" : "",
"Upcoming medical items" : "",
"Update" : "",
"Update publishing options" : "",
"Update system options" : "",
"Update the daily boarding cost for this animal" : "",
"Updated database to version {0}" : "",
"Updated." : "",
"Updating..." : "",
"Upload" : "",
"Upload Document" : "",
"Upload ODT" : "",
"Upload Photo" : "",
"Upload a new OpenOffice template" : "",
"Upload all available images for animals" : "",
"Upload an SQL script" : "",
"Upload splash.jpg and logo.jpg to override the login screen image and logo at the top left of ASM." : "",
"Uploading..." : "",
"Urgencies" : "",
"Urgency" : "",
"Urgent" : "",
"Usage Date" : "",
"Usage Type" : "",
"Usage explains why this stock record was created or adjusted. Usage records will only be created if the balance changes." : "",
"Use Automatic Insurance Numbers" : "",
"Use HTML5 client side image scaling where available to speed up image uploads" : "",
"Use SQL Interface" : "",
"Use a single breed field" : "",
"Use animal comments" : "",
"Use fancy tooltips" : "",
"Use notes from preferred photo" : "",
"Use the icon in the lower right of notes fields to view them in a separate window." : "",
"User Accounts" : "",
"User Roles" : "",
"User accounts that will only ever call the Service API should set this to No." : "",
"User roles" : "",
"Username" : "",
"Username '{0}' already exists" : "",
"Users" : "",
"Users need a username, password and at least one role or the superuser flag setting." : "",
"Vacation" : "",
"Vaccinate" : "",
"Vaccinate Animal" : "",
"Vaccination" : "",
"Vaccination Book" : "",
"Vaccination Given" : "",
"Vaccination Types" : "",
"Vaccination book" : "",
"Vaccination marked as given for {0} - {1}" : "",
"Vaccinations" : "",
"Vaccinations need an animal and at least a required date." : "",
"Vaccinations require an animal" : "",
"Vaccinations: {0}, Tests: {1}, Medical Treatments: {2}, Transport: {3}, Costs: {4}, Total Costs: {5} Total Payments: {6}, Balance: {7}" : "",
"Valid tokens for the subject and text" : "",
"Value" : "",
"Various" : "",
"Vertical Pitch" : "",
"Very Large" : "",
"Vet" : "",
"Vet Visit" : "",
"Victim" : "",
"Victim Name" : "",
"Video Link" : "",
"Vietnamese Pot Bellied" : "",
"View" : "",
"View Accounts" : "",
"View Animals" : "",
"View Audit Trail" : "",
"View Citations" : "",
"View Clinic Appointment" : "",
"View Cost" : "",
"View Diary" : "",
"View Diets" : "",
"View Document" : "",
"View Document Repository" : "",
"View Found Animal" : "",
"View Incidents" : "",
"View Incoming Forms" : "",
"View Investigations" : "",
"View Licenses" : "",
"View Litter" : "",
"View Log" : "",
"View Lost Animal" : "",
"View Manual" : "",
"View Media" : "",
"View Medical Records" : "",
"View Movement" : "",
"View PDF" : "",
"View Payments" : "",
"View Person" : "",
"View Person Links" : "",
"View Report" : "",
"View Roles" : "",
"View Rota" : "",
"View Shelter Animals" : "",
"View Staff Person Records" : "",
"View Stock" : "",
"View Tests" : "",
"View Training Videos" : "",
"View Transport" : "",
"View Trap Loans" : "",
"View Vaccinations" : "",
"View Volunteer Person Records" : "",
"View Vouchers" : "",
"View Waiting List" : "",
"View animals matching publishing options" : "",
"View littermates" : "",
"View matching records" : "",
"View media" : "",
"View publishing logs" : "",
"Visual Theme" : "",
"Vizsla" : "",
"Volunteer" : "",
"Voucher Types" : "",
"Vouchers" : "",
"Vouchers need an issue and expiry date." : "",
"WARNING: This animal has not been microchipped" : "",
"WARNING: This animal is over 6 months old and has not been neutered/spayed" : "",
"Waiting" : "",
"Waiting List" : "",
"Waiting List - Additional" : "",
"Waiting List - Details" : "",
"Waiting List - Removal" : "",
"Waiting List Contact" : "",
"Waiting List Donation" : "",
"Waiting List {0}" : "",
"Waiting List: {0}" : "",
"Waiting Room" : "",
"Waiting for documents..." : "",
"Waiting list donations" : "",
"Waiting list entries matching '{0}'." : "",
"Waiting list entries must have a contact" : "",
"Waiting list entry for {0} ({1})" : "",
"Waiting list entry successfully added." : "",
"Waiting list urgency update period in days" : "",
"Warmblood" : "",
"Warn if the name of the new animal is similar to one entered recently" : "",
"Warn when adopting an animal who has not been microchipped" : "",
"Warn when adopting an unaltered animal" : "",
"Warn when adopting to a person who has been banned from adopting animals" : "",
"Warn when adopting to a person who has not been homechecked" : "",
"Warn when adopting to a person who has previously brought an animal to the shelter" : "",
"Warn when adopting to a person who lives in the same area as the original owner" : "",
"Warn when creating multiple reservations on the same animal" : "",
"Warnings" : "",
"Wasted" : "",
"Water Bills" : "",
"We" : "",
"Wed" : "",
"Wednesday" : "",
"Week" : "",
"Week beginning {0}" : "",
"Weekly" : "",
"Weight" : "",
"Weimaraner" : "",
"Welcome!" : "",
"Welsh Corgi" : "",
"Welsh Springer Spaniel" : "",
"Welsh Terrier" : "",
"West Highland White Terrier Westie" : "",
"Wheaten Terrier" : "",
"When" : "",
"When ASM should stop showing this message" : "",
"When I change the location of an animal, make a note of it in the log with this type" : "",
"When I change the weight of an animal, make a note of it in the log with this type" : "",
"When I generate a document, make a note of it in the log with this type" : "",
"When I mark an animal held, make a note of it in the log with this type" : "",
"When I set a new GDPR Opt-In contact option, make a note of it in the log with this type" : "",
"When a message is created, email it to each matching user" : "",
"When creating payments from the Move menu screens, mark them due instead of received" : "",
"When displaying calendars, the first day of the week is" : "",
"When displaying person names, use the format" : "",
"When entering dates, hold down CTRL and use the cursor keys to move around the calendar. Press t to go to today." : "",
"When entering vaccinations, default the last batch number and manufacturer for that type" : "",
"When matching lost animals, include shelter animals" : "",
"When publishing to third party services, add this extra text to the bottom of all animal descriptions" : "",
"When receiving multiple payments, allow the due and received dates to be set" : "",
"When receiving payments, allow a quantity and unit price to be set" : "",
"When receiving payments, allow recording of sales tax with a default rate of" : "",
"When receiving payments, allow the deposit account to be overridden" : "",
"When you use Move > Adopt an animal, ASM will automatically return any open foster or retailer movement before creating the adoption." : "",
"When you use Move > Foster an animal, ASM will automatically return any open foster movement before moving the animal to its new home." : "",
"Where this animal is located within the shelter" : "",
"Whippet" : "",
"White" : "",
"White German Shepherd" : "",
"White and Black" : "",
"White and Brindle" : "",
"White and Brown" : "",
"White and Grey" : "",
"White and Liver" : "",
"White and Tabby" : "",
"White and Tan" : "",
"White and Torti" : "",
"Will this owner give a donation?" : "",
"Wire-haired Pointing Griffon" : "",
"Wirehaired Terrier" : "",
"With Vet" : "",
"With overnight batch" : "",
"Withdrawal" : "",
"Wk" : "",
"Work" : "",
"Work Phone" : "",
"Work Types" : "",
"XXX or XX = number unique for this year" : "",
"Xoloitzcuintle/Mexican Hairless" : "",
"YY or YYYY = current year" : "",
"Yellow Labrador Retriever" : "",
"Yellow and Grey" : "",
"Yes" : "",
"Yes/No" : "",
"Yes/No/Unknown" : "",
"Yorkshire Terrier Yorkie" : "",
"You can bookmark search results, animals, people and most data entry screens." : "",
"You can drag and drop animals in shelter view to change their locations." : "",
"You can middle click a link to open it in a new browser tab (push the wheel on most modern mice)." : "",
"You can override the search result sort by adding one of the following to the end of your search - sort:az, sort:za, sort:mr, sort:lr" : "",
"You can prefix your term in the search box with a: to search only animals, p: to search only people, wl: to search waiting list entries, la: to search lost animals and fa: to search found animals." : "",
"You can set a default amount for different payment types in the Settings- Lookup Data screen. Very handy when creating adoptions." : "",
"You can sort tables by clicking on the column headings." : "",
"You can upload images called logo.jpg and splash.jpg to the Settings- Reports-Extra Images screen to override the login splash screen and logo in the upper left corner of the application." : "",
"You can use incoming forms to create new records or attach them to existing records." : "",
"You can't have a return without a movement." : "",
"You didn't specify any search criteria, so an on-shelter search was assumed." : "",
"You have unsaved changes, are you sure you want to leave this page?" : "",
"You must supply a code." : "",
"Young Adult" : "",
"Your CSV file should have a header row with field names ASM recognises." : "",
"Your sheltermanager.com account is due to expire on {0}, please renew {1}" : "",
"Zipcode" : "",
"Zipcode contains" : "",
"[None]" : "",
"after connecting, chdir to" : "",
"and" : "",
"are sent to" : "",
"at" : "",
"cm" : "",
"days" : "",
"estimate" : "",
"filters: a:animal, p:person, wl:waitinglist, la:lostanimal, fa:foundanimal keywords: onshelter/os, notforadoption, aco, banned, donors, deceased, vets, retailers, staff, fosterers, volunteers, homecheckers, members, activelost, activefound" : "",
"inches" : "",
"invalid" : "",
"kg" : "",
"lb" : "",
"less" : "",
"mins" : "",
"months" : "三個月",
"more" : "",
"on" : "",
"or" : "",
"or estimated age in years" : "",
"oz" : "",
"to" : "",
"today" : "",
"treatments" : "",
"treatments, every" : "",
"weekdays" : "",
"weeks" : "",
"weeks after last contact." : "",
"years" : "",
"yesterday" : "",
"{0} (under {1} months)" : "",
"{0} - {1} ({2} {3} aged {4})" : "",
"{0} - {1} {2}" : "",
"{0} - {1} {2} ({3}), contact {4} ({5}) - lost in {6}, postcode {7}, on {8}" : "",
"{0} animals successfully updated." : "",
"{0} cannot be blank" : "",
"{0} fine, paid" : "",
"{0} fine, unpaid" : "",
"{0} incurred in costs" : "",
"{0} is running ({1}% complete)." : "",
"{0} payment records created." : "",
"{0} received" : "",
"{0} record(s) match the mail merge." : "",
"{0} results." : "",
"{0} rows affected." : "",
"{0} selected" : "",
"{0} treatments every {1} days" : "",
"{0} treatments every {1} months" : "",
"{0} treatments every {1} weekdays" : "",
"{0} treatments every {1} weeks" : "",
"{0} treatments every {1} years" : "",
"{0} {1} ({2} treatments)" : "",
"{0} {1} aged {2}" : "",
"{0} {1} {2} aged {3}" : "",
"{0} {1}: Moved from {2} to {3}" : "",
"{0} {1}: adopted by {2}" : "",
"{0} {1}: altered" : "",
"{0} {1}: available for adoption" : "",
"{0} {1}: died ({2})" : "",
"{0} {1}: entered the shelter" : "",
"{0} {1}: escaped" : "",
"{0} {1}: euthanised ({2})" : "",
"{0} {1}: fostered to {2}" : "",
"{0} {1}: held" : "",
"{0} {1}: microchipped" : "",
"{0} {1}: not available for adoption" : "",
"{0} {1}: quarantined" : "",
"{0} {1}: received {2}" : "",
"{0} {1}: reclaimed by {2}" : "",
"{0} {1}: released" : "",
"{0} {1}: reserved by {2}" : "",
"{0} {1}: returned by {2}" : "",
"{0} {1}: sent to retailer {2}" : "",
"{0} {1}: stolen" : "",
"{0} {1}: tested positive for FIV" : "",
"{0} {1}: tested positive for FeLV" : "",
"{0} {1}: tested positive for Heartworm" : "",
"{0} {1}: transferred to {2}" : "",
"{0}, Week {1}" : "",
"{0}: Entered shelter {1}, Last changed on {2} by {3}. {4} {5} {6} aged {7}" : "",
"{0}: closed {1} ({2})" : "",
"{0}: opened {1}" : "",
"{0}: waiting list - {1}" : "",
"{0}: {1} {2} - {3} {4}" : "",
"{2}: found in {1}: {0}" : "",
"{2}: lost in {1}: {0}" : "",
"{plural0} animal as dead on arrival" : "",
"{plural0} animal control call due for followup today" : "",
"{plural0} animal died" : "",
"{plural0} animal entered the shelter" : "",
"{plural0} animal has a hold ending today" : "",
"{plural0} animal has been on the shelter longer than {0} months" : "",
"{plural0} animal is not available for adoption" : "",
"{plural0} animal was adopted" : "",
"{plural0} animal was euthanized" : "",
"{plural0} animal was reclaimed by its owner" : "",
"{plural0} animal was transferred to another shelter" : "",
"{plural0} day." : "",
"{plural0} incomplete animal control call" : "",
"{plural0} item of stock expires in the next month" : "",
"{plural0} item of stock has expired" : "",
"{plural0} medical treatment needs to be administered today" : "",
"{plural0} month." : "",
"{plural0} new online form submission" : "",
"{plural0} person has an overdue payment" : "",
"{plural0} person with an active reservation has not been homechecked" : "",
"{plural0} potential match for a lost animal" : "",
"{plural0} recent publisher run had errors" : "",
"{plural0} reservation has been active over a week without adoption" : "",
"{plural0} result found in {1} seconds. Order: {2}" : "",
"{plural0} shelter animal has not been microchipped" : "",
"{plural0} shelter animal has people looking for them" : "",
"{plural0} test needs to be performed today" : "",
"{plural0} transport does not have a driver assigned" : "",
"{plural0} trap is overdue for return" : "",
"{plural0} trial adoption has ended" : "",
"{plural0} unaltered animal has been adopted in the last month" : "",
"{plural0} undispatched animal control call" : "",
"{plural0} unpaid fine" : "",
"{plural0} urgent entry on the waiting list" : "",
"{plural0} vaccination has expired" : "",
"{plural0} vaccination needs to be administered today" : "",
"{plural0} week." : "",
"{plural0} year." : "",
"{plural1} animal control calls due for followup today" : "",
"{plural1} animals are not available for adoption" : "",
"{plural1} animals died" : "",
"{plural1} animals entered the shelter" : "",
"{plural1} animals have been on the shelter longer than {0} months" : "",
"{plural1} animals have holds ending today" : "",
"{plural1} animals were adopted" : "",
"{plural1} animals were dead on arrival" : "",
"{plural1} animals were euthanized" : "",
"{plural1} animals were reclaimed by their owners" : "",
"{plural1} animals were transferred to other shelters" : "",
"{plural1} days." : "",
"{plural1} incomplete animal control calls" : "",
"{plural1} items of stock expire in the next month" : "",
"{plural1} items of stock have expired" : "",
"{plural1} medical treatments need to be administered today" : "",
"{plural1} months." : "",
"{plural1} new online form submissions" : "",
"{plural1} people have overdue payments" : "",
"{plural1} people with active reservations have not been homechecked" : "",
"{plural1} potential matches for lost animals" : "",
"{plural1} recent publisher runs had errors" : "",
"{plural1} reservations have been active over a week without adoption" : "",
"{plural1} results found in {1} seconds. Order: {2}" : "",
"{plural1} shelter animals have not been microchipped" : "",
"{plural1} shelter animals have people looking for them" : "",
"{plural1} tests need to be performed today" : "",
"{plural1} transports do not have a driver assigned" : "",
"{plural1} traps are overdue for return" : "",
"{plural1} trial adoptions have ended" : "",
"{plural1} unaltered animals have been adopted in the last month" : "",
"{plural1} undispatched animal control calls" : "",
"{plural1} unpaid fines" : "",
"{plural1} urgent entries on the waiting list" : "",
"{plural1} vaccinations have expired" : "",
"{plural1} vaccinations need to be administered today" : "",
"{plural1} weeks." : "",
"{plural1} years." : "",
"{plural2} animal control calls due for followup today" : "",
"{plural2} animals are not available for adoption" : "",
"{plural2} animals died" : "",
"{plural2} animals entered the shelter" : "",
"{plural2} animals have been on the shelter longer than {0} months" : "",
"{plural2} animals have holds ending today" : "",
"{plural2} animals were adopted" : "",
"{plural2} animals were dead on arrival" : "",
"{plural2} animals were euthanized" : "",
"{plural2} animals were reclaimed by their owners" : "",
"{plural2} animals were transferred to other shelters" : "",
"{plural2} days." : "",
"{plural2} incomplete animal control calls" : "",
"{plural2} items of stock expire in the next month" : "",
"{plural2} items of stock have expired" : "",
"{plural2} medical treatments need to be administered today" : "",
"{plural2} months." : "",
"{plural2} new online form submissions" : "",
"{plural2} people have overdue payments" : "",
"{plural2} people with active reservations have not been homechecked" : "",
"{plural2} potential matches for lost animals" : "",
"{plural2} recent publisher runs had errors" : "",
"{plural2} reservations have been active over a week without adoption" : "",
"{plural2} results found in {1} seconds. Order: {2}" : "",
"{plural2} shelter animals have not been microchipped" : "",
"{plural2} shelter animals have people looking for them" : "",
"{plural2} tests need to be performed today" : "",
"{plural2} transports do not have a driver assigned" : "",
"{plural2} traps are overdue for return" : "",
"{plural2} trial adoptions have ended" : "",
"{plural2} unaltered animals have been adopted in the last month" : "",
"{plural2} undispatched animal control calls" : "",
"{plural2} unpaid fines" : "",
"{plural2} urgent entries on the waiting list" : "",
"{plural2} vaccinations have expired" : "",
"{plural2} vaccinations need to be administered today" : "",
"{plural2} weeks." : "",
"{plural2} years." : "",
"{plural3} animal control calls due for followup today" : "",
"{plural3} animals are not available for adoption" : "",
"{plural3} animals died" : "",
"{plural3} animals entered the shelter" : "",
"{plural3} animals have been on the shelter longer than {0} months" : "",
"{plural3} animals have holds ending today" : "",
"{plural3} animals were adopted" : "",
"{plural3} animals were dead on arrival" : "",
"{plural3} animals were euthanized" : "",
"{plural3} animals were reclaimed by their owners" : "",
"{plural3} animals were transferred to other shelters" : "",
"{plural3} days." : "",
"{plural3} incomplete animal control calls" : "",
"{plural3} items of stock expire in the next month" : "",
"{plural3} items of stock have expired" : "",
"{plural3} medical treatments need to be administered today" : "",
"{plural3} months." : "",
"{plural3} new online form submissions" : "",
"{plural3} people have overdue payments" : "",
"{plural3} people with active reservations have not been homechecked" : "",
"{plural3} potential matches for lost animals" : "",
"{plural3} recent publisher runs had errors" : "",
"{plural3} reservations have been active over a week without adoption" : "",
"{plural3} results found in {1} seconds. Order: {2}" : "",
"{plural3} shelter animals have not been microchipped" : "",
"{plural3} shelter animals have people looking for them" : "",
"{plural3} tests need to be performed today" : "",
"{plural3} transports do not have a driver assigned" : "",
"{plural3} traps are overdue for return" : "",
"{plural3} trial adoptions have ended" : "",
"{plural3} unaltered animals have been adopted in the last month" : "",
"{plural3} undispatched animal control calls" : "",
"{plural3} unpaid fines" : "",
"{plural3} urgent entries on the waiting list" : "",
"{plural3} vaccinations have expired" : "",
"{plural3} vaccinations need to be administered today" : "",
"{plural3} weeks." : ""
}
|
bobintetley/asm3
|
src/asm3/locales/locale_zh_TW.py
|
Python
|
gpl-3.0
| 108,725
|
[
"Amber",
"VisIt"
] |
0fbf12331a1479e068bb70a0c54b44adb1723bd01d3442164bbcd874eddbaee5
|
################################################################################
# $HeadURL $
################################################################################
__RCSID__ = "$Id$"
"""
Fake AgentModule class for tests. Most methods simply return S_OK(); am_getOption returns a fixed dummy value.
"""
from DIRAC import S_OK
class AgentModule:
def __init__( self, agentName, baseAgentName, properties = {} ):
pass
def am_initialize( self, *initArgs ):
return S_OK()
def am_setOption( self, optionName, value ):
return S_OK()
def am_getOption( self, optionName, defaultValue = False ):
return 8
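# Illustrative sketch (not part of the original file): a test can pass this
# fake in place of the real AgentModule and still drive the agent lifecycle.
# The option name below is hypothetical; S_OK() follows the usual DIRAC
# convention of returning a dict with 'OK': True.
#
#   fake = AgentModule("SomeAgent", "SomeBaseAgent")
#   assert fake.am_initialize()['OK']
#   assert fake.am_getOption("PollingTime") == 8   # fixed dummy value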
################################################################################
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
################################################################################
'''
HOW DOES THIS WORK.
will come soon...
'''
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
Sbalbp/DIRAC
|
ResourceStatusSystem/test/fake_AgentModule.py
|
Python
|
gpl-3.0
| 1,080
|
[
"DIRAC"
] |
5561db7979856109c012a4f9902b38b60cd5e3025be79fd2facca5a3cdb69b81
|
"""
Unix-like Command style parent
Evennia contribution, Vincent Le Geoff 2017
This module contains a command class that allows for unix-style command syntax in-game, using
--options, positional arguments and stuff like -n 10 etc., similarly to a unix command. It might not be
the best syntax for the average player but can be really useful for builders when they need to have
a single command do many things with many options. It uses the ArgumentParser from Python's standard
library under the hood.
To use, inherit `UnixCommand` from this module in your own commands. You need
to override two methods:
- The `init_parser` method, which adds options to the parser. Note that you should normally
*not* override the normal `parse` method when inheriting from `UnixCommand`.
- The `func` method, called to execute the command once parsed (like any Command).
Here's a short example:
```python
class CmdPlant(UnixCommand):
'''
Plant a tree or plant.
This command is used to plant something in the room you are in.
Examples:
plant orange -a 8
plant strawberry --hidden
plant potato --hidden --age 5
'''
key = "plant"
def init_parser(self):
"Add the arguments to the parser."
# 'self.parser' inherits `argparse.ArgumentParser`
self.parser.add_argument("key",
help="the key of the plant to be planted here")
self.parser.add_argument("-a", "--age", type=int,
default=1, help="the age of the plant to be planted")
self.parser.add_argument("--hidden", action="store_true",
help="should the newly-planted plant be hidden to players?")
def func(self):
"func is called only if the parser succeeded."
# 'self.opts' contains the parsed options
key = self.opts.key
age = self.opts.age
hidden = self.opts.hidden
self.msg("Going to plant '{}', age={}, hidden={}.".format(
key, age, hidden))
```
To see the full power of argparse and the types of supported options, visit
[the documentation of argparse](https://docs.python.org/2/library/argparse.html).
"""
import argparse
import shlex
from textwrap import dedent
from evennia import Command, InterruptCommand
from evennia.utils.ansi import raw
class ParseError(Exception):
"""An error occurred during parsing."""
pass
class UnixCommandParser(argparse.ArgumentParser):
"""A modifier command parser for unix commands.
This parser is used to replace `argparse.ArgumentParser`. It
is aware of the command calling it, and can more easily report to
the caller. Some features (like the "brutal exit" of the original
parser) are disabled or replaced. This parser is used by UnixCommand
and creating one directly isn't recommended or necessary. Even
adding a sub-command will use this replaced parser automatically.
"""
def __init__(self, prog, description="", epilog="", command=None, **kwargs):
"""
Build a UnixCommandParser with a link to the command using it.
Args:
prog (str): the program name (usually the command key).
description (str): a very brief line to show in the usage text.
epilog (str): the epilog to show below options.
command (Command): the command calling the parser.
Kwargs:
Additional keyword arguments are directly sent to
`argparse.ArgumentParser`. You will find them on the
[parser's documentation](https://docs.python.org/2/library/argparse.html).
Note:
It's doubtful you would need to create this parser manually.
The `UnixCommand` does that automatically. If you create
sub-commands, this class will be used.
"""
prog = prog or command.key
super(UnixCommandParser, self).__init__(
prog=prog, description=description,
conflict_handler='resolve', add_help=False, **kwargs)
self.command = command
self.post_help = epilog
def n_exit(code=None, msg=None):
raise ParseError(msg)
self.exit = n_exit
# Replace the -h/--help
self.add_argument("-h", "--hel", nargs=0, action=HelpAction,
help="display the command help")
def format_usage(self):
"""Return the usage line.
Note:
This method is present to return the raw-escaped usage line,
in order to avoid unintentional color codes.
"""
return raw(super(UnixCommandParser, self).format_usage())
def format_help(self):
"""Return the parser help, including its epilog.
Note:
This method is present to return the raw-escaped help,
in order to avoid unintentional color codes. Color codes
in the epilog (the command docstring) are supported.
"""
autohelp = raw(super(UnixCommandParser, self).format_help())
return "\n" + autohelp + "\n" + self.post_help
def print_usage(self, file=None):
"""Print the usage to the caller.
Args:
file (file-object): not used here, the caller is used.
Note:
This method will override `argparse.ArgumentParser`'s in order
to not display the help on stdout or stderr, but to the
command's caller.
"""
if self.command:
self.command.msg(self.format_usage().strip())
def print_help(self, file=None):
"""Print the help to the caller.
Args:
file (file-object): not used here, the caller is used.
Note:
This method will override `argparse.ArgumentParser`'s in order
to not display the help on stdout or stderr, but to the
command's caller.
"""
if self.command:
self.command.msg(self.format_help().strip())
class HelpAction(argparse.Action):
"""Override the -h/--help action in the default parser.
Using the default -h/--help will call the exit function in different
ways, preventing the entire help message from being provided. Hence
this override.
"""
def __call__(self, parser, namespace, values, option_string=None):
"""If asked for help, display to the caller."""
if parser.command:
parser.command.msg(parser.format_help().strip())
parser.exit(0, "")
class UnixCommand(Command):
"""
Unix-type commands, supporting short and long options.
This command syntax uses the Unix-style commands with short options
(-X) and long options (--something). The `argparse` module is
used to parse the command.
In order to use it, you should override two methods:
- `init_parser`: this method is called when the command is created.
It can be used to set options in the parser. `self.parser`
contains the `argparse.ArgumentParser`, so you can add arguments
here.
- `func`: this method is called to execute the command, but after
the parser has checked the arguments given to it are valid.
You can access the namespace of valid arguments in `self.opts`
at this point.
The help of UnixCommands is derived from the docstring, in a
slightly different way than usual: the first line of the docstring
is used to represent the program description (the very short
line at the top of the help message). The other lines below are
used as the program's "epilog", displayed below the options. It
means in your docstring, you don't have to write the options.
They will be automatically provided by the parser and displayed
accordingly. The `argparse` module provides a default '-h' or
'--help' option on the command. Typing |whelp commandname|n will
display the same as |wcommandname -h|n, though this behavior can
be changed.
"""
def __init__(self, **kwargs):
"""
The lockhandler works the same as for objects.
optional kwargs will be set as properties on the Command at runtime,
overloading eventual same-named class properties.
"""
super(UnixCommand, self).__init__(**kwargs)
# Create the empty UnixCommandParser, inheriting argparse.ArgumentParser
lines = dedent(self.__doc__.strip("\n")).splitlines()
description = lines[0].strip()
epilog = "\n".join(lines[1:]).strip()
self.parser = UnixCommandParser(None, description, epilog, command=self)
# Fill the argument parser
self.init_parser()
def init_parser(self):
"""
Configure the argument parser, adding in options.
Note:
This method is to be overridden in order to add options
to the argument parser. Use `self.parser`, which contains
the `argparse.ArgumentParser`. You can, for instance,
use its `add_argument` method.
"""
pass
def func(self):
"""Override to handle the command execution."""
pass
def get_help(self, caller, cmdset):
"""
Return the help message for this command and this caller.
Args:
caller (Object or Player): the caller asking for help on the command.
cmdset (CmdSet): the command set (if you need additional commands).
Returns:
docstring (str): the help text to provide the caller for this command.
"""
return self.parser.format_help()
def parse(self):
"""
Process arguments provided in `self.args`.
Note:
You should not override this method. Consider overriding
`init_parser` instead.
"""
try:
self.opts = self.parser.parse_args(shlex.split(self.args))
except ParseError as err:
msg = str(err)
if msg:
self.msg(msg)
raise InterruptCommand
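# A minimal sketch of the subclassing pattern described in the class
# docstring above (the command key and options below are hypothetical and
# not part of the Evennia contrib itself):
class CmdDig(UnixCommand):
    """
    Dig a new room.

    The first docstring line becomes the parser description; the remaining
    lines become the epilog shown below the options.
    """

    key = "dig"

    def init_parser(self):
        # self.parser is the UnixCommandParser created in __init__
        self.parser.add_argument("roomname", help="name of the new room")
        self.parser.add_argument("-t", "--teleport", action="store_true",
                                 help="move to the new room once created")

    def func(self):
        # self.opts holds the parsed namespace once parse() succeeded
        self.msg("Digging '{}' (teleport={}).".format(
            self.opts.roomname, self.opts.teleport))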
|
feend78/evennia
|
evennia/contrib/unixcommand.py
|
Python
|
bsd-3-clause
| 10,008
|
[
"VisIt"
] |
380beca81fe1a30ed898ce93b4b97c3ae4b1a26f0288e5d729908104fecec4a6
|
import numpy as np
import os
try:
import netCDF4 as netCDF
except:
import netCDF3 as netCDF
import matplotlib.pyplot as plt
import time
from datetime import datetime
from matplotlib.dates import date2num, num2date
import pyroms
import pyroms_toolbox
import _remapping
class nctime(object):
pass
def remap(src_file, src_varname, src_grd, dst_grd, dmax=0, cdepth=0, kk=0, dst_dir='./'):
ystart=240
# get time
nctime.long_name = 'time'
nctime.units = 'days since 1900-01-01 00:00:00'
# time reference "days since 1900-01-01 00:00:00"
ref = datetime(1900, 1, 1, 0, 0, 0)
ref = date2num(ref)
# For IC
tag = src_file.rsplit('/')[-1].rsplit('_')[-1].rsplit('-')[0]
year = int(tag[:4])
month = int(tag[4:6])
day = int(tag[6:])
time = datetime(year, month, day, 0, 0, 0)
# For CLM
# year = int(src_file.rsplit('/')[-1].rsplit('_')[-2])
# month = int(src_file.rsplit('/')[-1].rsplit('_')[-1][0:2])
# day = np.array([15,14,15,15,15,15,15,15,15,15,15,15])
# hour = np.array([12,0,12,0,12,0,12,12,0,12,0,12])
# if year%4 == 0:
# hour = np.array([12,12,12,0,12,0,12,12,0,12,0,12])
# time = datetime(year, month, day[month-1], hour[month-1], 0, 0)
time = date2num(time)
time = time - ref
time = time + 2.5 # 5-day average
# create IC file
dst_file = src_file.rsplit('/')[-1]
dst_file = dst_dir + dst_file[:-4] + '_' + src_varname + '_ic_' + dst_grd.name + '.nc'
print '\nCreating file', dst_file
if os.path.exists(dst_file) is True:
os.remove(dst_file)
pyroms_toolbox.nc_create_roms_file(dst_file, dst_grd, nctime)
# open IC file
nc = netCDF.Dataset(dst_file, 'a', format='NETCDF3_64BIT')
#load var
cdf = netCDF.Dataset(src_file)
src_var = cdf.variables[src_varname]
#get missing value
spval = src_var._FillValue
# determine variable dimension
ndim = len(src_var.dimensions)
# global grid
if ndim == 3:
src_var = src_var[:]
src_var = src_var[:,np.r_[ystart:np.size(src_var,1),-1],:]
elif ndim == 2:
src_var = src_var[:]
src_var = src_var[np.r_[ystart:np.size(src_var,0),-1],:]
if src_varname == 'ssh':
Bpos = 't'
Cpos = 'rho'
z = src_grd.z_t
Mp, Lp = dst_grd.hgrid.mask_rho.shape
wts_file = 'remap_weights_SODA_2.1.6_to_ARCTIC2_bilinear_t_to_rho.nc'
dst_varname = 'zeta'
dimensions = ('ocean_time', 'eta_rho', 'xi_rho')
long_name = 'free-surface'
units = 'meter'
field = 'free-surface, scalar, series'
elif src_varname == 'temp':
Bpos = 't'
Cpos = 'rho'
z = src_grd.z_t
Mp, Lp = dst_grd.hgrid.mask_rho.shape
wts_file = 'remap_weights_SODA_2.1.6_to_ARCTIC2_bilinear_t_to_rho.nc'
dst_varname = 'temp'
dimensions = ('ocean_time', 's_rho', 'eta_rho', 'xi_rho')
long_name = 'potential temperature'
units = 'Celsius'
field = 'temperature, scalar, series'
elif src_varname == 'salt':
Bpos = 't'
Cpos = 'rho'
z = src_grd.z_t
Mp, Lp = dst_grd.hgrid.mask_rho.shape
wts_file = 'remap_weights_SODA_2.1.6_to_ARCTIC2_bilinear_t_to_rho.nc'
dst_varname = 'salt'
dimensions = ('ocean_time', 's_rho', 'eta_rho', 'xi_rho')
long_name = 'salinity'
units = 'PSU'
field = 'salinity, scalar, series'
else:
raise ValueError, 'Undefined src_varname'
if ndim == 3:
# build intermediate zgrid
zlevel = -z[::-1,0,0]
nzlevel = len(zlevel)
dst_zcoord = pyroms.vgrid.z_coordinate(dst_grd.vgrid.h, zlevel, nzlevel)
dst_grdz = pyroms.grid.ROMS_Grid(dst_grd.name+'_Z', dst_grd.hgrid, dst_zcoord)
# create variable in file
print 'Creating variable', dst_varname
nc.createVariable(dst_varname, 'f8', dimensions, fill_value=spval)
nc.variables[dst_varname].long_name = long_name
nc.variables[dst_varname].units = units
nc.variables[dst_varname].field = field
# remapping
print 'remapping', dst_varname, 'from', src_grd.name, \
'to', dst_grd.name
print 'time =', time
if ndim == 3:
# flood the grid
print 'flood the grid'
src_varz = pyroms_toolbox.BGrid_SODA.flood(src_var, src_grd, Bpos=Bpos, spval=spval, \
dmax=dmax, cdepth=cdepth, kk=kk)
else:
src_varz = src_var
# horizontal interpolation using scrip weights
print 'horizontal interpolation using scrip weights'
dst_varz = pyroms.remapping.remap(src_varz, wts_file, \
spval=spval)
if ndim == 3:
# vertical interpolation from standard z level to sigma
print 'vertical interpolation from standard z level to sigma'
dst_var = pyroms.remapping.z2roms(dst_varz[::-1,:,:], dst_grdz, \
dst_grd, Cpos=Cpos, spval=spval, flood=False)
else:
dst_var = dst_varz
# write data in destination file
print 'write data in destination file'
nc.variables['ocean_time'][0] = time
nc.variables[dst_varname][0] = dst_var
# close destination file
nc.close()
if src_varname == 'SSH':
return dst_varz
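# A minimal sketch of how this function might be driven (illustrative only;
# the grid objects and file name below are placeholders, and building
# src_grd/dst_grd depends on the local pyroms/pyroms_toolbox setup):
#
#   src_grd = ...  # SODA B-grid source grid
#   dst_grd = ...  # ARCTIC2 ROMS destination grid
#   remap('SODA_2.1.6_20040105-20040110.nc', 'temp', src_grd, dst_grd,
#         dst_dir='./')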
|
kshedstrom/pyroms
|
examples/Arctic2/remap.py
|
Python
|
bsd-3-clause
| 5,296
|
[
"NetCDF"
] |
c262c2beb95b92629cab84bf907f46fa575c1cea10e04a2547a5f9203923f83c
|
#!/usr/bin/env python
"""
Psty HTTP Server. The P is silent, as in Psmith.
Psty does three things:
- Exposes local FS to pigshell.
- Proxy HTTP(s) server for pigshell's Ajax requests to jump the same-origin
barrier
- Websocket server to let pigshell pipe data through Unix commands
Psty is deliberately structured as a single file depending only on a standard
Python 2.7 installation. This makes it easy to use and easy to read.
Won't work on Windows without some porting.
Copyright (C) 2013-2014 by Coriolis Technologies Pvt Ltd.
This program is free software - see the file COPYING for license details.
"""
__version__ = "0.5"
import os
import sys
import getopt
import errno
import stat
import select
import struct
import socket
import subprocess
import posixpath
import BaseHTTPServer
import SocketServer
import urllib
import urlparse
import cgi
import mimetypes
import json
import base64
import re
import traceback
from httplib import HTTPConnection, HTTPSConnection
from hashlib import sha1, md5
from cookielib import CookieJar, Cookie
from urllib2 import Request
import sqlite3
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
psty_options = {
"allow_delete": True, # Make this true to allow rm
"follow_symlinks": False, # TODO Symlinks are followed outside cwd
"enable_fileserver": False, # Enable exporting current directory via Psty
# protocol
"export_path": os.getcwd(), # Directory exported via fileserver
"enable_wsh": False, # Enables remote command execution over
# websocket
"enable_proxy": False, # Enable proxy
"enable_cookies": False, # Borrow cookies from Chrome/Firefox,
# set False to disable sending all cookies
"cors_allow": "http://pigshell.com"
# Change the cors_allow setting if you are running pigshell on your own
# site. *** DO NOT, UNDER ANY CIRCUMSTANCES, SET THIS TO '*'. ***
# That will allow any site you visit to access your data and use your
# proxy.
}
BUFLEN = 8192
SELECT_TIMEOUT = 3
PROXY_CORS_HEADER = "Access-Control-Allow-Origin: %s\r\n" + \
"X-Psty-Location: %s\r\n" + \
"Access-Control-Expose-Headers: Content-Length, Content-Range, X-Psty-Location\r\n"
DIRMIME = 'application/vnd.pigshell.dir+json'
FILEMIME = 'application/vnd.pigshell.file+json'
LINKMIME = 'application/vnd.pigshell.link+json'
class PException(Exception):
def __init__(self, code, response):
self.code = code
self.msg = response
class WException(Exception):
def __init__(self, code):
self.code = code
def guard(f):
def decorator(self, *args, **kwargs):
origin = self.headers.getheader("origin") or self.headers.getheader("referer") or ""
if not origin or origin.find(psty_options["cors_allow"]) != 0:
self.send_error(403, "Bad origin")
if self.proxy_re.match(self.path):
if not psty_options["enable_proxy"]:
return self.send_error(403, "Proxy service not enabled")
if self.command == 'OPTIONS':
return f(self, *args, **kwargs)
return self.do_proxy()
upgrade = self.headers.getheader("upgrade")
if self.command == 'GET' and upgrade and upgrade.lower() == "websocket":
if not psty_options["enable_wsh"]:
return self.send_error(403, "Websocket service not enabled")
return self.do_websocket()
if not psty_options["enable_fileserver"]:
return self.send_error(403, "Fileserver not enabled")
return f(self, *args, **kwargs)
return decorator
class PstyRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
server_version = "Psty/" + __version__
proxy_address = None
proxy_re = re.compile(r'^/(http|https|ftp)')
# use unbuffered readlines - we don't want anything remaining in the
# buffer when we go into our select()/recv() loop
rbufsize = 0
def send_cors_headers(self):
self.send_header("Access-Control-Allow-Origin", psty_options["cors_allow"])
self.send_header("Access-Control-Expose-Headers",
"Content-Length, Content-Range")
@guard
def do_OPTIONS(self):
self.send_response(200)
rh = self.headers.getheader("access-control-request-headers")
headers = [("Access-Control-Allow-Origin", psty_options["cors_allow"]),
("Access-Control-Allow-Methods", "DELETE, POST, PUT, GET, OPTIONS"),
("Access-Control-Allow-Headers", rh),
("Access-Control-Max-Age", 864000),
("Connection", "close")]
for h in headers:
self.send_header(h[0], h[1])
self.end_headers()
@guard
def do_DELETE(self):
self.send_error(403, "DELETE not implemented")
@guard
def do_GET(self):
"""Serve a GET request."""
self.send_head()
@guard
def do_HEAD(self):
"""Serve a HEAD request."""
self.send_head()
def op_mkdir(self, fs):
path = self.ppath
filename = fs['filename'].value
if filename.find('/') != -1:
raise Exception("Invalid filename")
path = os.path.join(path, filename)
os.mkdir(path)
# We should be doing a 201 Created and sending the dir as a response
# body like in op_put, but lazy for now
self.send_json_response(rcode=204)
def op_rm(self, fs):
"""
Remove file or directory
"""
if not psty_options["allow_delete"]:
return self.send_error(403, "Deletion not allowed")
relpath, path = self.prelpath, self.ppath
filename = fs['filename'].value
if filename.find('/') != -1:
raise Exception("Invalid filename")
path = os.path.join(path, filename)
if os.path.isdir(path):
os.rmdir(path)
else:
os.remove(path)
self.send_json_response(rcode=204)
def op_link(self, fs):
relpath, path = self.prelpath, self.ppath
data = fs['data'].value
name = fs['name'].value
try:
meta = json.loads(data)
ident = meta['ident']
except:
raise PException(400, "Bad request")
os.symlink(ident, os.path.join(path, name))
self.send_json_response(rcode=204)
def op_put(self, fs):
"""
Write to file, truncating existing one if necessary
"""
relpath, path = self.prelpath, self.ppath
filename = fs['filename'].value
if filename.find('/') != -1:
raise Exception("Invalid filename")
data = fs['data'].file
path = os.path.join(path, filename)
relpath = os.path.join(relpath, filename)
with open(path, 'wb') as f:
self.copyfile(data, f)
entry = self.get_pathinfo(path, relpath, filename)
self.send_json_response(entry, rcode=201, location=relpath,
ctype=FILEMIME, lm=entry["mtime"] / 1000)
def op_append(self, fs):
"""
Append to file
"""
relpath, path, filename = self.prelpath, self.ppath, self.pfilename
data = fs['data'].file
with open(path, 'ab') as f:
self.copyfile(data, f)
entry = self.get_pathinfo(path, relpath, filename)
self.send_json_response(entry, rcode=200, location=relpath,
ctype=FILEMIME, lm=entry["mtime"] / 1000)
def op_rename(self, fs):
x, srcpath = self.translate_path(fs['src'].value)
x, dstpath = self.translate_path(fs['dst'].value)
os.rename(srcpath, dstpath)
self.send_json_response(rcode=204)
def get_pathinfo(self, path, relpath, filename):
entry = {}
sf = os.lstat(path)
if stat.S_ISLNK(sf.st_mode):
link = os.readlink(path)
ctype = LINKMIME
entry["href"] = link
else:
ctype = self.get_mime(path)
ident = urllib.quote(relpath)
if ctype == DIRMIME and not ident.endswith('/'):
ident = ident + '/'
entry.update({"name": filename, "ident": ident, "size": sf.st_size,
"mtime": sf.st_mtime * 1000, "atime": sf.st_atime * 1000,
"mime": ctype, "readable": readable(sf),
"writable": writable(sf)})
return entry
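# Illustrative sketch (not from the original source): the shape of the entry
# returned by get_pathinfo() for a regular file. Values are hypothetical;
# mtime and atime are reported in milliseconds.
#
#   {"name": "notes.txt", "ident": "/docs/notes.txt", "size": 1024,
#    "mtime": 1400000000000, "atime": 1400000000000,
#    "mime": "text/plain", "readable": True, "writable": True}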
def send_json_response(self, data=None, rcode=200, location=None,
ctype=None, lm=None, cc="private, no-cache"):
self.send_response(rcode)
lm = None if lm is None else self.date_time_string(lm)
for k, v in [("Location", location), ("Content-Type", ctype),
("Last-Modified", lm), ("Cache-Control", cc)]:
if v:
self.send_header(k, v)
self.send_cors_headers()
self.send_header("Connection", "close")
if 200 <= rcode < 300:
self.send_header("Accept-Ranges", "bytes")
if data is None:
self.end_headers()
return
f = StringIO()
f.write(json.dumps(data))
length = f.tell()
f.seek(0)
self.send_header("Content-Length", str(length))
self.end_headers()
self.copyfile(f, self.wfile)
f.close()
def op_stat(self):
"""
Return metadata of a file
"""
relpath, path, filename = self.prelpath, self.ppath, self.pfilename
if os.path.isdir(path):
entry, mtime = self.list_directory()
del entry["files"]
ctype = DIRMIME
else:
entry = self.get_pathinfo(path, relpath, filename)
mtime = entry["mtime"] / 1000
ctype = FILEMIME
self.send_json_response(entry, rcode=200, location=relpath,
ctype=ctype, lm=mtime)
@guard
def do_POST(self):
self.translate_path(self.path)
try:
ctype, pdict = cgi.parse_header(self.headers.getheader("content-type"))
if ctype != 'multipart/form-data':
raise PException(403, "Unexpected content type")
# Default rfile is unbuffered so the select() loop paths don't
# miss data. Here we use a buffered rfile as recvfrom()ing a huge
# payload 1 byte at a time is very expensive in time and CPU.
rfile = self.connection.makefile()
fs = cgi.FieldStorage(fp=rfile, headers=self.headers,
environ={'REQUEST_METHOD': 'POST'})
op = fs['op'].value
if not hasattr(self, 'op_' + op):
raise PException(403, "Invalid op")
method = getattr(self, 'op_' + op)
method(fs)
except PException, e:
print "PEXCEPT %s %s" % (e.code, e.msg)
self.send_error(e.code, e.msg)
except Exception, e:
print "EXCEPT", str(e)
print traceback.print_exc()
if hasattr(e, 'errno'):
self.send_error(403, errno.errorcode[e.errno])
else:
self.send_error(500, "Server exception")
def send_head_dir(self):
dirinfo, maxmtime = self.list_directory()
#dts = self.date_time_string(maxmtime)
#if self.headers.getheader('if-modified-since') == dts:
# self.send_json_response(rcode=304)
# return
data = dirinfo if self.command == 'GET' else None
self.send_json_response(data, rcode=200, ctype=DIRMIME, lm=maxmtime)
def get_range(self):
rh = self.headers.getheader('range') or ''
m = re.match(r'bytes=([0-9]+)-([0-9]+)?', rh.strip())
if m:
g = m.groups()
end = -1 if g[1] is None else int(g[1])
return [int(g[0]), end]
return None
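# For example (illustrative, not from the original source):
#   "Range: bytes=0-499" -> [0, 499]
#   "Range: bytes=500-"  -> [500, -1] (open-ended; send_head resolves -1
#                                      against the file size)
#   any other header     -> None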
def send_head(self):
relpath, path = self.translate_path(self.path)
try:
if self.pquery.get('op', None):
op = self.pquery['op'][0]
if not hasattr(self, 'op_' + op):
raise PException(403, "Invalid op")
method = getattr(self, 'op_' + op)
return method()
if os.path.isdir(path):
if self.path.endswith('/'):
return self.send_head_dir()
# redirect browser - doing basically what apache does
self.send_json_response(rcode=301, location=self.path + "/")
return
sf = os.lstat(path)
if not readable(sf):
raise PException(403, "Permission denied")
#dts = self.date_time_string(os.lstat(path).st_mtime)
#if self.headers.getheader('if-modified-since') == dts:
# self.send_json_response(rcode=304)
# return
entry = self.get_pathinfo(path, relpath, self.pfilename)
my_range = self.get_range()
filesize = entry["size"]
if my_range:
if my_range[1] == -1:
my_range[1] = filesize - 1
if my_range[0] < 0 or my_range[0] > filesize - 1 or my_range[1] > filesize - 1 or my_range[1] < 0 or \
my_range[1] < my_range[0]:
self.send_error(416, headers=[("Content-Range", "*/" + str(filesize))])
return
self.send_response(206)
self.send_header("Content-Range", "bytes %d-%d/%d" % (my_range[0], my_range[1], filesize))
else:
my_range = (0, filesize - 1)
self.send_response(200)
reqsize = my_range[1] - my_range[0] + 1
self.send_header("Cache-Control", "private, no-cache")
self.send_header("Connection", "close")
self.send_header("Content-type", entry["mime"])
self.send_header("Content-Length", str(reqsize))
self.send_header("Last-Modified", self.date_time_string(entry["mtime"] / 1000))
self.send_cors_headers()
self.end_headers()
if self.command == 'GET':
if stat.S_ISLNK(sf.st_mode):
f = StringIO()
f.write(os.readlink(path))
f.seek(0)
else:
f = open(path, 'rb')
written = self.copyfile(f, self.wfile, file_range=my_range)
f.close()
except PException, e:
print "PEXCEPT %s %s" % (e.code, e.msg)
self.send_error(e.code, e.msg)
except Exception, e:
print "EXCEPT", str(e)
traceback.print_exc()
if hasattr(e, 'errno'):
if e.errno == errno.EPERM:
self.send_error(401, "Permission denied")
self.send_error(404, "File not found")
else:
self.send_error(500, "Server exception")
def send_error(self, code, message=None, headers=None):
"""
Copied from BaseHttpServer. Need to add CORS headers even to
error response.
"""
try:
short, long_code = self.responses[code]
except KeyError:
short, long_code = '???', '???'
if message is None:
message = short
explain = long_code
self.log_error("code %d, message %s", code, message)
# using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
content = (self.error_message_format %
{'code': code, 'message': _quote_html(message), 'explain': explain})
self.send_response(code, message)
self.send_header("Content-Type", self.error_content_type)
self.send_header('Connection', 'close')
self.send_cors_headers()
if headers:
for h in headers:
self.send_header(h[0], h[1])
self.end_headers()
if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
self.wfile.write(content)
def list_directory(self):
relpath, path, filename = self.prelpath, self.ppath, self.pfilename
try:
file_list = os.listdir(path)
except:
raise PException(404, "No permission to list directory")
file_list.sort(key=lambda a: a.lower())
files = []
siglist = []
maxmtime = 0
for name in file_list:
fullname = os.path.join(path, name)
relname = os.path.join(relpath, name)
sf = os.lstat(fullname)
if not (stat.S_ISREG(sf.st_mode) or stat.S_ISDIR(sf.st_mode) or stat.S_ISLNK(sf.st_mode)):
continue
entry = self.get_pathinfo(fullname, relname, name)
if entry["mtime"] > maxmtime:
maxmtime = entry["mtime"]
files.append(entry)
siglist.append("%s%d%d" % (entry["name"], entry["size"], entry["mtime"]))
dirinfo = self.get_pathinfo(path, relpath, filename)
if dirinfo["mtime"] > maxmtime:
maxmtime = dirinfo["mtime"]
siglist.append("%s%d%d" % (dirinfo["name"], dirinfo["size"], dirinfo["mtime"]))
dirinfo["files"] = files
#dirinfo["cookie"] = str(maxmtime / 1000)
dirinfo["cookie"] = md5("".join(sorted(siglist))).hexdigest()
return dirinfo, maxmtime / 1000
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
# TODO Symlink verification
pathcomps = urlparse.urlsplit(path)
relpath = path = urllib.unquote(pathcomps.path)
querystr = pathcomps.query
self.pquery = urlparse.parse_qs(querystr, keep_blank_values=True)
posix_path = posixpath.normpath(path)
words = posix_path.split('/')
words = filter(None, words)
path = psty_options["export_path"]
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir):
continue
path = os.path.join(path, word)
self.prelpath = relpath
self.ppath = path
fname = os.path.basename(posix_path)
self.pfilename = fname if fname else '/'
return relpath, path
def copyfile(self, source, outputfile, file_range=None, buflen=16 * 1024):
left = sys.maxint
written = 0
if file_range:
left = file_range[1] - file_range[0] + 1
source.seek(file_range[0])
while left > 0:
rlen = left if left < buflen else buflen
buf = source.read(rlen)
if not buf:
break
outputfile.write(buf)
left -= len(buf)
written += len(buf)
return written
def guess_type(self, path):
"""Guess the type of a file.
Argument is a PATH (a filename).
Return value is a string of the form type/subtype,
usable for a MIME Content-type header.
The default implementation looks the file's extension
up in the table self.extensions_map, using application/octet-stream
as a default; however it would be permissible (if
slow) to look inside the data to make a better guess.
"""
if os.path.isdir(path):
return DIRMIME
base, ext = posixpath.splitext(path)
if ext in self.extensions_map:
return self.extensions_map[ext]
ext = ext.lower()
if ext in self.extensions_map:
return self.extensions_map[ext]
else:
return self.extensions_map['']
if not mimetypes.inited: # try to read system mime.types
mimetypes.init()
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
'': 'application/octet-stream', # Default
'.py': 'text/plain',
'.c': 'text/plain',
'.h': 'text/plain',
'.js': 'text/plain',
})
def get_mime(self, path):
sf = os.stat(path)
if stat.S_ISDIR(sf.st_mode):
return DIRMIME
try:
output = subprocess.check_output(["file", "--mime-type", path])
mime = output.split(': ')[-1].strip()
except:
mime = self.guess_type(path)
return mime
def proxy_transform_header(self, header):
path = self.path[1:]
comps = header.split(': ')
name = comps[0].lower()
if name in ("alternate-protocol", "access-control-allow-origin"):
return None
elif name == "set-cookie":
# Add cookie to cookie jar object and don't return to browser
if psty_options["enable_cookies"]:
cookiejar.store_cookie(header, path)
return None
elif name == "location" and self._pstatus / 100 == 3:
# XHR redirects are transparently handled by the browser without
# notifying the XHR caller. We doctor 3xx Location headers
# so the browser comes back to the proxy for the redirected URL
if not urlparse.urlsplit(comps[1]).scheme:
comps[1] = urlparse.urljoin(path, comps[1])
comps[1] = "http://%s:%d/" % self.proxy_address + comps[1]
elif name == "connection":
comps[1] = "close\r\n"
return ": ".join(comps)
def do_shutdown(self):
# Shutdown and bleed the socket dry in RFC-approved manner
try:
self.connection.shutdown(socket.SHUT_WR)
while 1:
data = self.connection.recv(BUFLEN)
if len(data) == 0:
break
self.connection.close()
except:
pass
def do_websocket(self):
if not psty_options["enable_wsh"]:
return self.send_error(403, "Websocket shell not enabled")
key = self.headers.getheader("sec-websocket-key")
version = self.headers.getheader("sec-websocket-version")
if version != "13":
return self.send_error(404) # TODO Figure out the RFC way to refuse unsupported versions
relpath, path = self.translate_path(self.path)
sha_hash = sha1(key + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11").digest()
self.send_response(101)
self.send_header("Connection", "Upgrade")
self.send_header("Upgrade", "WebSocket")
self.send_header("Sec-WebSocket-Accept", base64.b64encode(sha_hash))
self.end_headers()
self.ws_state = "open"
try:
os.chdir(path)
cmdlist = self.pquery.get('cmd[]', None)
if not cmdlist:
raise Exception("No command")
p = subprocess.Popen(cmdlist, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except Exception, e:
if hasattr(e, 'errno'):
retcode = e.errno
errstr = e.strerror
else:
retcode = 1
errstr = str(e)
self.ws_send_chunk(errstr, 2)
self.ws_send_chunk('', 1, eof=True, retcode=retcode)
self.ws_close(1000)
self.do_shutdown()
return
try:
self.ws_chat(p)
except WException, e:
print "WEXCEPT %s" % e.code
traceback.print_exc()
self.ws_close(e.code)
except Exception, e:
print "EXCEPT1", str(e)
traceback.print_exc()
self.ws_close(1011)
self.do_shutdown()
def ws_chat(self, p):
def endgame():
p.stdout.close()
p.stdin.close()
p.stderr.close()
p.wait()
if len(errbuf):
self.ws_send_chunk(errbuf, 2)
self.ws_send_chunk(outbuf, 1, eof=True, retcode=p.returncode)
self.ws_close(1000)
# cmd = self.pquery.get('cmd[]')[0]
self.ws_buffer = '' # Raw input from client
eof_in = False # EOF sent from client
inbuf = '' # Unmasked data to be dribbled to the process
outbuf = '' # Staging stdout to client
errbuf = '' # Staging stderr to client
while 1:
wfiles = []
if len(inbuf) and not p.stdin.closed:
wfiles.append(p.stdin)
if len(outbuf) or len(errbuf):
wfiles.append(self.connection)
efiles = [p.stdout, p.stderr, self.connection]
if not p.stdin.closed:
efiles.append(p.stdin)
rlist, wlist, errlist = select.select([p.stdout, p.stderr,
self.connection], wfiles, efiles, SELECT_TIMEOUT)
if errlist:
p.terminate()
return endgame()
if rlist:
for f in rlist:
if f is p.stdout:
data = os.read(p.stdout.fileno(), BUFLEN)
#print "R STDOUT", len(data), cmd
if len(data) == 0:
return endgame()
outbuf += data
elif f is p.stderr:
data = os.read(p.stderr.fileno(), BUFLEN)
if len(data):
#print "STDERR ", data
errbuf += data
elif f is self.connection:
data = self.connection.recv(BUFLEN)
#print "R CONNECTION", len(data), cmd
if len(data):
if eof_in:
raise Exception("No data expected after EOF")
self.ws_buffer += data
else:
    # recv() returned '': the client went away; surface it as a WS error
    raise WException(1001)
else:
#print "R UNKNOWN", cmd
raise WException(1011)
if wlist:
for f in wlist:
if f is p.stdin:
l = min(select.PIPE_BUF, len(inbuf))  # write at most one pipe buffer at a time
#print "W STDIN", l, cmd
if l:
written = os.write(p.stdin.fileno(), inbuf[0:l])
inbuf = inbuf[written:]
elif f is self.connection:
#print "W CONNECTION", cmd
if len(errbuf):
self.ws_send_chunk(errbuf, 2)
errbuf = ''
if len(outbuf):
self.ws_send_chunk(outbuf, 1)
outbuf = ''
else:
#print "W UNKNOWN", cmd
raise WException(1011)
while 1:
meta, data = self.ws_get_chunk()
if not meta:
break
if meta["fd"] != 0: # only stdin supported now
#print "TERMINATE", cmd
p.terminate()
return endgame()
if meta.get("eof", None):
eof_in = True
inbuf += data
if len(inbuf) == 0 and eof_in and not p.stdin.closed:
#print "GOT EOF_IN", cmd
p.stdin.close()
def ws_send_chunk(self, buf, fd, eof=False, retcode=None):
meta = {"pwsver": "1.0", "enc": "base64", "fd": fd}
if eof:
meta["eof"] = True
if retcode:
meta["retcode"] = retcode
meta = json.dumps(meta)
if len(meta) > 128:
raise Exception("Header too large")
padding = " " * (128 - len(meta))
header = meta + padding
payload = header + base64.b64encode(buf)
self.ws_send_frame(payload)
def ws_send_frame(self, payload, opcode=0x1):
data = chr(0x80 | opcode)
l = len(payload)
if l < 126:
data += chr(l)
elif l < 65536:
data += chr(126)
data += struct.pack("!H", l)
else:
data += chr(127)
data += struct.pack("!Q", l)
data += payload
self.connection.sendall(data)
def ws_close(self, code):
#print "WS CLOSE in ", self.ws_state
if self.ws_state == "gotclose":
self.ws_send_frame('', opcode=0x8)
self.connection.close()
self.ws_state = "closed"
elif self.ws_state != "sentclose":
self.ws_send_frame('', opcode=0x8)
self.ws_state = "sentclose"
else:
self.connection.close()
self.ws_state = "closed"
def ws_get_chunk(self):
"""
Strip our JSON header from the WS frame, decode base64 if required
"""
chunk = self.ws_decode_frame()
if len(chunk) < 128:
return None, None
try:
metachunk = chunk[0:128].strip()
meta = json.loads(metachunk)
if meta["pwsver"] != "1.0" or meta["enc"] != "base64" or meta["fd"] != 0:
raise
data = chunk[128:]
if len(data):
data = base64.b64decode(data)
else:
data = '' # avoid mysterious invisible man in unicode
except:
traceback.print_exc()
raise WException(1002)
return meta, data
def ws_decode_frame(self):
"""
Decodes Websocket frame as per RFC 6455
"""
buf = self.ws_buffer
if len(buf) < 14:
return ''
start = 2
opcode = ord(buf[0]) & 0xf
if opcode == 0x8: # close frame
if self.ws_state == "open":
self.ws_state = "gotclose"
raise WException(1000)
length = ord(buf[1]) & 0x7f
if length == 126:
length, = struct.unpack("!H", buf[2:4])
start += 2
elif length == 127:
length, = struct.unpack("!Q", buf[2:10])
start += 8
mask = [ord(b) for b in buf[start:start + 4]]
start += 4
if len(buf) < start + length:
return ''
payload = buf[start:start + length]
self.ws_buffer = buf[start + length:]
clear = ''
for i in range(len(payload)):
clear += chr(mask[i % 4] ^ ord(payload[i]))
if opcode == 0x1:
clear = clear.decode("UTF8")
return clear
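# Note (added for clarity, not in the original): browser-to-server frames are
# always masked per RFC 6455, so the layout decoded above is: byte 0 holds
# FIN + opcode, byte 1 holds the MASK bit plus a 7-bit length, followed by 2
# or 8 extra length bytes when that 7-bit length is 126 or 127, then the
# 4-byte masking key, then the masked payload, which is unmasked by XOR-ing
# each payload byte with mask[i % 4].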
def do_proxy(self):
if not psty_options['enable_proxy']:
return self.send_error(403, "Proxy not enabled")
urlcomps = urlparse.urlsplit(self.path[1:])
host = urlcomps.hostname
port = urlcomps.port
klass = HTTPSConnection if urlcomps.scheme == 'https' else HTTPConnection
headers = self.headers.headers
headers = [h for h in headers if not re.match(r'^(connection:|origin:|host:)', h.lower())]
headers.insert(0, "Connection: close\r\n")
headers.insert(0, "Host: %s\r\n" % urlcomps.netloc)
#if cookie jar has cookies for this domain, add them to header
if psty_options["enable_cookies"]:
cookie = cookiejar.get_cookie(self.path[1:])
if cookie:
headers.insert(0, cookie)
target = klass(host, port)
target.connect()
outbuf = []
path = urlparse.urlunsplit(('', '', urlcomps.path, urlcomps.query, '')) or '/'
outbuf.append("%s %s %s\r\n" % (self.command, path, self.request_version))
for h in headers:
outbuf.append(h)
outbuf.append("\r\n")
target.send("".join(outbuf))
time_out_max = 100
client_sock = self.connection
server_sock = target.sock
server_fp = server_sock.makefile('rb', 0)
socks = [client_sock, server_sock]
count = 0
byte_length = 0
RESP_READSTATUS = 1
RESP_READHEADERS100 = 2
RESP_READHEADERS = 3
RESP_READBODY = 4
self._pstate = RESP_READSTATUS
self._pstatus = 0
while 1:
count += 1
(recv, _, error) = select.select(socks, [], socks, 3)
if error:
break
if recv:
for sock in recv:
if sock is client_sock:
out = server_sock
data = client_sock.recv(BUFLEN)
if data == '':
socks.remove(client_sock)
#return self.do_shutdown()
else:
out = client_sock
if self._pstate == RESP_READSTATUS:
data = server_fp.readline()
#print "Got from %s: %s" % (self.path[1:], data)
comps = data.split()
if comps[1] == "100":
self._pstate = RESP_READHEADERS100
else:
self._pstate = RESP_READHEADERS
self._pstatus = int(comps[1])
elif self._pstate == RESP_READHEADERS:
data = server_fp.readline()
#print "Got from %s: %s" % (self.path[1:], data)
if data.strip() == "":
data = PROXY_CORS_HEADER % (psty_options["cors_allow"], self.path[1:]) + "\r\n"
self._pstate = RESP_READBODY
else:
data = self.proxy_transform_header(data)
elif self._pstate == RESP_READHEADERS100:
data = server_fp.readline()
if data.strip() == "":
self._pstate = RESP_READSTATUS
else:
data = server_sock.recv(BUFLEN)
if data:
byte_length += len(data)
if data == '':
self.log_message('"Proxy %s %s" %s %s',
self.command, self.path[1:], str(self._pstatus),
str(byte_length))
socks.remove(server_sock)
return self.do_shutdown()
if data:
out.sendall(data)
count = 0
if count == time_out_max:
break
class PstyServer(SocketServer.ForkingTCPServer):
allow_reuse_address = 1
def readable(sf):
return stat.S_IMODE(sf.st_mode) & stat.S_IRUSR != 0
def writable(sf):
return stat.S_IMODE(sf.st_mode) & stat.S_IWUSR != 0
def _quote_html(html):
return html.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
class FakeMessage:
def __init__(self, header):
self._header = [header]
def getheaders(self, name):
if name == 'Set-Cookie':
return self._header
return []
class FakeResponse:
def __init__(self, header):
self._message = FakeMessage(header)
def info(self):
return self._message
class SQLiteCookieJar(CookieJar):
"""CookieJar that can be loaded from and saved to a SQLite DB."""
def __init__(self, filename=None, policy=None):
"""
Cookies are NOT loaded from the named file until either the .load() or
.revert() method is called.
"""
CookieJar.__init__(self, policy)
#Chrome on Mac OS X
self.CHROMEMACDB = os.path.expanduser('~') + '/Library/Application Support/Google/Chrome/Default/Cookies'
self.GET_QUERY_CHROME = 'select host_key as domain,name,value,path,expires_utc as expires,secure from cookies'
#TODO Add Firefox and Linux/Windows support
self.PSTYDB = os.path.expanduser('~') + '/.pstydb'
self.GET_QUERY_PSTY = 'select * from cookies'
self.GET_QUERY = self.GET_QUERY_PSTY
#All writes are in PSTYDB format, not Chrome or Firefox
self.SET_QUERY = 'insert into cookies (domain,name,value,path,expires,secure) values (?,?,?,?,?,?)'
self.TRUNCATE_QUERY = 'delete from cookies'
self.CREATE_SCHEMA = 'CREATE TABLE cookies (' \
'domain TEXT NOT NULL,' \
'name TEXT NOT NULL,' \
'value TEXT NOT NULL,' \
'path TEXT NOT NULL,' \
'expires INTEGER NOT NULL,' \
'secure INTEGER NOT NULL' \
')'
if filename is not None:
try:
filename + ""
except:
raise ValueError("filename must be string-like")
else:
if os.path.isfile(self.PSTYDB):
filename = self.PSTYDB
self.GET_QUERY = self.GET_QUERY_PSTY
else:
if os.path.isfile(self.CHROMEMACDB):
filename = self.CHROMEMACDB
self.GET_QUERY = self.GET_QUERY_CHROME
self.filename = filename
def save(self, filename=None):
"""Save cookies to DB."""
create_schema = False
if filename is None:
filename = self.PSTYDB
if not os.path.isfile(self.PSTYDB):
create_schema = True
conn = sqlite3.connect(filename)
try:
cursor = conn.cursor()
if create_schema:
cursor.execute(self.CREATE_SCHEMA)
cursor.execute(self.TRUNCATE_QUERY)
cursor.executemany(self.SET_QUERY, self._cookies_as_tuples())
conn.commit()
finally:
conn.close()
def load(self, filename=None):
"""Load cookies from DB."""
if filename is None:
if self.filename is not None:
filename = self.filename
else:
raise ValueError('File name missing')
conn = sqlite3.connect(filename)
try:
cursor = conn.cursor()
cursor.execute(self.GET_QUERY)
for cookie in self._cookies_from_cursor(cursor):
self.set_cookie(cookie)
finally:
conn.close()
def store_cookie(self, header, path):
header = re.sub(r'[^:].*: ', u'', header)
if len(self._cookies) == 0:
self.load()
self.extract_cookies(FakeResponse(header), Request(path))
self.save()
def get_cookie(self, path):
request = Request(path)
if len(self._cookies) == 0:
self.load()
self.add_cookie_header(request)
if request.get_header('Cookie'):
return "Cookie: %s\r\n" % request.get_header('Cookie')
return None
def _cookies_from_cursor(self, cursor):
cookies = []
for row in cursor:
domain = row[0]
name = row[1]
value = row[2]
path = row[3]
expires = row[4]
secure = row[5]
cookies.append(Cookie(None, name, value, None, None, domain, True, bool(domain.startswith(".")),
path, True, secure, expires, False, None, None, None))
return cookies
def _cookies_as_tuples(self):
tuples = []
for domain in self._cookies:
for path in self._cookies[domain]:
for cookiekey in self._cookies[domain][path]:
cookie = self._cookies[domain][path][cookiekey]
tuples.append(
(cookie.domain, cookie.name, cookie.value, cookie.path, cookie.expires if cookie.expires else 0,
cookie.secure))
return tuples
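# Hedged illustration (not part of the original script): a minimal sketch of
# the SQLiteCookieJar round trip. The Set-Cookie header and URL are made-up
# examples, and calling save() creates ~/.pstydb with the schema defined
# above. The function is only defined here, never called.
def _example_cookiejar():
    jar = SQLiteCookieJar()
    if not os.path.isfile(jar.PSTYDB):
        jar.save()               # create an empty ~/.pstydb so auto-detection works
        jar = SQLiteCookieJar()  # re-construct to pick up the fresh DB
    jar.store_cookie("Set-Cookie: sid=abc123; Path=/", "http://example.com/")
    return jar.get_cookie("http://example.com/")  # e.g. "Cookie: sid=abc123\r\n"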
cookiejar = SQLiteCookieJar()
def usage():
u = """
Usage: %s (-a|-pwfc) [-d <dir>] [<port>]
%s -h
Options:
-h print usage
-p enable web proxy
-f enable file server
-w enable websocket shell
-c enable cookies
-a enable all services
-d <dir> export <dir> via file server (default: current directory)
<port> server port (default: 50937)
"""
print u.strip() % (sys.argv[0], sys.argv[0])
sys.exit(1)
if __name__ == '__main__':
port = 50937
try:
opts, args = getopt.getopt(sys.argv[1:], 'apwfd:c')
for o, a in opts:
if o == '-h':
usage()
elif o in ('-p', '-w', '-f', '-c', '-a'):
opt_map = {'-p': ['enable_proxy'], '-w': ['enable_wsh'],
'-f': ['enable_fileserver'], '-c': ['enable_cookies'],
'-a': ['enable_proxy', 'enable_wsh', 'enable_fileserver']}
for m in opt_map[o]:
psty_options[m] = True
elif o == '-d':
if os.path.isdir(a):
psty_options['export_path'] = a
else:
raise Exception("Directory %s not found" % a)
if len(args) > 1:
usage()
if len(args) == 1:
port = int(args[0])
except Exception, e:
print str(e)
usage()
if not (psty_options['enable_proxy'] or psty_options['enable_wsh'] or
psty_options['enable_fileserver']):
print "At least one of proxy, wsh, fileserver must be anabled"
usage()
# DO NOT CHANGE localhost! Psty is a completely open proxy and should
# not be exposed on the LAN, let alone the Internet.
server_address = ('localhost', port)
PstyRequestHandler.protocol_version = "HTTP/1.1"
PstyRequestHandler.proxy_address = server_address
httpd = PstyServer(server_address, PstyRequestHandler)
sa = httpd.socket.getsockname()
services = ", ".join([k[7:] for (k, v) in psty_options.items() if k.startswith('enable') and v])
print "Serving", services, "on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
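# Hedged illustration (not part of the original script): a minimal client-side
# sketch of the byte-range path in send_head(). It assumes the file server is
# running locally with -f on the default port and that /somefile.txt (a
# made-up name) exists under the exported directory. Defined only, never run.
def _example_range_request():
    import urllib2
    req = urllib2.Request("http://localhost:50937/somefile.txt")
    req.add_header("Range", "bytes=0-99")  # ask for the first 100 bytes
    resp = urllib2.urlopen(req)            # expect 206 Partial Content
    return resp.read(), resp.info().getheader("Content-Range")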
|
pigshell/pigshell
|
psty.py
|
Python
|
gpl-3.0
| 42,770
|
[
"VisIt"
] |
cb5b702aadeb5164f77b7165c648903c04572d9dadc421589a16dfcec9fb5437
|
# proxy module
from __future__ import absolute_import
from mayavi.modules.ui.surface import *
|
enthought/etsproxy
|
enthought/mayavi/modules/ui/surface.py
|
Python
|
bsd-3-clause
| 94
|
[
"Mayavi"
] |
447bbf4358a564ea5d3e070b6379d77c8b002605c44b2a1492a4d2c43d027388
|
"""
Numerical python functions written for compatibility with MATLAB
commands with the same names. Most numerical python functions can be found in
the `numpy` and `scipy` libraries. What remains here is code for performing
spectral computations.
Spectral functions
-------------------
`cohere`
Coherence (normalized cross spectral density)
`csd`
Cross spectral density using Welch's average periodogram
`detrend`
Remove the mean or best fit line from an array
`psd`
Power spectral density using Welch's average periodogram
`specgram`
Spectrogram (spectrum over segments of time)
`complex_spectrum`
Return the complex-valued frequency spectrum of a signal
`magnitude_spectrum`
Return the magnitude of the frequency spectrum of a signal
`angle_spectrum`
Return the angle (wrapped phase) of the frequency spectrum of a signal
`phase_spectrum`
Return the phase (unwrapped angle) of the frequency spectrum of a signal
`detrend_mean`
Remove the mean from a line.
`detrend_linear`
Remove the best fit line from a line.
`detrend_none`
Return the original line.
`stride_windows`
Get all windows in an array in a memory-efficient manner
`stride_repeat`
Repeat an array in a memory-efficient manner
`apply_window`
Apply a window along a given axis
"""
import functools
from numbers import Number
import numpy as np
import matplotlib.cbook as cbook
from matplotlib import docstring
def window_hanning(x):
"""
Return x times the hanning window of len(x).
See Also
--------
window_none : Another window algorithm.
"""
return np.hanning(len(x))*x
def window_none(x):
"""
No window function; simply return x.
See Also
--------
window_hanning : Another window algorithm.
"""
return x
@cbook.deprecated("3.2")
def apply_window(x, window, axis=0, return_window=None):
"""
Apply the given window to the given 1D or 2D array along the given axis.
Parameters
----------
x : 1D or 2D array or sequence
Array or sequence containing the data.
window : function or array.
Either a function to generate a window or an array with length
*x*.shape[*axis*]
axis : int
The axis along which to apply the window.
Must be 0 or 1. The default is 0
return_window : bool
If true, also return the 1D values of the window that was applied
"""
x = np.asarray(x)
if x.ndim < 1 or x.ndim > 2:
raise ValueError('only 1D or 2D arrays can be used')
if axis+1 > x.ndim:
raise ValueError('axis(=%s) out of bounds' % axis)
xshape = list(x.shape)
xshapetarg = xshape.pop(axis)
if np.iterable(window):
if len(window) != xshapetarg:
raise ValueError('The len(window) must be the same as the shape '
'of x for the chosen axis')
windowVals = window
else:
windowVals = window(np.ones(xshapetarg, dtype=x.dtype))
if x.ndim == 1:
if return_window:
return windowVals * x, windowVals
else:
return windowVals * x
xshapeother = xshape.pop()
otheraxis = (axis+1) % 2
windowValsRep = stride_repeat(windowVals, xshapeother, axis=otheraxis)
if return_window:
return windowValsRep * x, windowVals
else:
return windowValsRep * x
def detrend(x, key=None, axis=None):
"""
Return x with its trend removed.
Parameters
----------
x : array or sequence
Array or sequence containing the data.
key : {'default', 'constant', 'mean', 'linear', 'none'} or function
The detrending algorithm to use. 'default', 'mean', and 'constant' are
the same as `detrend_mean`. 'linear' is the same as `detrend_linear`.
'none' is the same as `detrend_none`. The default is 'mean'. See the
corresponding functions for more details regarding the algorithms. Can
also be a function that carries out the detrend operation.
axis : int
The axis along which to do the detrending.
See Also
--------
detrend_mean : Implementation of the 'mean' algorithm.
detrend_linear : Implementation of the 'linear' algorithm.
detrend_none : Implementation of the 'none' algorithm.
"""
if key is None or key in ['constant', 'mean', 'default']:
return detrend(x, key=detrend_mean, axis=axis)
elif key == 'linear':
return detrend(x, key=detrend_linear, axis=axis)
elif key == 'none':
return detrend(x, key=detrend_none, axis=axis)
elif callable(key):
x = np.asarray(x)
if axis is not None and axis + 1 > x.ndim:
raise ValueError(f'axis(={axis}) out of bounds')
if (axis is None and x.ndim == 0) or (not axis and x.ndim == 1):
return key(x)
# try to use the 'axis' argument if the function supports it,
# otherwise use apply_along_axis to do it
try:
return key(x, axis=axis)
except TypeError:
return np.apply_along_axis(key, axis=axis, arr=x)
else:
raise ValueError(
f"Unknown value for key: {key!r}, must be one of: 'default', "
f"'constant', 'mean', 'linear', or a function")
def detrend_mean(x, axis=None):
"""
Return x minus the mean(x).
Parameters
----------
x : array or sequence
Array or sequence containing the data
Can have any dimensionality
axis : int
The axis along which to take the mean. See numpy.mean for a
description of this argument.
See Also
--------
detrend_linear : Another detrend algorithm.
detrend_none : Another detrend algorithm.
detrend : A wrapper around all the detrend algorithms.
"""
x = np.asarray(x)
if axis is not None and axis+1 > x.ndim:
raise ValueError('axis(=%s) out of bounds' % axis)
return x - x.mean(axis, keepdims=True)
def detrend_none(x, axis=None):
"""
Return x: no detrending.
Parameters
----------
x : any object
An object containing the data
axis : int
This parameter is ignored.
It is included for compatibility with detrend_mean
See Also
--------
detrend_mean : Another detrend algorithm.
detrend_linear : Another detrend algorithm.
detrend : A wrapper around all the detrend algorithms.
"""
return x
def detrend_linear(y):
"""
Return x minus best fit line; 'linear' detrending.
Parameters
----------
y : 0-D or 1-D array or sequence
Array or sequence containing the data
axis : int
The axis along which to take the mean. See numpy.mean for a
description of this argument.
See Also
--------
detrend_mean : Another detrend algorithm.
detrend_none : Another detrend algorithm.
detrend : A wrapper around all the detrend algorithms.
"""
# This is faster than an algorithm based on linalg.lstsq.
y = np.asarray(y)
if y.ndim > 1:
raise ValueError('y cannot have ndim > 1')
# short-circuit 0-D array.
if not y.ndim:
return np.array(0., dtype=y.dtype)
x = np.arange(y.size, dtype=float)
C = np.cov(x, y, bias=1)
b = C[0, 1]/C[0, 0]
a = y.mean() - b*x.mean()
return y - (b*x + a)
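# Hedged illustration (not part of the original module): a minimal sketch of
# the detrenders above on a synthetic ramp-plus-noise signal; only NumPy is
# assumed.
def _example_detrend():
    rng = np.random.RandomState(0)
    t = np.arange(100, dtype=float)
    y = 0.5 * t + 3.0 + 0.1 * rng.randn(t.size)  # linear trend plus noise
    y_mean = detrend_mean(y)    # zero mean, trend still present
    y_lin = detrend_linear(y)   # best-fit line removed, roughly just the noise
    return y_mean, y_lin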
def stride_windows(x, n, noverlap=None, axis=0):
"""
Get all windows of x with length n as a single array,
using strides to avoid data duplication.
.. warning::
It is not safe to write to the output array. Multiple
elements may point to the same piece of memory,
so modifying one value may change others.
Parameters
----------
x : 1D array or sequence
Array or sequence containing the data.
n : int
The number of data points in each window.
noverlap : int
The overlap between adjacent windows.
Default is 0 (no overlap)
axis : int
The axis along which the windows will run.
References
----------
`stackoverflow: Rolling window for 1D arrays in Numpy?
<http://stackoverflow.com/a/6811241>`_
`stackoverflow: Using strides for an efficient moving average filter
<http://stackoverflow.com/a/4947453>`_
"""
if noverlap is None:
noverlap = 0
if noverlap >= n:
raise ValueError('noverlap must be less than n')
if n < 1:
raise ValueError('n cannot be less than 1')
x = np.asarray(x)
if x.ndim != 1:
raise ValueError('only 1-dimensional arrays can be used')
if n == 1 and noverlap == 0:
if axis == 0:
return x[np.newaxis]
else:
return x[np.newaxis].transpose()
if n > x.size:
raise ValueError('n cannot be greater than the length of x')
# np.lib.stride_tricks.as_strided easily leads to memory corruption for
# non integer shape and strides, i.e. noverlap or n. See #3845.
noverlap = int(noverlap)
n = int(n)
step = n - noverlap
if axis == 0:
shape = (n, (x.shape[-1]-noverlap)//step)
strides = (x.strides[0], step*x.strides[0])
else:
shape = ((x.shape[-1]-noverlap)//step, n)
strides = (step*x.strides[0], x.strides[0])
return np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
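# Hedged illustration (not part of the original module): stride_windows on a
# tiny array; each column is one (possibly overlapping) length-n window and no
# data is copied.
def _example_stride_windows():
    x = np.arange(8)
    # shape (4, 3): columns are x[0:4], x[2:6], x[4:8]
    return stride_windows(x, n=4, noverlap=2)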
@cbook.deprecated("3.2")
def stride_repeat(x, n, axis=0):
"""
Repeat the values in an array in a memory-efficient manner. Array x is
stacked vertically n times.
.. warning::
It is not safe to write to the output array. Multiple
elements may point to the same piece of memory, so
modifying one value may change others.
Parameters
----------
x : 1D array or sequence
Array or sequence containing the data.
n : int
The number of times to repeat the array.
axis : int
The axis along which the data will run.
References
----------
`stackoverflow: Repeat NumPy array without replicating data?
<http://stackoverflow.com/a/5568169>`_
"""
if axis not in [0, 1]:
raise ValueError('axis must be 0 or 1')
x = np.asarray(x)
if x.ndim != 1:
raise ValueError('only 1-dimensional arrays can be used')
if n == 1:
if axis == 0:
return np.atleast_2d(x)
else:
return np.atleast_2d(x).T
if n < 1:
raise ValueError('n cannot be less than 1')
# np.lib.stride_tricks.as_strided easily leads to memory corruption for
# non integer shape and strides, i.e. n. See #3845.
n = int(n)
if axis == 0:
shape = (n, x.size)
strides = (0, x.strides[0])
else:
shape = (x.size, n)
strides = (x.strides[0], 0)
return np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
def _spectral_helper(x, y=None, NFFT=None, Fs=None, detrend_func=None,
window=None, noverlap=None, pad_to=None,
sides=None, scale_by_freq=None, mode=None):
"""
Private helper implementing the common parts between the psd, csd,
spectrogram and complex, magnitude, angle, and phase spectrums.
"""
if y is None:
# if y is None use x for y
same_data = True
else:
# The checks for if y is x are so that we can use the same function to
# implement the core of psd(), csd(), and spectrogram() without doing
# extra calculations. We return the unaveraged Pxy, freqs, and t.
same_data = y is x
if Fs is None:
Fs = 2
if noverlap is None:
noverlap = 0
if detrend_func is None:
detrend_func = detrend_none
if window is None:
window = window_hanning
# if NFFT is None, fall back to the default segment length of 256
if NFFT is None:
NFFT = 256
if mode is None or mode == 'default':
mode = 'psd'
cbook._check_in_list(
['default', 'psd', 'complex', 'magnitude', 'angle', 'phase'],
mode=mode)
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is not 'psd'")
# Make sure we're dealing with a numpy array. If y and x were the same
# object to start with, keep them that way
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
if sides is None or sides == 'default':
if np.iscomplexobj(x):
sides = 'twosided'
else:
sides = 'onesided'
cbook._check_in_list(['default', 'onesided', 'twosided'], sides=sides)
# zero pad x and y up to NFFT if they are shorter than NFFT
if len(x) < NFFT:
n = len(x)
x = np.resize(x, NFFT)
x[n:] = 0
if not same_data and len(y) < NFFT:
n = len(y)
y = np.resize(y, NFFT)
y[n:] = 0
if pad_to is None:
pad_to = NFFT
if mode != 'psd':
scale_by_freq = False
elif scale_by_freq is None:
scale_by_freq = True
# For real x, ignore the negative frequencies unless told otherwise
if sides == 'twosided':
numFreqs = pad_to
if pad_to % 2:
freqcenter = (pad_to - 1)//2 + 1
else:
freqcenter = pad_to//2
scaling_factor = 1.
elif sides == 'onesided':
if pad_to % 2:
numFreqs = (pad_to + 1)//2
else:
numFreqs = pad_to//2 + 1
scaling_factor = 2.
if not np.iterable(window):
window = window(np.ones(NFFT, x.dtype))
if len(window) != NFFT:
raise ValueError(
"The window length must match the data's first dimension")
result = stride_windows(x, NFFT, noverlap, axis=0)
result = detrend(result, detrend_func, axis=0)
result = result * window.reshape((-1, 1))
result = np.fft.fft(result, n=pad_to, axis=0)[:numFreqs, :]
freqs = np.fft.fftfreq(pad_to, 1/Fs)[:numFreqs]
if not same_data:
# if same_data is False, mode must be 'psd'
resultY = stride_windows(y, NFFT, noverlap)
resultY = detrend(resultY, detrend_func, axis=0)
resultY = resultY * window.reshape((-1, 1))
resultY = np.fft.fft(resultY, n=pad_to, axis=0)[:numFreqs, :]
result = np.conj(result) * resultY
elif mode == 'psd':
result = np.conj(result) * result
elif mode == 'magnitude':
result = np.abs(result) / np.abs(window).sum()
elif mode == 'angle' or mode == 'phase':
# we unwrap the phase later to handle the onesided vs. twosided case
result = np.angle(result)
elif mode == 'complex':
result /= np.abs(window).sum()
if mode == 'psd':
# Also include scaling factors for one-sided densities and dividing by
# the sampling frequency, if desired. Scale everything, except the DC
# component and the NFFT/2 component:
# if we have an even number of frequencies, don't scale NFFT/2
if not NFFT % 2:
slc = slice(1, -1, None)
# if we have an odd number, just don't scale DC
else:
slc = slice(1, None, None)
result[slc] *= scaling_factor
# MATLAB divides by the sampling frequency so that density function
# has units of dB/Hz and can be integrated by the plotted frequency
# values. Perform the same scaling here.
if scale_by_freq:
result /= Fs
# Scale the spectrum by the norm of the window to compensate for
# windowing loss; see Bendat & Piersol Sec 11.5.2.
result /= (np.abs(window)**2).sum()
else:
# In this case, preserve power in the segment, not amplitude
result /= np.abs(window).sum()**2
t = np.arange(NFFT/2, len(x) - NFFT/2 + 1, NFFT - noverlap)/Fs
if sides == 'twosided':
# center the frequency range at zero
freqs = np.roll(freqs, -freqcenter, axis=0)
result = np.roll(result, -freqcenter, axis=0)
elif not pad_to % 2:
# get the last value correctly, it is negative otherwise
freqs[-1] *= -1
# we unwrap the phase here to handle the onesided vs. twosided case
if mode == 'phase':
result = np.unwrap(result, axis=0)
return result, freqs, t
def _single_spectrum_helper(
mode, x, Fs=None, window=None, pad_to=None, sides=None):
"""
Private helper implementing the commonality between the complex, magnitude,
angle, and phase spectrums.
"""
cbook._check_in_list(['complex', 'magnitude', 'angle', 'phase'], mode=mode)
if pad_to is None:
pad_to = len(x)
spec, freqs, _ = _spectral_helper(x=x, y=None, NFFT=len(x), Fs=Fs,
detrend_func=detrend_none, window=window,
noverlap=0, pad_to=pad_to,
sides=sides,
scale_by_freq=False,
mode=mode)
if mode != 'complex':
spec = spec.real
if spec.ndim == 2 and spec.shape[1] == 1:
spec = spec[:, 0]
return spec, freqs
# Split out these keyword docs so that they can be used elsewhere
docstring.interpd.update(
Spectral="""\
Fs : float, default: 2
The sampling frequency (samples per time unit). It is used to calculate
the Fourier frequencies, *freqs*, in cycles per time unit.
window : callable or ndarray, default: `.window_hanning`
A function or a vector of length *NFFT*. To create window vectors see
`.window_hanning`, `.window_none`, `numpy.blackman`, `numpy.hamming`,
`numpy.bartlett`, `scipy.signal`, `scipy.signal.get_window`, etc. If a
function is passed as the argument, it must take a data segment as an
argument and return the windowed version of the segment.
sides : {'default', 'onesided', 'twosided'}, optional
Which sides of the spectrum to return. 'default' is one-sided for real
data and two-sided for complex data. 'onesided' forces the return of a
one-sided spectrum, while 'twosided' forces two-sided.""",
Single_Spectrum="""\
pad_to : int, optional
The number of points to which the data segment is padded when performing
the FFT. While not increasing the actual resolution of the spectrum (the
minimum distance between resolvable peaks), this can give more points in
the plot, allowing for more detail. This corresponds to the *n* parameter
in the call to fft(). The default is None, which sets *pad_to* equal to
the length of the input signal (i.e. no padding).""",
PSD="""\
pad_to : int, optional
The number of points to which the data segment is padded when performing
the FFT. This can be different from *NFFT*, which specifies the number
of data points used. While not increasing the actual resolution of the
spectrum (the minimum distance between resolvable peaks), this can give
more points in the plot, allowing for more detail. This corresponds to
the *n* parameter in the call to fft(). The default is None, which sets
*pad_to* equal to *NFFT*
NFFT : int, default: 256
The number of data points used in each block for the FFT. A power of 2 is
most efficient. This should *NOT* be used to get zero padding, or the
scaling of the result will be incorrect; use *pad_to* for this instead.
detrend : {'none', 'mean', 'linear'} or callable, default 'none'
The function applied to each segment before fft-ing, designed to remove
the mean or linear trend. Unlike in MATLAB, where the *detrend* parameter
is a vector, in Matplotlib it is a function. The :mod:`~matplotlib.mlab`
module defines `.detrend_none`, `.detrend_mean`, and `.detrend_linear`,
but you can use a custom function as well. You can also use a string to
choose one of the functions: 'none' calls `.detrend_none`. 'mean' calls
`.detrend_mean`. 'linear' calls `.detrend_linear`.
scale_by_freq : bool, default: True
Whether the resulting density values should be scaled by the scaling
frequency, which gives density in units of Hz^-1. This allows for
integration over the returned frequency values. The default is True for
MATLAB compatibility.""")
@docstring.dedent_interpd
def psd(x, NFFT=None, Fs=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
r"""
Compute the power spectral density.
The power spectral density :math:`P_{xx}` by Welch's average
periodogram method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute :math:`P_{xx}`.
If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
Parameters
----------
x : 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(PSD)s
noverlap : int
The number of points of overlap between segments.
The default value is 0 (no overlap).
Returns
-------
Pxx : 1-D array
The values for the power spectrum :math:`P_{xx}` (real valued)
freqs : 1-D array
The frequencies corresponding to the elements in *Pxx*
References
----------
Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John
Wiley & Sons (1986)
See Also
--------
specgram
`specgram` differs in the default overlap; in not returning the mean of
the segment periodograms; and in returning the times of the segments.
magnitude_spectrum : returns the magnitude spectrum.
csd : returns the spectral density between two signals.
"""
Pxx, freqs = csd(x=x, y=None, NFFT=NFFT, Fs=Fs, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq)
return Pxx.real, freqs
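# Hedged illustration (not part of the original module): estimate the power
# spectral density of a noisy 50 Hz sine and locate the dominant peak.
def _example_psd():
    Fs = 1000.0
    rng = np.random.RandomState(0)
    t = np.arange(0, 1, 1 / Fs)
    x = np.sin(2 * np.pi * 50 * t) + 0.1 * rng.randn(t.size)
    Pxx, freqs = psd(x, NFFT=256, Fs=Fs, noverlap=128)
    return freqs[np.argmax(Pxx)]  # expected to lie near 50 Hz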
@docstring.dedent_interpd
def csd(x, y, NFFT=None, Fs=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
"""
Compute the cross-spectral density.
The cross spectral density :math:`P_{xy}` by Welch's average
periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. *noverlap* gives
the length of the overlap between segments. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
padded to *NFFT*.
Parameters
----------
x, y : 1-D arrays or sequences
Arrays or sequences containing the data
%(Spectral)s
%(PSD)s
noverlap : int
The number of points of overlap between segments.
The default value is 0 (no overlap).
Returns
-------
Pxy : 1-D array
The values for the cross spectrum :math:`P_{xy}` before scaling (real
valued)
freqs : 1-D array
The frequencies corresponding to the elements in *Pxy*
References
----------
Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John
Wiley & Sons (1986)
See Also
--------
psd : equivalent to setting ``y = x``.
"""
if NFFT is None:
NFFT = 256
Pxy, freqs, _ = _spectral_helper(x=x, y=y, NFFT=NFFT, Fs=Fs,
detrend_func=detrend, window=window,
noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq,
mode='psd')
if Pxy.ndim == 2:
if Pxy.shape[1] > 1:
Pxy = Pxy.mean(axis=1)
else:
Pxy = Pxy[:, 0]
return Pxy, freqs
_single_spectrum_docs = """\
Compute the {quantity} of *x*.
Data is padded to a length of *pad_to* and the windowing function *window* is
applied to the signal.
Parameters
----------
x : 1-D array or sequence
Array or sequence containing the data
{Spectral}
{Single_Spectrum}
Returns
-------
spectrum : 1-D array
The {quantity}.
freqs : 1-D array
The frequencies corresponding to the elements in *spectrum*.
See Also
--------
psd
Returns the power spectral density.
complex_spectrum
Returns the complex-valued frequency spectrum.
magnitude_spectrum
Returns the absolute value of the `complex_spectrum`.
angle_spectrum
Returns the angle of the `complex_spectrum`.
phase_spectrum
Returns the phase (unwrapped angle) of the `complex_spectrum`.
specgram
Can return the complex spectrum of segments within the signal.
"""
complex_spectrum = functools.partial(_single_spectrum_helper, "complex")
complex_spectrum.__doc__ = _single_spectrum_docs.format(
quantity="complex-valued frequency spectrum",
**docstring.interpd.params)
magnitude_spectrum = functools.partial(_single_spectrum_helper, "magnitude")
magnitude_spectrum.__doc__ = _single_spectrum_docs.format(
quantity="magnitude (absolute value) of the frequency spectrum",
**docstring.interpd.params)
angle_spectrum = functools.partial(_single_spectrum_helper, "angle")
angle_spectrum.__doc__ = _single_spectrum_docs.format(
quantity="angle of the frequency spectrum (wrapped phase spectrum)",
**docstring.interpd.params)
phase_spectrum = functools.partial(_single_spectrum_helper, "phase")
phase_spectrum.__doc__ = _single_spectrum_docs.format(
quantity="phase of the frequency spectrum (unwrapped phase spectrum)",
**docstring.interpd.params)
@docstring.dedent_interpd
def specgram(x, NFFT=None, Fs=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None,
mode=None):
"""
Compute a spectrogram.
Compute and plot a spectrogram of data in x. Data are split into
NFFT length segments and the spectrum of each section is
computed. The windowing function window is applied to each
segment, and the amount of overlap of each segment is
specified with noverlap.
Parameters
----------
x : array-like
1-D array or sequence.
%(Spectral)s
%(PSD)s
noverlap : int, optional
The number of points of overlap between blocks. The default
value is 128.
mode : str, default: 'psd'
What sort of spectrum to use:
'psd'
Returns the power spectral density.
'complex'
Returns the complex-valued frequency spectrum.
'magnitude'
Returns the magnitude spectrum.
'angle'
Returns the phase spectrum without unwrapping.
'phase'
Returns the phase spectrum with unwrapping.
Returns
-------
spectrum : array-like
2-D array, columns are the periodograms of successive segments.
freqs : array-like
1-D array, frequencies corresponding to the rows in *spectrum*.
t : array-like
1-D array, the times corresponding to midpoints of segments
(i.e. the columns in *spectrum*).
See Also
--------
psd : differs in the overlap and in the return values.
complex_spectrum : similar, but with complex valued frequencies.
magnitude_spectrum : similar to a single segment when mode is 'magnitude'.
angle_spectrum : similar to a single segment when mode is 'angle'.
phase_spectrum : similar to a single segment when mode is 'phase'.
Notes
-----
detrend and scale_by_freq only apply when *mode* is set to 'psd'.
"""
if noverlap is None:
noverlap = 128 # default in _spectral_helper() is noverlap = 0
if NFFT is None:
NFFT = 256 # same default as in _spectral_helper()
if len(x) <= NFFT:
cbook._warn_external("Only one segment is calculated since parameter "
"NFFT (=%d) >= signal length (=%d)." %
(NFFT, len(x)))
spec, freqs, t = _spectral_helper(x=x, y=None, NFFT=NFFT, Fs=Fs,
detrend_func=detrend, window=window,
noverlap=noverlap, pad_to=pad_to,
sides=sides,
scale_by_freq=scale_by_freq,
mode=mode)
if mode != 'complex':
spec = spec.real # Needed since helper implements generically
return spec, freqs, t
@docstring.dedent_interpd
def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
r"""
The coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \frac{|P_{xy}|^2}{P_{xx}P_{yy}}
Parameters
----------
x, y
Array or sequence containing the data
%(Spectral)s
%(PSD)s
noverlap : int
The number of points of overlap between blocks. The default value
is 0 (no overlap).
Returns
-------
The return value is the tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector. For cohere, scaling the
individual densities by the sampling frequency has no effect,
since the factors cancel out.
See Also
--------
:func:`psd`, :func:`csd` :
For information about the methods used to compute :math:`P_{xy}`,
:math:`P_{xx}` and :math:`P_{yy}`.
"""
if len(x) < 2 * NFFT:
raise ValueError(
"Coherence is calculated by averaging over *NFFT* length "
"segments. Your signal is too short for your choice of *NFFT*.")
Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Cxy = np.abs(Pxy) ** 2 / (Pxx * Pyy)
return Cxy, f
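# Hedged illustration (not part of the original module): two noisy signals
# that share a 50 Hz component have coherence close to 1 at that frequency and
# low coherence elsewhere.
def _example_cohere():
    Fs = 1000.0
    rng = np.random.RandomState(0)
    t = np.arange(0, 2, 1 / Fs)
    s = np.sin(2 * np.pi * 50 * t)
    x = s + 0.5 * rng.randn(t.size)
    y = s + 0.5 * rng.randn(t.size)
    Cxy, f = cohere(x, y, NFFT=256, Fs=Fs, noverlap=128)
    return Cxy[np.argmin(np.abs(f - 50))]  # expected to be high (near 1)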
class GaussianKDE:
"""
Representation of a kernel-density estimate using Gaussian kernels.
Parameters
----------
dataset : array-like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a
callable, it should take a `GaussianKDE` instance as only
parameter and return a scalar. If None (default), 'scott' is used.
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
dim : int
Number of dimensions.
num_dp : int
Number of datapoints.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of *dataset*, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of *covariance*.
Methods
-------
kde.evaluate(points) : ndarray
Evaluate the estimated pdf on a provided set of points.
kde(points) : ndarray
Same as kde.evaluate(points)
"""
# This implementation with minor modification was too good to pass up.
# from scipy: https://github.com/scipy/scipy/blob/master/scipy/stats/kde.py
def __init__(self, dataset, bw_method=None):
self.dataset = np.atleast_2d(dataset)
if not np.array(self.dataset).size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.dim, self.num_dp = np.array(self.dataset).shape
if bw_method is None:
pass
elif cbook._str_equal(bw_method, 'scott'):
self.covariance_factor = self.scotts_factor
elif cbook._str_equal(bw_method, 'silverman'):
self.covariance_factor = self.silverman_factor
elif isinstance(bw_method, Number):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
raise ValueError("`bw_method` should be 'scott', 'silverman', a "
"scalar or a callable")
# Computes the covariance matrix for each Gaussian kernel using
# covariance_factor().
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self.data_covariance = np.atleast_2d(
np.cov(
self.dataset,
rowvar=1,
bias=False))
self.data_inv_cov = np.linalg.inv(self.data_covariance)
self.covariance = self.data_covariance * self.factor ** 2
self.inv_cov = self.data_inv_cov / self.factor ** 2
self.norm_factor = (np.sqrt(np.linalg.det(2 * np.pi * self.covariance))
* self.num_dp)
def scotts_factor(self):
return np.power(self.num_dp, -1. / (self.dim + 4))
def silverman_factor(self):
return np.power(
self.num_dp * (self.dim + 2.0) / 4.0, -1. / (self.dim + 4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
def evaluate(self, points):
"""
Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
(# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different
than the dimensionality of the KDE.
"""
points = np.atleast_2d(points)
dim, num_m = np.array(points).shape
if dim != self.dim:
raise ValueError("points have dimension {}, dataset has dimension "
"{}".format(dim, self.dim))
result = np.zeros(num_m)
if num_m >= self.num_dp:
# there are more points than data, so loop over data
for i in range(self.num_dp):
diff = self.dataset[:, i, np.newaxis] - points
tdiff = np.dot(self.inv_cov, diff)
energy = np.sum(diff * tdiff, axis=0) / 2.0
result = result + np.exp(-energy)
else:
# loop over points
for i in range(num_m):
diff = self.dataset - points[:, i, np.newaxis]
tdiff = np.dot(self.inv_cov, diff)
energy = np.sum(diff * tdiff, axis=0) / 2.0
result[i] = np.sum(np.exp(-energy), axis=0)
result = result / self.norm_factor
return result
__call__ = evaluate
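# Hedged illustration (not part of the original module): a 1-D kernel-density
# estimate evaluated on a small grid.
def _example_gaussian_kde():
    rng = np.random.RandomState(0)
    data = rng.normal(loc=0.0, scale=1.0, size=200)
    kde = GaussianKDE(data, bw_method='silverman')
    grid = np.linspace(-3, 3, 61)
    return kde(grid)  # estimated pdf values, peaked near 0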
|
iproduct/course-social-robotics
|
11-dnn-keras/venv/Lib/site-packages/matplotlib/mlab.py
|
Python
|
gpl-2.0
| 35,666
|
[
"Gaussian"
] |
ebc929d6c1c6a99c2cf48b4f5495e47fa1ea58df16d6ff423f5702ab05b9a74b
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import espressomd
from espressomd.interactions import HarmonicBond
@utx.skipIfMissingFeatures("LENNARD_JONES")
class AnalyzeEnergy(ut.TestCase):
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
harmonic = HarmonicBond(r_0=0.0, k=3)
@classmethod
def setUpClass(cls):
box_l = 20
cls.system.box_l = [box_l, box_l, box_l]
cls.system.cell_system.skin = 0.4
cls.system.time_step = 0.01
cls.system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=1.0, sigma=1.0,
cutoff=2**(1. / 6.), shift="auto")
cls.system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=1.0, sigma=1.0,
cutoff=2**(1. / 6.), shift="auto")
cls.system.non_bonded_inter[1, 1].lennard_jones.set_params(
epsilon=1.0, sigma=1.0,
cutoff=2**(1. / 6.), shift="auto")
cls.system.thermostat.set_langevin(kT=0., gamma=1., seed=42)
cls.system.bonded_inter.add(cls.harmonic)
def setUp(self):
self.system.part.add(pos=[1, 2, 2], type=0)
self.system.part.add(pos=[5, 2, 2], type=0)
def tearDown(self):
self.system.part.clear()
def test_kinetic(self):
p0, p1 = self.system.part[:]
p0.pos = [1, 2, 2]
p1.pos = [5, 2, 2]
# single moving particle
p0.v = [3, 4, 5]
p1.v = [0, 0, 0]
energy = self.system.analysis.energy()
self.assertAlmostEqual(energy["total"], 25., delta=1e-7)
self.assertAlmostEqual(energy["kinetic"], 25., delta=1e-7)
self.assertAlmostEqual(energy["bonded"], 0., delta=1e-7)
self.assertAlmostEqual(energy["non_bonded"], 0., delta=1e-7)
# two moving particles
p1.v = [3, 4, 5]
energy = self.system.analysis.energy()
self.assertAlmostEqual(energy["total"], 50., delta=1e-7)
self.assertAlmostEqual(energy["kinetic"], 50., delta=1e-7)
self.assertAlmostEqual(energy["bonded"], 0., delta=1e-7)
self.assertAlmostEqual(energy["non_bonded"], 0., delta=1e-7)
def test_non_bonded(self):
p0, p1 = self.system.part[:]
p0.pos = [1, 2, 2]
p1.pos = [2, 2, 2]
energy = self.system.analysis.energy()
self.assertAlmostEqual(energy["total"], 1., delta=1e-5)
self.assertAlmostEqual(energy["kinetic"], 0., delta=1e-7)
self.assertAlmostEqual(energy["bonded"], 0., delta=1e-7)
self.assertAlmostEqual(energy["non_bonded"], 1., delta=1e-7)
# add another pair of particles
self.system.part.add(pos=[3, 2, 2], type=1)
self.system.part.add(pos=[4, 2, 2], type=1)
energy = self.system.analysis.energy()
self.assertAlmostEqual(energy["total"], 3., delta=1e-7)
self.assertAlmostEqual(energy["kinetic"], 0., delta=1e-7)
self.assertAlmostEqual(energy["bonded"], 0., delta=1e-7)
self.assertAlmostEqual(energy["non_bonded"], 3., delta=1e-7)
self.assertAlmostEqual(energy["non_bonded", 0, 1], 1., delta=1e-7)
self.assertAlmostEqual(energy["non_bonded", 0, 0]
+ energy["non_bonded", 0, 1]
+ energy["non_bonded", 1, 1], energy["total"], delta=1e-7)
def test_bonded(self):
p0, p1 = self.system.part[:]
p0.pos = [1, 2, 2]
p1.pos = [3, 2, 2]
# single bond
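# Expected value (comment added for clarity): the particles sit a distance 2
# apart, so the harmonic energy is k/2 * (r - r_0)^2 = 3/2 * 2^2 = 6, while
# the pair is beyond the LJ cutoff 2^(1/6), so the non-bonded energy stays 0.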
p0.add_bond((self.harmonic, p1))
energy = self.system.analysis.energy()
self.assertAlmostEqual(energy["total"], 6, delta=1e-7)
self.assertAlmostEqual(energy["kinetic"], 0., delta=1e-7)
self.assertAlmostEqual(energy["bonded"], 6, delta=1e-7)
self.assertAlmostEqual(energy["non_bonded"], 0., delta=1e-7)
# two bonds
p1.add_bond((self.harmonic, p0))
energy = self.system.analysis.energy()
self.assertAlmostEqual(energy["total"], 12, delta=1e-7)
self.assertAlmostEqual(energy["kinetic"], 0., delta=1e-7)
self.assertAlmostEqual(energy["bonded"], 12, delta=1e-7)
self.assertAlmostEqual(energy["non_bonded"], 0., delta=1e-7)
# bonds deleted
p0.delete_all_bonds()
p1.delete_all_bonds()
energy = self.system.analysis.energy()
self.assertAlmostEqual(energy["total"], 0., delta=1e-7)
self.assertAlmostEqual(energy["kinetic"], 0., delta=1e-7)
self.assertAlmostEqual(energy["bonded"], 0, delta=1e-7)
self.assertAlmostEqual(energy["non_bonded"], 0., delta=1e-7)
def test_all(self):
p0, p1 = self.system.part[:]
p0.pos = [1, 2, 2]
p1.pos = [2, 2, 2]
p0.v = [3, 4, 5]
p1.v = [3, 4, 5]
# single bond
p0.add_bond((self.harmonic, p1))
energy = self.system.analysis.energy()
self.assertAlmostEqual(energy["total"], 50. + 3. / 2. + 1., delta=1e-7)
self.assertAlmostEqual(energy["kinetic"], 50., delta=1e-7)
self.assertAlmostEqual(energy["bonded"], 3. / 2., delta=1e-7)
self.assertAlmostEqual(energy["non_bonded"], 1., delta=1e-7)
# two bonds
p1.add_bond((self.harmonic, p0))
energy = self.system.analysis.energy()
self.assertAlmostEqual(energy["total"], 50. + 3 + 1., delta=1e-7)
self.assertAlmostEqual(energy["kinetic"], 50., delta=1e-7)
self.assertAlmostEqual(energy["bonded"], 3., delta=1e-7)
self.assertAlmostEqual(energy["non_bonded"], 1., delta=1e-7)
# add another pair of particles
self.system.part.add(pos=[1, 5, 5], type=1)
self.system.part.add(pos=[2, 5, 5], type=1)
energy = self.system.analysis.energy()
self.assertAlmostEqual(
energy["total"], 50. + 3 + (1. + 1.), delta=1e-7)
self.assertAlmostEqual(energy["kinetic"], 50., delta=1e-7)
self.assertAlmostEqual(energy["bonded"], 3., delta=1e-7)
self.assertAlmostEqual(energy["non_bonded"], 1. + 1., delta=1e-7)
@utx.skipIfMissingFeatures(["ELECTROSTATICS", "P3M"])
def test_electrostatics(self):
from espressomd import electrostatics
p0, p1 = self.system.part[:]
p0.pos = [1, 2, 2]
p1.pos = [3, 2, 2]
p0.q = 1
p1.q = -1
p3m = electrostatics.P3M(prefactor=1.0,
accuracy=9.910945054074526e-08,
mesh=[22, 22, 22],
cao=7,
r_cut=8.906249999999998,
alpha=0.387611049779351,
tune=False)
self.system.actors.add(p3m)
# did not verify if this is correct, but looks pretty good (close to
# 1/2)
u_p3m = -0.501062398379
energy = self.system.analysis.energy()
self.assertAlmostEqual(energy["total"], u_p3m, delta=1e-5)
self.assertAlmostEqual(energy["kinetic"], 0., delta=1e-7)
self.assertAlmostEqual(energy["bonded"], 0., delta=1e-7)
self.assertAlmostEqual(energy["non_bonded"], 0, delta=1e-7)
self.assertAlmostEqual(energy["coulomb"], u_p3m, delta=1e-5)
if __name__ == "__main__":
ut.main()
|
fweik/espresso
|
testsuite/python/analyze_energy.py
|
Python
|
gpl-3.0
| 7,925
|
[
"ESPResSo"
] |
ee79f8626ee6bb05ccb180812eeaa7ae6f664f199b2cdffbb584e2c8e0b5bb0d
|
"""
This file tests electric_field_value and induction_operator
agains the canonical electric_field integral evaluation
"""
import numpy as np
import pytest
import psi4
pytestmark = pytest.mark.quick
def test_elec_fields():
mol = psi4.geometry("""
units bohr
0 1
O1 0.000000000000 0.000000000000 0.224348285559
H2 -1.423528800232 0.000000000000 -0.897393142237
H3 1.423528800232 0.000000000000 -0.897393142237
symmetry c1
no_com
no_reorient
""")
basis_obj = psi4.core.BasisSet.build(mol, 'ORBITAL', "cc-pvdz")
mints = psi4.core.MintsHelper(basis_obj)
# generate random points and dipole moments
coords = 5 * np.random.rand(50, 3)
moments = 0.1 * np.random.rand(50, 3)
# run the new implementation
coordinates = psi4.core.Matrix.from_array(coords)
dips = psi4.core.Matrix.from_array(moments)
ret = mints.induction_operator(coordinates, dips).np
# old implementation (used for EFP, for example)
nbf = mints.basisset().nbf()
V2 = np.zeros((nbf, nbf))
field_ints = np.zeros((3, nbf, nbf))
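# Reference construction (comment added for clarity): for each site, scale the
# three Cartesian electric-field integral matrices by minus the induced dipole
# components and accumulate, i.e. V2 = -sum_i sum_a m[i][a] * E_a(r_i);
# induction_operator is expected to reproduce this matrix exactly.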
for c, m in zip(coords, moments):
# get electric field integrals from Psi4
p4_field_ints = mints.electric_field(origin=c)
for pole in range(3):
field_ints[pole] = np.asarray(p4_field_ints[pole])
# scale field integrals by induced dipole magnitudes.
for pole in range(3):
field_ints[pole] *= -m[pole]
V2 += field_ints[pole]
np.testing.assert_allclose(V2, ret)
# electric field expectation values
nbf = basis_obj.nbf()
mock_dmat = psi4.core.Matrix.from_array(np.random.rand(nbf, nbf))
field_val = mints.electric_field_value(coordinates, mock_dmat).np
# Electric field at points
points = coords
npt = len(points)
field_ref = np.zeros((npt, 3))
for ipt in range(npt):
p4_field_ints = mints.electric_field(origin=points[ipt])
field_ref[ipt] = [
np.vdot(mock_dmat.np, np.asarray(p4_field_ints[0])), # Ex
np.vdot(mock_dmat.np, np.asarray(p4_field_ints[1])), # Ey
np.vdot(mock_dmat.np, np.asarray(p4_field_ints[2])) # Ez
]
np.testing.assert_allclose(field_ref, field_val)
|
CDSherrill/psi4
|
tests/pytests/test_elec_fields.py
|
Python
|
lgpl-3.0
| 2,272
|
[
"Psi4"
] |
96d66e8f3982b74957185b25f6756f9ae69fb4bdcafc72ccfa51e78884b00209
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------
# Filename: multitaper.py
# Purpose: Python wrapper for multitaper `mtspec` f90 library of
# German A. Prieto
# Author: Moritz Beyreuther, Lion Krischer
# Email: beyreuth@geophysik.uni-muenchen.de
# License: GPLv2
#
# Copyright (C) 2008-2010 Moritz Beyreuther, Lion Krischer
#---------------------------------------------------------------------
"""
Python wrapper for the multitaper `mtspec` f90 library of German A. Prieto.
"""
import ctypes as C
from mtspec.util import mtspeclib
import numpy as np
def mtspec(data, delta, time_bandwidth, nfft=None, number_of_tapers=None,
quadratic=False, adaptive=True, verbose=False,
optional_output=False, statistics=False, rshape=False,
fcrit=False):
"""
Wrapper method for the mtspec subroutine in the library by German A.
Prieto.
This method estimates the adaptive weighted multitaper spectrum, as in
    Thomson 1982. This is done by estimating the DPSS (discrete prolate
    spheroidal sequences), multiplying each of the tapers with the data series,
    taking the FFT, and using the adaptive scheme for a better estimate.
:param data: :class:`numpy.ndarray`
Array with the data.
:param delta: float
Sample spacing of the data.
:param time_bandwidth: float
Time-bandwidth product. Common values are 2, 3, 4 and numbers in
between.
:param nfft: int
        Number of points for fft. If nfft is None, no zero padding
        will be applied before the fft.
:param number_of_tapers: integer, optional
        Number of tapers to use. Defaults to int(2*time_bandwidth) - 1. This
        is the maximum sensible number; more tapers will have little influence
        on the final spectrum but increase the calculation time. Use fewer
tapers for a faster calculation.
:param quadratic: bool, optional
        Whether or not to calculate a quadratic multitaper. Will only work
        if nfft is None or equal to the sample count. The nfft parameter
        will overwrite the quadratic parameter. Defaults to False.
:param adaptive: bool, optional
Whether to use adaptive or constant weighting of the eigenspectra.
        Defaults to True (adaptive).
:param verbose: bool, optional
Passed to the fortran library. Defaults to False.
:param optional_output: bool, optional
Calculates and returns additional output parameters. See the notes in
the docstring for further details.
:param statistics: bool, optional
Calculates and returns statistics. See the notes in the docstring for
further details.
:param rshape: integer/None, optional
Determines whether or not to perform the F-test for lines. If rshape
is 1 or 2, then don't put the lines back. If rshape is 2 only check
        around 60 Hz. See the fortran source code for more information.
Defaults to None (do not perform the F-test).
:param fcrit: float/None, optional
The threshold probability for the F-test. If none is given, the mtspec
library calculates a default value. See the fortran source code for
details. Defaults to None.
:return: Returns a list with :class:`numpy.ndarray`. See the note
below.
.. note::
        This method will return at least two arrays: the calculated spectrum
and the corresponding frequencies. If optional_output is true it will
also return (in the given order) (multidimensional) arrays containing
the eigenspectra, the corresponding eigencoefficients and an array
        containing the weights for each eigenspectrum, normalized so that the sum
        of squares over the eigenspectra is one. If statistics is True it will
also return (in the given order) (multidimensional) arrays containing
the jackknife 5% and 95% confidence intervals, the F statistics for
single line and the number of degrees of freedom for each frequency
bin. If both optional_output and statistics are true, the
optional_outputs will be returned before the statistics.
"""
npts = len(data)
    # Depending on whether nfft is specified or not, initialize _MtspecType
    # for mtspec_pad_ or mtspec_d_.
if nfft is None or nfft == npts:
nfft = npts
mt = _MtspecType("float64") # mtspec_d_
else:
mt = _MtspecType("float32") # mtspec_pad_
quadratic = False
# Use the optimal number of tapers in case no number is specified.
if number_of_tapers is None:
number_of_tapers = int(2 * time_bandwidth) - 1
# Transform the data to work with the library.
data = np.require(data, mt.float, mt.required)
# Get some information necessary for the call to the Fortran library.
number_of_frequency_bins = int(nfft / 2) + 1
# Create output arrays.
spectrum = mt.empty(number_of_frequency_bins)
frequency_bins = mt.empty(number_of_frequency_bins)
# Create optional outputs.
if optional_output is True:
eigenspectra = mt.empty((number_of_frequency_bins, number_of_tapers))
eigencoefficients = mt.empty((nfft, number_of_tapers), complex=True)
weights = mt.empty((number_of_frequency_bins, number_of_tapers))
else:
eigenspectra = eigencoefficients = weights = None
# Create statistics.
if statistics is True:
jackknife_interval = mt.empty((number_of_frequency_bins, 2))
f_statistics = mt.empty(number_of_frequency_bins)
degrees_of_freedom = mt.empty(number_of_frequency_bins)
else:
jackknife_interval = f_statistics = degrees_of_freedom = None
# Verbose mode on or off.
if verbose is True:
verbose = C.byref(C.c_char('y'))
else:
verbose = None
# Determine whether or not to compute the quadratic multitaper.
if quadratic is True:
quadratic = C.byref(C.c_int(1))
else:
quadratic = None
# Determine whether to use adaptive or constant weighting of the
# eigenspectra.
if adaptive is True:
adaptive = None
else:
adaptive = C.byref(C.c_int(1))
# Determines whether or not to perform the F-test for lines. If rshape is 1
# or 2, then don't put the lines back. If rshape is 2 only check around 60
# Hz. See the fortran source code for more informations.
if type(rshape) == int:
rshape = C.byref(C.c_int(rshape))
else:
rshape = None
# The threshold probability for the F-test. If none is given, the mtspec
# library calculates a default value. See the fortran source code for
# details.
if type(fcrit) == float:
fcrit = C.byref(C.c_float(fcrit))
else:
fcrit = None
# Call the library. Fortran passes pointers!
args = [C.byref(C.c_int(npts)), C.byref(C.c_int(nfft)),
C.byref(mt.c_float(delta)), mt.p(data),
C.byref(mt.c_float(time_bandwidth)),
C.byref(C.c_int(number_of_tapers)),
C.byref(C.c_int(number_of_frequency_bins)), mt.p(frequency_bins),
mt.p(spectrum), verbose, quadratic, adaptive,
mt.p(eigencoefficients), mt.p(weights),
mt.p(jackknife_interval), mt.p(degrees_of_freedom),
mt.p(eigenspectra), rshape, mt.p(f_statistics), fcrit, None]
    # different arguments depending on mtspec_pad_ or mtspec_d_; adapt
if npts == nfft:
args.pop(1)
# finally call the shared library function
mt.mtspec(*args)
# Figure out what to return. See the docstring of this method for details.
return_values = [spectrum, frequency_bins]
if optional_output is True:
return_values.extend([eigenspectra, eigencoefficients, weights])
if statistics is True:
return_values.extend([jackknife_interval, f_statistics,
degrees_of_freedom])
return return_values
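# Illustrative usage sketch (not part of the original library; assumes the
# compiled mtspec Fortran backend is importable and that the package
# re-exports ``mtspec`` at the top level): multitaper spectrum of a noisy
# 5 Hz sine sampled at 100 Hz.
#
#     import numpy as np
#     from mtspec import mtspec
#     t = np.arange(1000) * 0.01
#     data = np.sin(2 * np.pi * 5.0 * t) + 0.1 * np.random.randn(1000)
#     spec, freq = mtspec(data, delta=0.01, time_bandwidth=3.5)
#     # spec and freq have npts // 2 + 1 samples; the peak should sit near 5 Hz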
def sine_psd(data, delta, number_of_tapers=None, number_of_iterations=2,
degree_of_smoothing=1.0, statistics=False, verbose=False):
"""
Wrapper method for the sine_psd subroutine in the library by German A.
Prieto.
The subroutine is in charge of estimating the adaptive sine multitaper as
in Riedel and Sidorenko (1995).
This is done by performing a MSE adaptive estimation. First a pilot
    spectral estimate is used, and S" is estimated, in order to get the number
of tapers to use, using (13) of Riedel and Sidorenko for a min square error
spectrum.
Unlike the prolate spheroidal multitapers, the sine multitaper adaptive
process introduces a variable resolution and error in the frequency domain.
Complete error information is contained in the output variables as the
corridor of 1-standard-deviation errors, and in the number of tapers used
at each frequency. The errors are estimated in the simplest way, from the
number of degrees of freedom (two per taper), not by jack-knifing. The
frequency resolution is found from K*fN/Nf where fN is the Nyquist
frequency and Nf is the number of frequencies estimated. The adaptive
process used is as follows. A quadratic fit to the log PSD within an
adaptively determined frequency band is used to find an estimate of the
local second derivative of the spectrum. This is used in an equation like R
& S (13) for the MSE taper number, with the difference that a parabolic
weighting is applied with increasing taper order. Because the FFTs of the
tapered series can be found by resampling the FFT of the original time
series (doubled in length and padded with zeros) only one FFT is required
per series, no matter how many tapers are used. This makes the program
fast. Compared with the Thomson multitaper programs, this code is not only
fast but simple and short. The spectra associated with the sine tapers are
weighted before averaging with a parabolically varying weight. The
expression for the optimal number of tapers given by R & S must be modified
since it gives an unbounded result near points where S" vanishes, which
happens at many points in most spectra. This program restricts the rate of
growth of the number of tapers so that a neighboring covering interval
estimate is never completely contained in the next such interval.
This method SHOULD not be used for sharp cutoffs or deep valleys, or small
sample sizes. Instead use Thomson multitaper in mtspec in this same
library.
:param data: :class:`numpy.ndarray`
Array with the data.
:param delta: float
Sample spacing of the data.
:param number_of_tapers: integer/None, optional
Number of tapers to use. If none is given, the library will perform an
adaptive taper estimation with a varying number of tapers for each
frequency. Defaults to None.
:param number_of_iterations: integer, optional
Number of iterations to perform. Values less than 2 will be set to 2.
Defaults to 2.
:param degree_of_smoothing: float, optional
Degree of smoothing. Defaults to 1.0.
:param statistics: bool, optional
Calculates and returns statistics. See the notes in the docstring for
further details.
:param verbose: bool, optional
Passed to the fortran library. Defaults to False.
:return: Returns a list with :class:`numpy.ndarray`. See the note below
for details.
.. note::
        This method will return at least two arrays: the calculated
        spectrum and the corresponding frequencies. If statistics is True
        it will also return (in the given order) (multidimensional) arrays
containing the 1-std errors (a simple dof estimate) and the number
of tapers used for each frequency point.
"""
# Verbose mode on or off.
if verbose is True:
verbose = C.byref(C.c_char('y'))
else:
verbose = None
# Set the number of tapers so it can be read by the library.
if number_of_tapers is None:
number_of_tapers = 0
# initialize _MtspecType to save some space
mt = _MtspecType("float32")
# Transform the data to work with the library.
data = np.require(data, mt.float, mt.required)
# Some variables necessary to call the library.
npts = len(data)
number_of_frequency_bins = int(npts / 2) + 1
# Create output arrays.
frequency_bins = mt.empty(number_of_frequency_bins)
spectrum = mt.empty(number_of_frequency_bins)
# Create optional arrays or set to None.
if statistics is True:
        # exception here: mt is set up for float32, but we need int32,
        # so do the type and POINTER definition by hand
tapers_per_freq_point = np.empty(number_of_frequency_bins,
'int32', mt.required)
tapers_per_freq_point_p = \
tapers_per_freq_point.ctypes.data_as(C.POINTER(C.c_int))
errors = mt.empty((number_of_frequency_bins, 2))
else:
tapers_per_freq_point_p = errors = None
# Call the library. Fortran passes pointers!
mtspeclib.sine_psd_(C.byref(C.c_int(npts)),
C.byref(C.c_float(delta)), mt.p(data),
C.byref(C.c_int(number_of_tapers)),
C.byref(C.c_int(number_of_iterations)),
C.byref(C.c_float(degree_of_smoothing)),
C.byref(C.c_int(number_of_frequency_bins)),
mt.p(frequency_bins), mt.p(spectrum),
tapers_per_freq_point_p, mt.p(errors), verbose)
# Calculate return values.
return_values = [spectrum, frequency_bins]
if statistics is True:
return_values.extend([errors, tapers_per_freq_point])
return return_values
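# Illustrative usage sketch (not part of the original library): sine_psd
# returns the same (spectrum, frequencies) pair as mtspec, plus error and
# taper-count arrays when statistics=True.
#
#     spec, freq = sine_psd(data, delta=0.01)
#     spec, freq, errors, ntapers = sine_psd(data, delta=0.01, statistics=True)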
def dpss(npts, fw, nev, auto_spline=True, nmax=None):
"""
Wrapper method for the dpss subroutine in the library by German A.
Prieto.
    Calculation of the Discrete Prolate Spheroidal Sequences, also known as the
    Slepian sequences, and the corresponding eigenvalues. Also, the (1 -
eigenvalue) terms are calculated.
By default this routine will use spline interpolation if sequences with
more than 200000 samples are requested.
:param npts: The number of points in the series
:param fw: the time-bandwidth product (number of Rayleigh bins)
:param nev: the desired number of tapers
:param auto_spline: Whether or not to automatically use spline
interpolation with npts > 200000. Defaults to True.
:param nmax: The number of actual points to calculate the dpss. If this
number is smaller than npts spline interpolation will be performed,
regardless of auto_spline.
    :return: (v, lamb, theta) with v(npts, nev) the eigenvectors (tapers),
        lamb the eigenvalues of the v's, and theta the 1-lambda (energy
        outside the bandwidth) values.
.. note::
The tapers are the eigenvectors of the tridiagonal matrix sigma(i,j)
[see Slepian(1978) eq 14 and 25.] They are also the eigenvectors of
the Toeplitz matrix eq. 18. We solve the tridiagonal system in
tridib and tinvit for the tapers and use them in the integral
equation in the frequency domain (dpss_ev subroutine) to get the
eigenvalues more accurately, by performing Chebychev Gaussian
Quadrature following Thomson's codes.
"""
mt = _MtspecType("float64")
v = mt.empty((npts, nev))
lamb = mt.empty(nev)
theta = mt.empty(nev)
    # A user-supplied nmax smaller than npts forces spline interpolation.
if nmax and nmax < npts:
auto_spline = True
    # Otherwise fall back to the default threshold of 200000 samples.
else:
nmax = 200000
# Call either the spline routine or the normal routine.
if auto_spline is True and npts > nmax:
mtspeclib.dpss_spline_(C.byref(C.c_int(nmax)), C.byref(C.c_int(npts)),
C.byref(C.c_double(fw)), C.byref(C.c_int(nev)),
mt.p(v), mt.p(lamb), mt.p(theta))
else:
mtspeclib.dpss_(C.byref(C.c_int(npts)), C.byref(C.c_double(fw)),
C.byref(C.c_int(nev)), mt.p(v), mt.p(lamb),
mt.p(theta))
return (v, lamb, theta)
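# Illustrative usage sketch (not part of the original library): five DPSS
# tapers for a 512-sample window with time-bandwidth product 3.5.
#
#     tapers, eigenvalues, theta = dpss(512, 3.5, 5)
#     # tapers has shape (512, 5); the eigenvalues should all be close to 1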
def wigner_ville_spectrum(data, delta, time_bandwidth=3.5,
number_of_tapers=None, smoothing_filter=None,
filter_width=100, frequency_divider=1,
verbose=False):
"""
Wrapper method of the modified wv_spec (wv_spec_to_array) subroutine in
the library of German A. Prieto.
    It is very slow for large arrays, so try a small one (< 5000 samples)
    first, or adjust the frequency_divider parameter accordingly.
:param data: numpy.ndarray;
The input signal
:param delta: integer;
The input sampling interval
:param time_bandwidth: float;
        Time-bandwidth product
:param number_of_tapers: int;
Number of tapers to use. If None the number will be automatically
determined
:param smoothing_filter: string;
One of 'boxcar', 'gauss' or just None
:param filter_width: int;
Filter width in samples
:param frequency_divider: int,
This method will always calculate all frequencies from 0..nyquist_freq.
This parameter allows the adjustment of the maximum frequency, so that
the frequencies range from 0..nyquist_freq/int(frequency_divider).
Defaults to 1.
:param verbose: bool;
If True turn on verbose output
"""
data = np.require(data, 'float32')
mt = _MtspecType("float32")
npts = len(data)
# Use the optimal number of tapers in case no number is specified.
if number_of_tapers is None:
number_of_tapers = int(2 * time_bandwidth) - 1
# Determine filter.
if not smoothing_filter:
smoothing_filter = 0
elif smoothing_filter == 'boxcar':
smoothing_filter = 1
elif smoothing_filter == 'gauss':
smoothing_filter = 2
else:
msg = 'Invalid value for smoothing filter.'
raise Exception(msg)
# Verbose mode on or off.
if verbose:
verbose = C.byref(C.c_char('y'))
else:
verbose = None
# Allocate the output array
    # The f90 code internally pads zeros to 2*npts, so only every second
    # frequency point is returned; decrease the size of the array accordingly.
output = mt.empty((npts//2//int(frequency_divider)+1, npts))
mtspeclib.wv_spec_to_array_(C.byref(C.c_int(npts)),
C.byref(C.c_float(delta)),
mt.p(data), mt.p(output),
C.byref(C.c_float(time_bandwidth)),
C.byref(C.c_int(number_of_tapers)),
C.byref(C.c_int(smoothing_filter)),
C.byref(C.c_float(filter_width)),
C.byref(C.c_int(frequency_divider)), verbose)
return output
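# Illustrative usage sketch (not part of the original library): the returned
# array has shape (npts // 2 // frequency_divider + 1, npts), i.e. frequency
# bins along the first axis and time samples along the second.
#
#     wv = wigner_ville_spectrum(data, delta=0.01, smoothing_filter='gauss')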
def mt_coherence(df, xi, xj, tbp, kspec, nf, p, **kwargs):
"""
Construct the coherence spectrum from the yk's and the
weights of the usual multitaper spectrum estimation.
Note this code uses the real(4) multitaper code.
INPUT
:param df: float; sampling rate of time series
:param xi: numpy.ndarray; data for first series
:param xj: numpy.ndarray; data for second series
:param tbp: float; the time-bandwidth product
:param kspec: integer; number of tapers to use
:param nf: integer; number of freq points in spectrum
:param p: float; confidence for null hypothesis test, e.g. .95
OPTIONAL INPUT
:param iadapt: integer 0 - adaptive, 1 - constant weights
default adapt = 1
OPTIONAL OUTPUTS, the outputs are returned as dictionary, with keys as
specified below and values as numpy.ndarrays. In order to activate the
output set the corresponding kwarg in the argument list, e.g.
``mt_coherence(df, xi, xj, tbp, kspec, nf, p, freq=True, cohe=True)``
:param freq: the frequency bins
:param cohe: coherence of the two series (0 - 1)
:param phase: the phase at each frequency
:param speci: spectrum of first series
:param specj: spectrum of second series
:param conf: p confidence value for each freq.
:param cohe_ci: 95% bounds on coherence (not larger than 1)
:param phase_ci: 95% bounds on phase estimates
If confidence intervals are requested, then both phase and
cohe variables need to be requested as well.
"""
npts = len(xi)
if len(xj) != npts:
raise Exception("Inpurt ndarrays have mismatching length")
mt = _MtspecType('float32')
# convert type of input arguments if necessary
xi = np.require(xi, mt.float, mt.required)
xj = np.require(xj, mt.float, mt.required)
# fill up optional arguments, if not given set them None
args = []
for key in ('freq', 'cohe', 'phase', 'speci', 'specj', 'conf',
                'cohe_ci', 'phase_ci', 'iadapt'):
kwargs.setdefault(key, None)
if key in ('cohe_ci', 'phase_ci') and kwargs[key]:
            kwargs[key] = mt.empty((nf, 2))
args.append(mt.p(kwargs[key]))
elif key == 'iadapt' and kwargs[key]:
args.append(C.byref(C.c_int(kwargs[key])))
elif kwargs[key]:
kwargs[key] = mt.empty(nf)
args.append(mt.p(kwargs[key]))
else:
args.append(kwargs[key])
mtspeclib.mt_cohe_(C.byref(C.c_int(npts)), C.byref(C.c_float(df)),
mt.p(xi), mt.p(xj), C.byref(C.c_float(tbp)),
C.byref(C.c_int(kspec)), C.byref(C.c_int(nf)),
C.byref(C.c_float(p)), *args)
# remove None values from dictionary
    return dict([(k, v) for k, v in kwargs.items() if v is not None])
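# Illustrative usage sketch (not part of the original library), following the
# call shown in the docstring above; df, xi, xj and nf are placeholders.
#
#     out = mt_coherence(df, xi, xj, 3.5, 5, nf, 0.95, freq=True, cohe=True)
#     # out['freq'] and out['cohe'] are numpy arrays of length nf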
class _MtspecType(object):
"""
Simple class that stores type definition for interfacing with
the fortran code and provides some helper functions.
"""
struct = {"float32": (C.c_float, mtspeclib.mtspec_pad_),
"float64": (C.c_double, mtspeclib.mtspec_d_)}
def __init__(self, dtype):
"""
Depending on dtype initialize different type structures.
:param dtype: 'float32' or 'float64'
"""
if dtype not in self.struct.keys():
raise ValueError("dtype must be either 'float32' or 'float64'")
self.float = dtype
        self.complex = 'complex%d' % (2 * int(dtype[-2:]))
self.c_float = self.struct[dtype][0]
self.pointer = C.POINTER(self.c_float)
self.required = ['F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE']
self.mtspec = self.struct[dtype][1]
def empty(self, shape, complex=False):
"""
A wrapper around np.empty which automatically sets the correct type
and returns an empty array.
:param shape: The shape of the array in np.empty format
"""
if complex:
return np.empty(shape, self.complex, self.required)
return np.empty(shape, self.float, self.required)
def p(self, ndarray):
"""
A wrapper around ctypes.data_as which automatically sets the
        correct type. Returns None if ndarray is None.
:param ndarray: numpy input array or None
"""
# short variable name for passing as argument in function calls
if ndarray is None:
return None
return ndarray.ctypes.data_as(self.pointer)
|
luca-penasa/mtspec-python3
|
mtspec/multitaper.py
|
Python
|
gpl-2.0
| 23,632
|
[
"Gaussian"
] |
aa49c012e1e9c7244e2d3192e2b4115f738d4354b802afa693e5144e2e107202
|
# Copyright (c) Mathias Kaerlev 2013-2017.
#
# This file is part of cuwo.
#
# cuwo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cuwo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with cuwo. If not, see <http://www.gnu.org/licenses/>.
"""
Constant string definitions
NOTE: This file is automatically generated. Do not modify.
"""
SOUND_NAMES = {
0: 'hit',
1: 'blade1',
2: 'blade2',
3: 'long-blade1',
4: 'long-blade2',
5: 'hit1',
6: 'hit2',
7: 'punch1',
8: 'punch2',
9: 'hit-arrow',
10: 'hit-arrow-critical',
11: 'smash1',
12: 'slam-ground',
13: 'smash-hit2',
14: 'smash-jump',
15: 'swing',
16: 'shield-swing',
17: 'swing-slow',
18: 'swing-slow2',
19: 'arrow-destroy',
20: 'blade1',
21: 'punch2',
22: 'salvo2',
23: 'sword-hit03',
24: 'block',
25: 'shield-slam',
26: 'roll',
27: 'destroy2',
28: 'cry',
29: 'levelup2',
30: 'missioncomplete',
31: 'water-splash01',
32: 'step2',
33: 'step-water',
34: 'step-water2',
35: 'step-water3',
36: 'channel2',
37: 'channel-hit',
38: 'fireball',
39: 'fire-hit',
40: 'magic02',
41: 'watersplash',
42: 'watersplash-hit',
43: 'lich-scream',
44: 'drink2',
45: 'pickup',
46: 'disenchant2',
47: 'upgrade2',
48: 'swirl',
49: 'human-voice01',
50: 'human-voice02',
51: 'gate',
52: 'spike-trap',
53: 'fire-trap',
54: 'lever',
55: 'charge2',
56: 'magic02',
57: 'drop',
58: 'drop-coin',
59: 'drop-item',
60: 'male-groan',
61: 'female-groan',
62: 'male-groan',
63: 'female-groan',
64: 'goblin-male-groan',
65: 'goblin-female-groan',
66: 'lizard-male-groan',
67: 'lizard-female-groan',
68: 'dwarf-male-groan',
69: 'dwarf-female-groan',
70: 'orc-male-groan',
71: 'orc-female-groan',
72: 'undead-male-groan',
73: 'undead-female-groan',
74: 'frogman-male-groan',
75: 'frogman-female-groan',
76: 'monster-groan',
77: 'troll-groan',
78: 'mole-groan',
79: 'slime-groan',
80: 'zombie-groan',
81: 'Explosion',
82: 'punch2',
83: 'menu-open2',
84: 'menu-close2',
85: 'menu-select',
86: 'menu-tab',
87: 'menu-grab-item',
88: 'menu-drop-item',
89: 'craft',
90: 'craft-proc',
91: 'absorb',
92: 'manashield',
93: 'bulwark',
94: 'bird1',
95: 'bird2',
96: 'bird3',
97: 'cricket1',
98: 'cricket2',
99: 'owl1',
100: 'owl2'
}
SOUND_IDS = {v: k for k, v in SOUND_NAMES.items()}
MODEL_NAMES = {
-1: None,
0: 'body2',
1: 'body4',
2: 'body3',
3: 'wizard-head',
4: 'wizard-body',
5: 'witch-head',
6: 'witch-body',
7: 'glove',
8: 'head2',
9: 'girl-head3',
10: 'elf-head-female',
11: 'old-head',
12: 'gnoll-head',
13: 'gnoll-body',
14: 'gnoll-hand',
15: 'gnoll-foot',
16: 'polar-gnoll-head',
17: 'polar-gnoll-body',
18: 'polar-gnoll-hand',
19: 'polar-gnoll-foot',
20: 'monkey-head',
21: 'monkey-body',
22: 'monkey-hand',
23: 'monkey-foot',
24: 'troll-head',
25: 'troll-body',
26: 'troll-arm',
27: 'troll-hand',
28: 'troll-foot',
29: 'dark-troll-eyes',
30: 'dark-troll-head',
31: 'dark-troll-body',
32: 'dark-troll-arm',
33: 'dark-troll-hand',
34: 'dark-troll-foot',
35: 'hell-demon-head',
36: 'hell-demon-body',
37: 'hell-demon-arm',
38: 'hell-demon-hand',
39: 'hell-demon-foot',
40: 'golem-head',
41: 'golem-body',
42: 'golem-arm',
43: 'golem-hand',
44: 'golem-foot',
45: 'golem-ember-head',
46: 'golem-ember-body',
47: 'golem-ember-arm',
48: 'golem-ember-hand',
49: 'golem-ember-foot',
50: 'golem-snow-head',
51: 'golem-snow-body',
52: 'golem-snow-arm',
53: 'golem-snow-hand',
54: 'golem-snow-foot',
55: 'yeti-head',
56: 'yeti-body',
57: 'yeti-arm',
58: 'yeti-hand',
59: 'yeti-foot',
60: 'ogre-head',
61: 'ogre-body',
62: 'ogre-hand',
63: 'ogre-foot',
64: 'rockling-head',
65: 'rockling-hand',
66: 'rockling-foot',
67: 'cyclops-head',
68: 'cyclops-body',
69: 'cyclops-arm',
70: 'cyclops-hand',
71: 'cyclops-foot',
72: 'mammoth-head',
73: 'mammoth-body',
74: 'mammoth-foot',
75: 'goblin-head-m01',
76: 'goblin-head-m02',
77: 'goblin-head-m03',
78: 'goblin-head-m04',
79: 'goblin-head-m05',
80: 'goblin-hair-m01',
81: 'goblin-hair-m02',
82: 'goblin-hair-m03',
83: 'goblin-hair-m04',
84: 'goblin-hair-m05',
85: 'goblin-hair-m06',
86: 'goblin-head-f01',
87: 'goblin-head-f02',
88: 'goblin-head-f03',
89: 'goblin-head-f04',
90: 'goblin-head-f05',
91: 'goblin-hair-f01',
92: 'goblin-hair-f02',
93: 'goblin-hair-f03',
94: 'goblin-hair-f04',
95: 'goblin-hair-f05',
96: 'goblin-hair-f06',
97: 'goblin-hand',
98: 'lizard-head-m01',
99: 'lizard-head-m02',
100: 'lizard-hair-m01',
101: 'lizard-hair-m02',
102: 'lizard-hair-m03',
103: 'lizard-hair-m04',
104: 'lizard-hair-m05',
105: 'lizard-hair-m06',
106: 'lizard-head-f01',
107: 'lizard-head-f02',
108: 'lizard-head-f03',
109: 'lizard-head-f04',
110: 'lizard-head-f05',
111: 'lizard-hand',
112: 'lizard-body',
113: 'lizard-foot',
114: 'frog-body',
115: 'frog-head',
116: 'frog-foot',
117: 'frog-hand',
118: 'plant-creature-body',
119: 'plant-creature-head',
120: 'plant-creature-foot',
121: 'plant-creature-hand',
122: 'radish-creature-head',
123: 'radish-creature-foot',
124: 'radish-creature-hand',
125: 'onionling-head',
126: 'onionling-foot',
127: 'desert-onionling',
128: 'desert-onionling-foot',
129: 'devourer-head',
130: 'devourer-foot',
131: 'mole-body',
132: 'mole-head',
133: 'mole-foot',
134: 'mole-hand',
135: 'biter-body',
136: 'biter-head',
137: 'biter-foot',
138: 'biter-hand',
139: 'koala-body',
140: 'koala-head',
141: 'koala-foot',
142: 'koala-hand',
143: 'squirrel-body',
144: 'squirrel-head',
145: 'squirrel-foot',
146: 'squirrel-hand',
147: 'raccoon-body',
148: 'raccoon-head',
149: 'raccoon-foot',
150: 'raccoon-hand',
151: 'owl-head',
152: 'owl-foot',
153: 'owl-hand',
154: 'bunny-body',
155: 'bunny-foot',
156: 'porcupine-body',
157: 'porcupine-foot',
158: 'squid-body',
159: 'squid-foot',
160: 'spike-head',
161: 'spike-foot',
162: 'spike-body',
163: 'spike-hand',
164: 'anubis-head',
165: 'anubis-foot',
166: 'anubis-body',
167: 'anubis-hand',
168: 'horus-head',
169: 'horus-foot',
170: 'horus-body',
171: 'horus-hand',
172: 'jester-head',
173: 'jester-foot',
174: 'jester-body',
175: 'jester-hand',
176: 'spectrino-head',
177: 'spectrino-foot',
178: 'spectrino-body',
179: 'spectrino-hand',
180: 'desert-nomad-head1',
181: 'desert-nomad-head2',
182: 'desert-nomad-head3',
183: 'desert-nomad-female-head1',
184: 'desert-nomad-female-head2',
185: 'desert-nomad-female-head3',
186: 'desert-nomad-foot',
187: 'desert-nomad-body',
188: 'desert-nomad-hand',
189: 'desert-nomad-hand2',
190: 'djinn-head',
191: 'djinn-foot',
192: 'djinn-body',
193: 'djinn-hand',
194: 'minotaur-head',
195: 'minotaur-foot',
196: 'minotaur-body',
197: 'minotaur-hand',
198: 'minotaur-arm',
199: 'imp-eyes',
200: 'imp-head',
201: 'imp-foot',
202: 'imp-body',
203: 'imp-hand',
204: 'penguin-body',
205: 'penguin-head',
206: 'penguin-foot',
207: 'penguin-hand',
208: 'crab-body',
209: 'crab-head',
210: 'crab-foot',
211: 'crab-hand',
212: 'crab-body-blue',
213: 'crab-head-blue',
214: 'crab-foot-blue',
215: 'crab-hand-blue',
216: 'barkbeetle-head',
217: 'barkbeetle-foot',
218: 'firebeetle-head',
219: 'firebeetle-foot',
220: 'snoutbeetle-head',
221: 'snoutbeetle-foot',
222: 'lemonbeetle-head',
223: 'lemonbeetle-foot',
224: 'santa-body',
225: 'santa-head',
226: 'zombie-body',
227: 'zombie-head',
228: 'foot',
229: 'zombie-hand',
230: 'hornet-body',
231: 'hornet-head',
232: 'hornet-foot',
233: 'hornet-hand',
234: 'insect-guard-body',
235: 'insect-guard-head',
236: 'insect-guard-foot',
237: 'insect-guard-hand',
238: 'insect-guard-wing',
239: 'fly-body',
240: 'fly-head',
241: 'fly-foot',
242: 'fly-hand',
243: 'bumblebee',
244: 'bumblebee-hand',
245: 'bumblebee-foot',
246: 'midge-body',
247: 'midge-head',
248: 'midge-foot',
249: 'midge-hand',
250: 'mosquito-body',
251: 'mosquito-head',
252: 'mosquito-foot',
253: 'mosquito-hand',
254: 'crow-body',
255: 'crow-head',
256: 'crow-foot',
257: 'crow-hand',
258: 'chicken-body',
259: 'chicken-head',
260: 'chicken-foot',
261: 'chicken-hand',
262: 'seagull-body',
263: 'seagull-head',
264: 'seagull-foot',
265: 'seagull-hand',
266: 'parrot-body',
267: 'parrot-head',
268: 'parrot-foot',
269: 'parrot-hand',
270: 'parrot-blue-body',
271: 'parrot-blue-head',
272: 'parrot-blue-foot',
273: 'parrot-blue-hand',
274: 'bat-body',
275: 'bat-head',
276: 'bat-foot',
277: 'bat-hand',
278: 'lich-body',
279: 'lich-head',
280: 'lich-hand',
281: 'lich-arm',
282: 'dwarf-head-m01',
283: 'dwarf-head-m02',
284: 'dwarf-head-m03',
285: 'dwarf-head-m04',
286: 'dwarf-head-m05',
287: 'dwarf-hair-m01',
288: 'dwarf-hair-m02',
289: 'dwarf-hair-m03',
290: 'dwarf-head-f01',
291: 'dwarf-head-f02',
292: 'dwarf-head-f03',
293: 'dwarf-head-f04',
294: 'dwarf-head-f05',
295: 'dwarf-hair-f01',
296: 'dwarf-hair-f02',
297: 'dwarf-hair-f03',
298: 'dwarf-hair-f04',
299: 'dwarf-hair-f05',
300: 'dwarf-body',
301: 'dwarf-body-female',
302: 'orc-hand',
303: 'undead-head-m01',
304: 'undead-head-m02',
305: 'undead-head-m03',
306: 'undead-head-m04',
307: 'undead-head-m05',
308: 'undead-head-m06',
309: 'undead-hair-m01',
310: 'undead-hair-m02',
311: 'undead-hair-m03',
312: 'undead-hair-m04',
313: 'undead-hair-m05',
314: 'undead-hair-m06',
315: 'undead-head-f01',
316: 'undead-head-f02',
317: 'undead-head-f03',
318: 'undead-head-f04',
319: 'undead-head-f05',
320: 'undead-head-f06',
321: 'undead-hair-f01',
322: 'undead-hair-f02',
323: 'undead-hair-f03',
324: 'undead-hair-f04',
325: 'undead-hair-f05',
326: 'undead-hair-f06',
327: 'undead-hand',
328: 'skeleton-head',
329: 'skeleton-eyes',
330: 'skeleton-hand',
331: 'skeleton-body',
332: 'skeleton-foot',
333: 'plainrunner-body',
334: 'plainrunner-foot',
335: 'leafrunner-body',
336: 'leafrunner-foot',
337: 'snowrunner-body',
338: 'snowrunner-foot',
339: 'desertrunner-body',
340: 'desertrunner-foot',
341: 'peacock-body',
342: 'peacock-foot',
343: 'peacock-head',
344: 'pony-body',
345: 'pony-head',
346: 'pony-foot',
347: 'pony-tail',
348: 'camel-body',
349: 'camel-head',
350: 'camel-foot',
351: 'cow-body',
352: 'cow-head',
353: 'cow-foot',
354: 'collie-body',
355: 'collie-head',
356: 'collie-hand',
357: 'collie-foot',
358: 'collie-tail',
359: 'shepherd-dog-body',
360: 'shepherd-dog-head',
361: 'shepherd-dog-hand',
362: 'shepherd-dog-foot',
363: 'shepherd-dog-tail',
364: 'skull-bull-body',
365: 'skull-bull-head',
366: 'skull-bull-hand',
367: 'skull-bull-foot',
368: 'skull-bull-tail',
369: 'alpaca-body',
370: 'alpaca-head',
371: 'alpaca-hand',
372: 'alpaca-foot',
373: 'alpaca-brown-body',
374: 'alpaca-brown-head',
375: 'alpaca-brown-hand',
376: 'alpaca-brown-foot',
377: 'dog-body2',
378: 'dog-head2',
379: 'dog-hand2',
380: 'dog-foot2',
381: 'dog-tail2',
382: 'scottish-terrier-body',
383: 'scottish-terrier-head',
384: 'scottish-terrier-hand',
385: 'scottish-terrier-foot',
386: 'scottish-terrier-tail',
387: 'wolf-body',
388: 'wolf-head',
389: 'wolf-hand',
390: 'wolf-foot',
391: 'wolf-tail',
392: 'panther-body',
393: 'panther-head',
394: 'panther-hand',
395: 'panther-foot',
396: 'panther-tail',
397: 'cat-body',
398: 'cat-head',
399: 'cat-hand',
400: 'cat-foot',
401: 'cat-tail',
402: 'cat-body2',
403: 'cat-head2',
404: 'cat-hand2',
405: 'cat-foot2',
406: 'cat-tail2',
407: 'cat-body3',
408: 'cat-head3',
409: 'cat-hand3',
410: 'cat-foot3',
411: 'cat-tail3',
412: 'pig-body',
413: 'pig-head',
414: 'pig-foot',
415: 'sheep-body',
416: 'sheep-head',
417: 'sheep-foot',
418: 'duckbill-body',
419: 'duckbill-head',
420: 'duckbill-foot',
421: 'duckbill-tail',
422: 'crocodile-body',
423: 'crocodile-head',
424: 'crocodile-foot',
425: 'dragon-body',
426: 'dragon-head',
427: 'dragon-foot',
428: 'dragon-wing',
429: 'dragon-tail',
430: 'hand2',
431: 'brown-hand',
432: 'foot',
433: 'boot',
434: 'wood-staff1',
435: 'wood-staff2',
436: 'wood-staff3',
437: 'wood-staff4',
438: 'wood-staff6',
439: 'wood-staff1-random1',
440: 'wood-staff2-random1',
441: 'wood-staff3-random1',
442: 'wood-staff4-random1',
443: 'wood-staff5-random1',
444: 'wood-staff1-random2',
445: 'wood-staff2-random2',
446: 'wood-staff3-random2',
447: 'wood-staff4-random2',
448: 'wood-staff5-random2',
449: 'wood-staff1-random3',
450: 'wood-staff2-random3',
451: 'wood-staff3-random3',
452: 'wood-staff4-random3',
453: 'wood-staff5-random3',
454: 'wood-staff1-random4',
455: 'wood-staff2-random4',
456: 'wood-staff3-random4',
457: 'wood-staff4-random4',
458: 'wood-staff5-random4',
459: 'wood-staff1-random5',
460: 'wood-staff2-random5',
461: 'wood-staff3-random5',
462: 'wood-staff4-random5',
463: 'wood-staff5-random5',
464: 'wood-staff1-random6',
465: 'wood-staff2-random6',
466: 'wood-staff3-random6',
467: 'wood-staff4-random6',
468: 'wood-staff5-random6',
469: 'wood-staff1-random7',
470: 'wood-staff2-random7',
471: 'wood-staff3-random7',
472: 'wood-staff4-random7',
473: 'wood-staff5-random7',
474: 'wood-staff1-random8',
475: 'wood-staff2-random8',
476: 'wood-staff3-random8',
477: 'wood-staff4-random8',
478: 'wood-staff5-random8',
479: 'wood-staff1-random9',
480: 'wood-staff2-random9',
481: 'wood-staff3-random9',
482: 'wood-staff4-random9',
483: 'wood-staff5-random9',
484: 'wood-staff1-random10',
485: 'wood-staff2-random10',
486: 'wood-staff3-random10',
487: 'wood-staff4-random10',
488: 'wood-staff5-random10',
489: 'wood-wand1',
490: 'wood-wand2',
491: 'wood-wand3',
492: 'wood-wand4',
493: 'wood-wand5',
494: 'wood-wand1-random1',
495: 'wood-wand2-random1',
496: 'wood-wand3-random1',
497: 'wood-wand4-random1',
498: 'wood-wand5-random1',
499: 'wood-wand1-random2',
500: 'wood-wand2-random2',
501: 'wood-wand3-random2',
502: 'wood-wand4-random2',
503: 'wood-wand5-random2',
504: 'wood-wand1-random3',
505: 'wood-wand2-random3',
506: 'wood-wand3-random3',
507: 'wood-wand4-random3',
508: 'wood-wand5-random3',
509: 'wood-wand1-random4',
510: 'wood-wand2-random4',
511: 'wood-wand3-random4',
512: 'wood-wand4-random4',
513: 'wood-wand5-random4',
514: 'wood-wand1-random5',
515: 'wood-wand2-random5',
516: 'wood-wand3-random5',
517: 'wood-wand4-random5',
518: 'wood-wand5-random5',
519: 'wood-wand1-random6',
520: 'wood-wand2-random6',
521: 'wood-wand3-random6',
522: 'wood-wand4-random6',
523: 'wood-wand5-random6',
524: 'wood-wand1-random7',
525: 'wood-wand2-random7',
526: 'wood-wand3-random7',
527: 'wood-wand4-random7',
528: 'wood-wand5-random7',
529: 'wood-wand1-random8',
530: 'wood-wand2-random8',
531: 'wood-wand3-random8',
532: 'wood-wand4-random8',
533: 'wood-wand5-random8',
534: 'wood-wand1-random9',
535: 'wood-wand2-random9',
536: 'wood-wand3-random9',
537: 'wood-wand4-random9',
538: 'wood-wand5-random9',
539: 'wood-wand1-random10',
540: 'wood-wand2-random10',
541: 'wood-wand3-random10',
542: 'wood-wand4-random10',
543: 'wood-wand5-random10',
544: 'gold-bracelet1',
545: 'gold-bracelet2',
546: 'gold-bracelet3',
547: 'gold-bracelet4',
548: 'gold-bracelet5',
549: 'gold-bracelet1-random1',
550: 'gold-bracelet2-random1',
551: 'gold-bracelet3-random1',
552: 'gold-bracelet4-random1',
553: 'gold-bracelet5-random1',
554: 'gold-bracelet1-random2',
555: 'gold-bracelet2-random2',
556: 'gold-bracelet3-random2',
557: 'gold-bracelet4-random2',
558: 'gold-bracelet5-random2',
559: 'gold-bracelet1-random3',
560: 'gold-bracelet2-random3',
561: 'gold-bracelet3-random3',
562: 'gold-bracelet4-random3',
563: 'gold-bracelet5-random3',
564: 'gold-bracelet1-random4',
565: 'gold-bracelet2-random4',
566: 'gold-bracelet3-random4',
567: 'gold-bracelet4-random4',
568: 'gold-bracelet5-random4',
569: 'gold-bracelet1-random5',
570: 'gold-bracelet2-random5',
571: 'gold-bracelet3-random5',
572: 'gold-bracelet4-random5',
573: 'gold-bracelet5-random5',
574: 'silver-bracelet1',
575: 'silver-bracelet2',
576: 'silver-bracelet3',
577: 'silver-bracelet4',
578: 'silver-bracelet5',
579: 'silver-bracelet1-random1',
580: 'silver-bracelet2-random1',
581: 'silver-bracelet3-random1',
582: 'silver-bracelet4-random1',
583: 'silver-bracelet5-random1',
584: 'silver-bracelet1-random2',
585: 'silver-bracelet2-random2',
586: 'silver-bracelet3-random2',
587: 'silver-bracelet4-random2',
588: 'silver-bracelet5-random2',
589: 'silver-bracelet1-random3',
590: 'silver-bracelet2-random3',
591: 'silver-bracelet3-random3',
592: 'silver-bracelet4-random3',
593: 'silver-bracelet5-random3',
594: 'silver-bracelet1-random4',
595: 'silver-bracelet2-random4',
596: 'silver-bracelet3-random4',
597: 'silver-bracelet4-random4',
598: 'silver-bracelet5-random4',
599: 'silver-bracelet1-random5',
600: 'silver-bracelet2-random5',
601: 'silver-bracelet3-random5',
602: 'silver-bracelet4-random5',
603: 'silver-bracelet5-random5',
604: 'obsidian-staff1',
605: 'obsidian-staff2',
606: 'obsidian-staff3',
607: 'obsidian-staff4',
608: 'obsidian-staff5',
609: 'iron-mace1',
610: 'iron-mace2',
611: 'iron-mace3',
612: 'iron-mace4',
613: 'iron-mace5',
614: 'iron-mace1-random1',
615: 'iron-mace2-random1',
616: 'iron-mace3-random1',
617: 'iron-mace4-random1',
618: 'iron-mace5-random1',
619: 'iron-mace1-random2',
620: 'iron-mace2-random2',
621: 'iron-mace3-random2',
622: 'iron-mace4-random2',
623: 'iron-mace5-random2',
624: 'iron-mace1-random3',
625: 'iron-mace2-random3',
626: 'iron-mace3-random3',
627: 'iron-mace4-random3',
628: 'iron-mace5-random3',
629: 'iron-mace1-random4',
630: 'iron-mace2-random4',
631: 'iron-mace3-random4',
632: 'iron-mace4-random4',
633: 'iron-mace5-random4',
634: 'iron-mace1-random5',
635: 'iron-mace2-random5',
636: 'iron-mace3-random5',
637: 'iron-mace4-random5',
638: 'iron-mace5-random5',
639: 'iron-mace1-random6',
640: 'iron-mace2-random6',
641: 'iron-mace3-random6',
642: 'iron-mace4-random6',
643: 'iron-mace5-random6',
644: 'iron-mace1-random7',
645: 'iron-mace2-random7',
646: 'iron-mace3-random7',
647: 'iron-mace4-random7',
648: 'iron-mace5-random7',
649: 'iron-mace1-random8',
650: 'iron-mace2-random8',
651: 'iron-mace3-random8',
652: 'iron-mace4-random8',
653: 'iron-mace5-random8',
654: 'iron-mace1-random9',
655: 'iron-mace2-random9',
656: 'iron-mace3-random9',
657: 'iron-mace4-random9',
658: 'iron-mace5-random9',
659: 'iron-mace1-random10',
660: 'iron-mace2-random10',
661: 'iron-mace3-random10',
662: 'iron-mace4-random10',
663: 'iron-mace5-random10',
664: 'wood-mace01',
665: 'wood-mace02',
666: 'wood-mace03',
667: 'wood-mace04',
668: 'wood-mace05',
669: 'light-helmet',
670: 'light-chest',
671: 'light-shoulder',
672: 'light-glove',
673: 'light-boot',
674: 'bow',
675: 'wood-bow2',
676: 'wood-bow3',
677: 'wood-bow4',
678: 'wood-bow5',
679: 'wood-bow1-random1',
680: 'wood-bow2-random1',
681: 'wood-bow3-random1',
682: 'wood-bow4-random1',
683: 'wood-bow5-random1',
684: 'wood-bow1-random2',
685: 'wood-bow2-random2',
686: 'wood-bow3-random2',
687: 'wood-bow4-random2',
688: 'wood-bow5-random2',
689: 'wood-bow1-random3',
690: 'wood-bow2-random3',
691: 'wood-bow3-random3',
692: 'wood-bow4-random3',
693: 'wood-bow5-random3',
694: 'wood-bow1-random4',
695: 'wood-bow2-random4',
696: 'wood-bow3-random4',
697: 'wood-bow4-random4',
698: 'wood-bow5-random4',
699: 'wood-bow1-random5',
700: 'wood-bow2-random5',
701: 'wood-bow3-random5',
702: 'wood-bow4-random5',
703: 'wood-bow5-random5',
704: 'wood-bow1-random6',
705: 'wood-bow2-random6',
706: 'wood-bow3-random6',
707: 'wood-bow4-random6',
708: 'wood-bow5-random6',
709: 'wood-bow1-random7',
710: 'wood-bow2-random7',
711: 'wood-bow3-random7',
712: 'wood-bow4-random7',
713: 'wood-bow5-random7',
714: 'wood-bow1-random8',
715: 'wood-bow2-random8',
716: 'wood-bow3-random8',
717: 'wood-bow4-random8',
718: 'wood-bow5-random8',
719: 'wood-bow1-random9',
720: 'wood-bow2-random9',
721: 'wood-bow3-random9',
722: 'wood-bow4-random9',
723: 'wood-bow5-random9',
724: 'wood-bow1-random10',
725: 'wood-bow2-random10',
726: 'wood-bow3-random10',
727: 'wood-bow4-random10',
728: 'wood-bow5-random10',
729: 'wood-crossbow1',
730: 'wood-crossbow2',
731: 'wood-crossbow3',
732: 'wood-crossbow4',
733: 'wood-crossbow5',
734: 'wood-crossbow1-random1',
735: 'wood-crossbow2-random1',
736: 'wood-crossbow3-random1',
737: 'wood-crossbow4-random1',
738: 'wood-crossbow5-random1',
739: 'wood-crossbow1-random2',
740: 'wood-crossbow2-random2',
741: 'wood-crossbow3-random2',
742: 'wood-crossbow4-random2',
743: 'wood-crossbow5-random2',
744: 'wood-crossbow1-random3',
745: 'wood-crossbow2-random3',
746: 'wood-crossbow3-random3',
747: 'wood-crossbow4-random3',
748: 'wood-crossbow5-random3',
749: 'wood-crossbow1-random4',
750: 'wood-crossbow2-random4',
751: 'wood-crossbow3-random4',
752: 'wood-crossbow4-random4',
753: 'wood-crossbow5-random4',
754: 'wood-crossbow1-random5',
755: 'wood-crossbow2-random5',
756: 'wood-crossbow3-random5',
757: 'wood-crossbow4-random5',
758: 'wood-crossbow5-random5',
759: 'wood-crossbow1-random6',
760: 'wood-crossbow2-random6',
761: 'wood-crossbow3-random6',
762: 'wood-crossbow4-random6',
763: 'wood-crossbow5-random6',
764: 'wood-crossbow1-random7',
765: 'wood-crossbow2-random7',
766: 'wood-crossbow3-random7',
767: 'wood-crossbow4-random7',
768: 'wood-crossbow5-random7',
769: 'wood-crossbow1-random8',
770: 'wood-crossbow2-random8',
771: 'wood-crossbow3-random8',
772: 'wood-crossbow4-random8',
773: 'wood-crossbow5-random8',
774: 'wood-crossbow1-random9',
775: 'wood-crossbow2-random9',
776: 'wood-crossbow3-random9',
777: 'wood-crossbow4-random9',
778: 'wood-crossbow5-random9',
779: 'wood-crossbow1-random10',
780: 'wood-crossbow2-random10',
781: 'wood-crossbow3-random10',
782: 'wood-crossbow4-random10',
783: 'wood-crossbow5-random10',
784: 'wood-boomerang1',
785: 'wood-boomerang2',
786: 'wood-boomerang3',
787: 'wood-boomerang4',
788: 'wood-boomerang5',
789: 'wood-boomerang1-random1',
790: 'wood-boomerang2-random1',
791: 'wood-boomerang3-random1',
792: 'wood-boomerang4-random1',
793: 'wood-boomerang5-random1',
794: 'wood-boomerang1-random2',
795: 'wood-boomerang2-random2',
796: 'wood-boomerang3-random2',
797: 'wood-boomerang4-random2',
798: 'wood-boomerang5-random2',
799: 'wood-boomerang1-random3',
800: 'wood-boomerang2-random3',
801: 'wood-boomerang3-random3',
802: 'wood-boomerang4-random3',
803: 'wood-boomerang5-random3',
804: 'wood-boomerang1-random4',
805: 'wood-boomerang2-random4',
806: 'wood-boomerang3-random4',
807: 'wood-boomerang4-random4',
808: 'wood-boomerang5-random4',
809: 'wood-boomerang1-random5',
810: 'wood-boomerang2-random5',
811: 'wood-boomerang3-random5',
812: 'wood-boomerang4-random5',
813: 'wood-boomerang5-random5',
814: 'wood-boomerang1-random6',
815: 'wood-boomerang2-random6',
816: 'wood-boomerang3-random6',
817: 'wood-boomerang4-random6',
818: 'wood-boomerang5-random6',
819: 'wood-boomerang1-random7',
820: 'wood-boomerang2-random7',
821: 'wood-boomerang3-random7',
822: 'wood-boomerang4-random7',
823: 'wood-boomerang5-random7',
824: 'wood-boomerang1-random8',
825: 'wood-boomerang2-random8',
826: 'wood-boomerang3-random8',
827: 'wood-boomerang4-random8',
828: 'wood-boomerang5-random8',
829: 'wood-boomerang1-random9',
830: 'wood-boomerang2-random9',
831: 'wood-boomerang3-random9',
832: 'wood-boomerang4-random9',
833: 'wood-boomerang5-random9',
834: 'wood-boomerang1-random10',
835: 'wood-boomerang2-random10',
836: 'wood-boomerang3-random10',
837: 'wood-boomerang4-random10',
838: 'wood-boomerang5-random10',
839: 'quiver',
840: 'arrow',
841: 'fireball',
842: 'torch',
843: 'mushroom',
844: 'shimmer-mushroom',
845: 'heartflower',
846: 'heartflower-frozen',
847: 'prickly-pear',
848: 'soulflower',
849: 'life-potion',
850: 'cactus-potion',
851: 'mana-potion',
852: 'cookie',
853: 'jelly-green',
854: 'jelly-pink',
855: 'jelly-blue',
856: 'jelly-yellow',
857: 'carrot',
858: 'pumpkin-mash',
859: 'candy',
860: 'lollipop',
861: 'softice',
862: 'donut-chocolate',
863: 'cotton-candy',
864: 'popcorn',
865: 'cereal-bar',
866: 'strawberry-cake',
867: 'chocolate-cake',
868: 'vanilla-cupcake',
869: 'chocolate-cupcake',
870: 'banana-split',
871: 'croissant',
872: 'lolly',
873: 'lemon-tart',
874: 'chocolate-cookie',
875: 'bubble-gum',
876: 'licorice-candy',
877: 'cinnamon-role',
878: 'apple-ring',
879: 'waffle',
880: 'water-ice',
881: 'date-cookie',
882: 'candied-apple',
883: 'strawberry-cocktail',
884: 'milk-chocolate-bar',
885: 'caramel-chocolate-bar',
886: 'mint-chocolate-bar',
887: 'white-chocolate-bar',
888: 'sugar-candy',
889: 'blackberry-marmelade',
890: 'salted-caramel',
891: 'ginger-tartlet',
892: 'mango-juice',
893: 'fruit-basket',
894: 'melon-icecream',
895: 'bloodorange-juice',
896: 'pancakes',
897: 'curry',
898: 'biscuit-role',
899: 'iron-sword1',
900: 'iron-sword2',
901: 'iron-sword3',
902: 'iron-sword4',
903: 'iron-sword5',
904: 'iron-sword1-random1',
905: 'iron-sword2-random1',
906: 'iron-sword3-random1',
907: 'iron-sword4-random1',
908: 'iron-sword5-random1',
909: 'iron-sword1-random2',
910: 'iron-sword2-random2',
911: 'iron-sword3-random2',
912: 'iron-sword4-random2',
913: 'iron-sword5-random2',
914: 'iron-sword1-random3',
915: 'iron-sword2-random3',
916: 'iron-sword3-random3',
917: 'iron-sword4-random3',
918: 'iron-sword5-random3',
919: 'iron-sword1-random4',
920: 'iron-sword2-random4',
921: 'iron-sword3-random4',
922: 'iron-sword4-random4',
923: 'iron-sword5-random4',
924: 'iron-sword1-random5',
925: 'iron-sword2-random5',
926: 'iron-sword3-random5',
927: 'iron-sword4-random5',
928: 'iron-sword5-random5',
929: 'iron-sword1-random6',
930: 'iron-sword2-random6',
931: 'iron-sword3-random6',
932: 'iron-sword4-random6',
933: 'iron-sword5-random6',
934: 'iron-sword1-random7',
935: 'iron-sword2-random7',
936: 'iron-sword3-random7',
937: 'iron-sword4-random7',
938: 'iron-sword5-random7',
939: 'iron-sword1-random8',
940: 'iron-sword2-random8',
941: 'iron-sword3-random8',
942: 'iron-sword4-random8',
943: 'iron-sword5-random8',
944: 'iron-sword1-random9',
945: 'iron-sword2-random9',
946: 'iron-sword3-random9',
947: 'iron-sword4-random9',
948: 'iron-sword5-random9',
949: 'iron-sword1-random10',
950: 'iron-sword2-random10',
951: 'iron-sword3-random10',
952: 'iron-sword4-random10',
953: 'iron-sword5-random10',
954: 'iron-dagger1',
955: 'iron-dagger2',
956: 'iron-dagger3',
957: 'iron-dagger4',
958: 'iron-dagger5',
959: 'iron-dagger1-random1',
960: 'iron-dagger2-random1',
961: 'iron-dagger3-random1',
962: 'iron-dagger4-random1',
963: 'iron-dagger5-random1',
964: 'iron-dagger1-random2',
965: 'iron-dagger2-random2',
966: 'iron-dagger3-random2',
967: 'iron-dagger4-random2',
968: 'iron-dagger5-random2',
969: 'iron-dagger1-random3',
970: 'iron-dagger2-random3',
971: 'iron-dagger3-random3',
972: 'iron-dagger4-random3',
973: 'iron-dagger5-random3',
974: 'iron-dagger1-random4',
975: 'iron-dagger2-random4',
976: 'iron-dagger3-random4',
977: 'iron-dagger4-random4',
978: 'iron-dagger5-random4',
979: 'iron-dagger1-random5',
980: 'iron-dagger2-random5',
981: 'iron-dagger3-random5',
982: 'iron-dagger4-random5',
983: 'iron-dagger5-random5',
984: 'iron-dagger1-random6',
985: 'iron-dagger2-random6',
986: 'iron-dagger3-random6',
987: 'iron-dagger4-random6',
988: 'iron-dagger5-random6',
989: 'iron-dagger1-random7',
990: 'iron-dagger2-random7',
991: 'iron-dagger3-random7',
992: 'iron-dagger4-random7',
993: 'iron-dagger5-random7',
994: 'iron-dagger1-random8',
995: 'iron-dagger2-random8',
996: 'iron-dagger3-random8',
997: 'iron-dagger4-random8',
998: 'iron-dagger5-random8',
999: 'iron-dagger1-random9',
1000: 'iron-dagger2-random9',
1001: 'iron-dagger3-random9',
1002: 'iron-dagger4-random9',
1003: 'iron-dagger5-random9',
1004: 'iron-dagger1-random10',
1005: 'iron-dagger2-random10',
1006: 'iron-dagger3-random10',
1007: 'iron-dagger4-random10',
1008: 'iron-dagger5-random10',
1009: 'iron-fist1',
1010: 'iron-fist2',
1011: 'iron-fist3',
1012: 'iron-fist4',
1013: 'iron-fist5',
1014: 'iron-fist1-random1',
1015: 'iron-fist2-random1',
1016: 'iron-fist3-random1',
1017: 'iron-fist4-random1',
1018: 'iron-fist5-random1',
1019: 'iron-fist1-random2',
1020: 'iron-fist2-random2',
1021: 'iron-fist3-random2',
1022: 'iron-fist4-random2',
1023: 'iron-fist5-random2',
1024: 'iron-fist1-random3',
1025: 'iron-fist2-random3',
1026: 'iron-fist3-random3',
1027: 'iron-fist4-random3',
1028: 'iron-fist5-random3',
1029: 'iron-fist1-random4',
1030: 'iron-fist2-random4',
1031: 'iron-fist3-random4',
1032: 'iron-fist4-random4',
1033: 'iron-fist5-random4',
1034: 'iron-fist1-random5',
1035: 'iron-fist2-random5',
1036: 'iron-fist3-random5',
1037: 'iron-fist4-random5',
1038: 'iron-fist5-random5',
1039: 'iron-fist1-random6',
1040: 'iron-fist2-random6',
1041: 'iron-fist3-random6',
1042: 'iron-fist4-random6',
1043: 'iron-fist5-random6',
1044: 'iron-fist1-random7',
1045: 'iron-fist2-random7',
1046: 'iron-fist3-random7',
1047: 'iron-fist4-random7',
1048: 'iron-fist5-random7',
1049: 'iron-fist1-random8',
1050: 'iron-fist2-random8',
1051: 'iron-fist3-random8',
1052: 'iron-fist4-random8',
1053: 'iron-fist5-random8',
1054: 'iron-fist1-random9',
1055: 'iron-fist2-random9',
1056: 'iron-fist3-random9',
1057: 'iron-fist4-random9',
1058: 'iron-fist5-random9',
1059: 'iron-fist1-random10',
1060: 'iron-fist2-random10',
1061: 'iron-fist3-random10',
1062: 'iron-fist4-random10',
1063: 'iron-fist5-random10',
1064: 'iron-shield01',
1065: 'iron-shield02',
1066: 'iron-shield03',
1067: 'iron-shield04',
1068: 'iron-shield05',
1069: 'iron-shield1-random1',
1070: 'iron-shield2-random1',
1071: 'iron-shield3-random1',
1072: 'iron-shield4-random1',
1073: 'iron-shield5-random1',
1074: 'iron-shield1-random2',
1075: 'iron-shield2-random2',
1076: 'iron-shield3-random2',
1077: 'iron-shield4-random2',
1078: 'iron-shield5-random2',
1079: 'iron-shield1-random3',
1080: 'iron-shield2-random3',
1081: 'iron-shield3-random3',
1082: 'iron-shield4-random3',
1083: 'iron-shield5-random3',
1084: 'iron-shield1-random4',
1085: 'iron-shield2-random4',
1086: 'iron-shield3-random4',
1087: 'iron-shield4-random4',
1088: 'iron-shield5-random4',
1089: 'iron-shield1-random5',
1090: 'iron-shield2-random5',
1091: 'iron-shield3-random5',
1092: 'iron-shield4-random5',
1093: 'iron-shield5-random5',
1094: 'iron-shield1-random6',
1095: 'iron-shield2-random6',
1096: 'iron-shield3-random6',
1097: 'iron-shield4-random6',
1098: 'iron-shield5-random6',
1099: 'iron-shield1-random7',
1100: 'iron-shield2-random7',
1101: 'iron-shield3-random7',
1102: 'iron-shield4-random7',
1103: 'iron-shield5-random7',
1104: 'iron-shield1-random8',
1105: 'iron-shield2-random8',
1106: 'iron-shield3-random8',
1107: 'iron-shield4-random8',
1108: 'iron-shield5-random8',
1109: 'iron-shield1-random9',
1110: 'iron-shield2-random9',
1111: 'iron-shield3-random9',
1112: 'iron-shield4-random9',
1113: 'iron-shield5-random9',
1114: 'iron-shield1-random10',
1115: 'iron-shield2-random10',
1116: 'iron-shield3-random10',
1117: 'iron-shield4-random10',
1118: 'iron-shield5-random10',
1119: 'wood-shield01',
1120: 'wood-shield02',
1121: 'wood-shield03',
1122: 'wood-shield04',
1123: 'wood-shield05',
1124: 'iron-chest1',
1125: 'iron-chest2',
1126: 'iron-chest3',
1127: 'iron-chest4',
1128: 'iron-chest5',
1129: 'iron-chest1-random1',
1130: 'iron-chest2-random1',
1131: 'iron-chest3-random1',
1132: 'iron-chest4-random1',
1133: 'iron-chest5-random1',
1134: 'iron-chest1-random2',
1135: 'iron-chest2-random2',
1136: 'iron-chest3-random2',
1137: 'iron-chest4-random2',
1138: 'iron-chest5-random2',
1139: 'iron-chest1-random3',
1140: 'iron-chest2-random3',
1141: 'iron-chest3-random3',
1142: 'iron-chest4-random3',
1143: 'iron-chest5-random3',
1144: 'iron-chest1-random4',
1145: 'iron-chest2-random4',
1146: 'iron-chest3-random4',
1147: 'iron-chest4-random4',
1148: 'iron-chest5-random4',
1149: 'iron-shoulder1',
1150: 'iron-shoulder2',
1151: 'iron-shoulder3',
1152: 'iron-shoulder4',
1153: 'iron-shoulder5',
1154: 'iron-shoulder1-random1',
1155: 'iron-shoulder2-random1',
1156: 'iron-shoulder3-random1',
1157: 'iron-shoulder4-random1',
1158: 'iron-shoulder5-random1',
1159: 'iron-shoulder1-random2',
1160: 'iron-shoulder2-random2',
1161: 'iron-shoulder3-random2',
1162: 'iron-shoulder4-random2',
1163: 'iron-shoulder5-random2',
1164: 'iron-shoulder1-random3',
1165: 'iron-shoulder2-random3',
1166: 'iron-shoulder3-random3',
1167: 'iron-shoulder4-random3',
1168: 'iron-shoulder5-random3',
1169: 'iron-shoulder1-random4',
1170: 'iron-shoulder2-random4',
1171: 'iron-shoulder3-random4',
1172: 'iron-shoulder4-random4',
1173: 'iron-shoulder5-random4',
1174: 'iron-hand1',
1175: 'iron-hand2',
1176: 'iron-hand3',
1177: 'iron-hand4',
1178: 'iron-hand5',
1179: 'iron-hand1-random1',
1180: 'iron-hand2-random1',
1181: 'iron-hand3-random1',
1182: 'iron-hand4-random1',
1183: 'iron-hand5-random1',
1184: 'iron-hand1-random2',
1185: 'iron-hand2-random2',
1186: 'iron-hand3-random2',
1187: 'iron-hand4-random2',
1188: 'iron-hand5-random2',
1189: 'iron-hand1-random3',
1190: 'iron-hand2-random3',
1191: 'iron-hand3-random3',
1192: 'iron-hand4-random3',
1193: 'iron-hand5-random3',
1194: 'iron-hand1-random4',
1195: 'iron-hand2-random4',
1196: 'iron-hand3-random4',
1197: 'iron-hand4-random4',
1198: 'iron-hand5-random4',
1199: 'iron-foot1',
1200: 'iron-foot2',
1201: 'iron-foot3',
1202: 'iron-foot4',
1203: 'iron-foot5',
1204: 'iron-foot1-random1',
1205: 'iron-foot2-random1',
1206: 'iron-foot3-random1',
1207: 'iron-foot4-random1',
1208: 'iron-foot5-random1',
1209: 'iron-foot1-random2',
1210: 'iron-foot2-random2',
1211: 'iron-foot3-random2',
1212: 'iron-foot4-random2',
1213: 'iron-foot5-random2',
1214: 'iron-foot1-random3',
1215: 'iron-foot2-random3',
1216: 'iron-foot3-random3',
1217: 'iron-foot4-random3',
1218: 'iron-foot5-random3',
1219: 'iron-foot1-random4',
1220: 'iron-foot2-random4',
1221: 'iron-foot3-random4',
1222: 'iron-foot4-random4',
1223: 'iron-foot5-random4',
1224: 'saurian-chest',
1225: 'saurian-shoulder',
1226: 'saurian-glove',
1227: 'saurian-boot',
1228: 'saurian-helmet',
1229: 'innkeeper-chest',
1230: 'backpack',
1231: 'body1',
1232: 'body2',
1233: 'body3',
1234: 'body4',
1235: 'body5',
1236: 'elf-head-m01',
1237: 'elf-head-m02',
1238: 'elf-head-m03',
1239: 'elf-head-m04',
1240: 'elf-head-f01',
1241: 'elf-head-f02',
1242: 'elf-head-f03',
1243: 'elf-head-f04',
1244: 'elf-head-f05',
1245: 'elf-head-f06',
1246: 'human-head-m01',
1247: 'human-head-m02',
1248: 'human-head-m03',
1249: 'human-head-m04',
1250: 'human-head-m05',
1251: 'human-head-m06',
1252: 'human-hair-m01',
1253: 'human-hair-m02',
1254: 'human-hair-m03',
1255: 'human-hair-m04',
1256: 'human-hair-m05',
1257: 'human-hair-m06',
1258: 'human-hair-m07',
1259: 'human-hair-m08',
1260: 'human-hair-m09',
1261: 'human-hair-m10',
1262: 'human-hair-m11',
1263: 'human-hair-m12',
1264: 'human-hair-m13',
1265: 'human-hair-m14',
1266: 'human-hair-m15',
1267: 'human-head-f01',
1268: 'human-head-f02',
1269: 'human-head-f03',
1270: 'human-head-f04',
1271: 'human-head-f05',
1272: 'human-head-f06',
1273: 'human-hair-f01',
1274: 'human-hair-f02',
1275: 'human-hair-f03',
1276: 'human-hair-f04',
1277: 'human-hair-f05',
1278: 'human-hair-f06',
1279: 'human-hair-f07',
1280: 'elf-hair-m01',
1281: 'elf-hair-m02',
1282: 'elf-hair-m03',
1283: 'elf-hair-m04',
1284: 'elf-hair-m05',
1285: 'elf-hair-m06',
1286: 'elf-hair-m07',
1287: 'elf-hair-m08',
1288: 'elf-hair-m09',
1289: 'elf-hair-m10',
1290: 'elf-hair-f01',
1291: 'elf-hair-f02',
1292: 'elf-hair-f03',
1293: 'elf-hair-f04',
1294: 'elf-hair-f05',
1295: 'elf-hair-f06',
1296: 'elf-hair-f07',
1297: 'elf-hair-f08',
1298: 'elf-hair-f09',
1299: 'elf-hair-f10',
1300: 'orc-head',
1301: 'orc-head-m02',
1302: 'orc-head-m03',
1303: 'orc-head-m04',
1304: 'orc-head-m05',
1305: 'orc-head-f01',
1306: 'orc-head-f02',
1307: 'orc-head-f03',
1308: 'orc-head-f04',
1309: 'orc-head-f05',
1310: 'orc-hair-m01',
1311: 'orc-hair-m02',
1312: 'orc-hair-m03',
1313: 'orc-hair-m04',
1314: 'orc-hair-m05',
1315: 'orc-hair-m06',
1316: 'orc-hair-m07',
1317: 'orc-hair-m08',
1318: 'orc-hair-m09',
1319: 'orc-hair-m10',
1320: 'orc-hair-f01',
1321: 'orc-hair-f02',
1322: 'orc-hair-f03',
1323: 'orc-hair-f04',
1324: 'frogman-head-m01',
1325: 'frogman-head-m02',
1326: 'frogman-head-m03',
1327: 'frogman-head-m04',
1328: 'frogman-head-m05',
1329: 'frogman-hair-m01',
1330: 'frogman-hair-m02',
1331: 'frogman-hair-m03',
1332: 'frogman-hair-m04',
1333: 'frogman-hair-m05',
1334: 'frogman-head-f01',
1335: 'frogman-head-f02',
1336: 'frogman-head-f03',
1337: 'frogman-head-f04',
1338: 'frogman-hair-f01',
1339: 'frogman-hair-f02',
1340: 'frogman-hair-f03',
1341: 'frogman-hair-f04',
1342: 'frogman-hand',
1343: 'mermaid-head-f01',
1344: 'mermaid-head-f02',
1345: 'mermaid-head-f03',
1346: 'mermaid-hair-f01',
1347: 'mermaid-hair-f02',
1348: 'mermaid-hair-f03',
1349: 'mermaid-body',
1350: 'mermaid-hand',
1351: 'merman-head-m01',
1352: 'merman-head-m02',
1353: 'merman-head-m03',
1354: 'merman-hair-m01',
1355: 'merman-hair-m02',
1356: 'merman-hair-m03',
1357: 'merman-body',
1358: 'mermaid-hand',
1359: 'linen-chest1',
1360: 'linen-chest2',
1361: 'linen-chest3',
1362: 'linen-chest4',
1363: 'linen-chest5',
1364: 'linen-chest1-random1',
1365: 'linen-chest2-random1',
1366: 'linen-chest3-random1',
1367: 'linen-chest4-random1',
1368: 'linen-chest5-random1',
1369: 'linen-chest1-random2',
1370: 'linen-chest2-random2',
1371: 'linen-chest3-random2',
1372: 'linen-chest4-random2',
1373: 'linen-chest5-random2',
1374: 'linen-chest1-random3',
1375: 'linen-chest2-random3',
1376: 'linen-chest3-random3',
1377: 'linen-chest4-random3',
1378: 'linen-chest5-random3',
1379: 'linen-chest1-random4',
1380: 'linen-chest2-random4',
1381: 'linen-chest3-random4',
1382: 'linen-chest4-random4',
1383: 'linen-chest5-random4',
1384: 'linen-shoulder1',
1385: 'linen-shoulder2',
1386: 'linen-shoulder3',
1387: 'linen-shoulder4',
1388: 'linen-shoulder5',
1389: 'linen-shoulder1-random1',
1390: 'linen-shoulder2-random1',
1391: 'linen-shoulder3-random1',
1392: 'linen-shoulder4-random1',
1393: 'linen-shoulder5-random1',
1394: 'linen-shoulder1-random2',
1395: 'linen-shoulder2-random2',
1396: 'linen-shoulder3-random2',
1397: 'linen-shoulder4-random2',
1398: 'linen-shoulder5-random2',
1399: 'linen-shoulder1-random3',
1400: 'linen-shoulder2-random3',
1401: 'linen-shoulder3-random3',
1402: 'linen-shoulder4-random3',
1403: 'linen-shoulder5-random3',
1404: 'linen-shoulder1-random4',
1405: 'linen-shoulder2-random4',
1406: 'linen-shoulder3-random4',
1407: 'linen-shoulder4-random4',
1408: 'linen-shoulder5-random4',
1409: 'linen-foot1',
1410: 'linen-foot2',
1411: 'linen-foot3',
1412: 'linen-foot4',
1413: 'linen-foot5',
1414: 'linen-foot1-random1',
1415: 'linen-foot2-random1',
1416: 'linen-foot3-random1',
1417: 'linen-foot4-random1',
1418: 'linen-foot5-random1',
1419: 'linen-foot1-random2',
1420: 'linen-foot2-random2',
1421: 'linen-foot3-random2',
1422: 'linen-foot4-random2',
1423: 'linen-foot5-random2',
1424: 'linen-foot1-random3',
1425: 'linen-foot2-random3',
1426: 'linen-foot3-random3',
1427: 'linen-foot4-random3',
1428: 'linen-foot5-random3',
1429: 'linen-foot1-random4',
1430: 'linen-foot2-random4',
1431: 'linen-foot3-random4',
1432: 'linen-foot4-random4',
1433: 'linen-foot5-random4',
1434: 'linen-hand1',
1435: 'linen-hand2',
1436: 'linen-hand3',
1437: 'linen-hand4',
1438: 'linen-hand5',
1439: 'linen-hand1-random1',
1440: 'linen-hand2-random1',
1441: 'linen-hand3-random1',
1442: 'linen-hand4-random1',
1443: 'linen-hand5-random1',
1444: 'linen-hand1-random2',
1445: 'linen-hand2-random2',
1446: 'linen-hand3-random2',
1447: 'linen-hand4-random2',
1448: 'linen-hand5-random2',
1449: 'linen-hand1-random3',
1450: 'linen-hand2-random3',
1451: 'linen-hand3-random3',
1452: 'linen-hand4-random3',
1453: 'linen-hand5-random3',
1454: 'linen-hand1-random4',
1455: 'linen-hand2-random4',
1456: 'linen-hand3-random4',
1457: 'linen-hand4-random4',
1458: 'linen-hand5-random4',
1459: 'wool-chest1',
1460: 'wool-chest2',
1461: 'wool-chest3',
1462: 'wool-chest4',
1463: 'wool-chest5',
1464: 'wool-chest1-random1',
1465: 'wool-chest2-random1',
1466: 'wool-chest3-random1',
1467: 'wool-chest4-random1',
1468: 'wool-chest5-random1',
1469: 'wool-chest1-random2',
1470: 'wool-chest2-random2',
1471: 'wool-chest3-random2',
1472: 'wool-chest4-random2',
1473: 'wool-chest5-random2',
1474: 'wool-chest1-random3',
1475: 'wool-chest2-random3',
1476: 'wool-chest3-random3',
1477: 'wool-chest4-random3',
1478: 'wool-chest5-random3',
1479: 'wool-chest1-random4',
1480: 'wool-chest2-random4',
1481: 'wool-chest3-random4',
1482: 'wool-chest4-random4',
1483: 'wool-chest5-random4',
1484: 'wool-shoulder1',
1485: 'wool-shoulder2',
1486: 'wool-shoulder3',
1487: 'wool-shoulder4',
1488: 'wool-shoulder5',
1489: 'wool-shoulder1-random1',
1490: 'wool-shoulder2-random1',
1491: 'wool-shoulder3-random1',
1492: 'wool-shoulder4-random1',
1493: 'wool-shoulder5-random1',
1494: 'wool-shoulder1-random2',
1495: 'wool-shoulder2-random2',
1496: 'wool-shoulder3-random2',
1497: 'wool-shoulder4-random2',
1498: 'wool-shoulder5-random2',
1499: 'wool-shoulder1-random3',
1500: 'wool-shoulder2-random3',
1501: 'wool-shoulder3-random3',
1502: 'wool-shoulder4-random3',
1503: 'wool-shoulder5-random3',
1504: 'wool-shoulder1-random4',
1505: 'wool-shoulder2-random4',
1506: 'wool-shoulder3-random4',
1507: 'wool-shoulder4-random4',
1508: 'wool-shoulder5-random4',
1509: 'wool-foot1',
1510: 'wool-foot2',
1511: 'wool-foot3',
1512: 'wool-foot4',
1513: 'wool-foot5',
1514: 'wool-foot1-random1',
1515: 'wool-foot2-random1',
1516: 'wool-foot3-random1',
1517: 'wool-foot4-random1',
1518: 'wool-foot5-random1',
1519: 'wool-foot1-random2',
1520: 'wool-foot2-random2',
1521: 'wool-foot3-random2',
1522: 'wool-foot4-random2',
1523: 'wool-foot5-random2',
1524: 'wool-foot1-random3',
1525: 'wool-foot2-random3',
1526: 'wool-foot3-random3',
1527: 'wool-foot4-random3',
1528: 'wool-foot5-random3',
1529: 'wool-foot1-random4',
1530: 'wool-foot2-random4',
1531: 'wool-foot3-random4',
1532: 'wool-foot4-random4',
1533: 'wool-foot5-random4',
1534: 'wool-hand1',
1535: 'wool-hand2',
1536: 'wool-hand3',
1537: 'wool-hand4',
1538: 'wool-hand5',
1539: 'wool-hand1-random1',
1540: 'wool-hand2-random1',
1541: 'wool-hand3-random1',
1542: 'wool-hand4-random1',
1543: 'wool-hand5-random1',
1544: 'wool-hand1-random2',
1545: 'wool-hand2-random2',
1546: 'wool-hand3-random2',
1547: 'wool-hand4-random2',
1548: 'wool-hand5-random2',
1549: 'wool-hand1-random3',
1550: 'wool-hand2-random3',
1551: 'wool-hand3-random3',
1552: 'wool-hand4-random3',
1553: 'wool-hand5-random3',
1554: 'wool-hand1-random4',
1555: 'wool-hand2-random4',
1556: 'wool-hand3-random4',
1557: 'wool-hand4-random4',
1558: 'wool-hand5-random4',
1564: 'gold-amulet1-random1',
1565: 'gold-amulet2-random1',
1566: 'gold-amulet3-random1',
1567: 'gold-amulet4-random1',
1568: 'gold-amulet5-random1',
1569: 'gold-amulet1-random2',
1570: 'gold-amulet2-random2',
1571: 'gold-amulet3-random2',
1572: 'gold-amulet4-random2',
1573: 'gold-amulet5-random2',
1574: 'gold-amulet1-random3',
1575: 'gold-amulet2-random3',
1576: 'gold-amulet3-random3',
1577: 'gold-amulet4-random3',
1578: 'gold-amulet5-random3',
1579: 'gold-amulet1-random4',
1580: 'gold-amulet2-random4',
1581: 'gold-amulet3-random4',
1582: 'gold-amulet4-random4',
1583: 'gold-amulet5-random4',
1584: 'gold-amulet1-random5',
1585: 'gold-amulet2-random5',
1586: 'gold-amulet3-random5',
1587: 'gold-amulet4-random5',
1588: 'gold-amulet5-random5',
1594: 'silver-amulet1-random1',
1595: 'silver-amulet2-random1',
1596: 'silver-amulet3-random1',
1597: 'silver-amulet4-random1',
1598: 'silver-amulet5-random1',
1599: 'silver-amulet1-random2',
1600: 'silver-amulet2-random2',
1601: 'silver-amulet3-random2',
1602: 'silver-amulet4-random2',
1603: 'silver-amulet5-random2',
1604: 'silver-amulet1-random3',
1605: 'silver-amulet2-random3',
1606: 'silver-amulet3-random3',
1607: 'silver-amulet4-random3',
1608: 'silver-amulet5-random3',
1609: 'silver-amulet1-random4',
1610: 'silver-amulet2-random4',
1611: 'silver-amulet3-random4',
1612: 'silver-amulet4-random4',
1613: 'silver-amulet5-random4',
1614: 'silver-amulet1-random5',
1615: 'silver-amulet2-random5',
1616: 'silver-amulet3-random5',
1617: 'silver-amulet4-random5',
1618: 'silver-amulet5-random5',
1619: 'gold-ring1-random1',
1620: 'gold-ring2-random1',
1621: 'gold-ring3-random1',
1622: 'gold-ring4-random1',
1623: 'gold-ring5-random1',
1624: 'gold-ring1-random2',
1625: 'gold-ring2-random2',
1626: 'gold-ring3-random2',
1627: 'gold-ring4-random2',
1628: 'gold-ring5-random2',
1629: 'gold-ring1-random3',
1630: 'gold-ring2-random3',
1631: 'gold-ring3-random3',
1632: 'gold-ring4-random3',
1633: 'gold-ring5-random3',
1634: 'gold-ring1-random4',
1635: 'gold-ring2-random4',
1636: 'gold-ring3-random4',
1637: 'gold-ring4-random4',
1638: 'gold-ring5-random4',
1639: 'gold-ring1-random5',
1640: 'gold-ring2-random5',
1641: 'gold-ring3-random5',
1642: 'gold-ring4-random5',
1643: 'gold-ring5-random5',
1644: 'silver-ring1-random1',
1645: 'silver-ring2-random1',
1646: 'silver-ring3-random1',
1647: 'silver-ring4-random1',
1648: 'silver-ring5-random1',
1649: 'silver-ring1-random2',
1650: 'silver-ring2-random2',
1651: 'silver-ring3-random2',
1652: 'silver-ring4-random2',
1653: 'silver-ring5-random2',
1654: 'silver-ring1-random3',
1655: 'silver-ring2-random3',
1656: 'silver-ring3-random3',
1657: 'silver-ring4-random3',
1658: 'silver-ring5-random3',
1659: 'silver-ring1-random4',
1660: 'silver-ring2-random4',
1661: 'silver-ring3-random4',
1662: 'silver-ring4-random4',
1663: 'silver-ring5-random4',
1664: 'silver-ring1-random5',
1665: 'silver-ring2-random5',
1666: 'silver-ring3-random5',
1667: 'silver-ring4-random5',
1668: 'silver-ring5-random5',
1669: 'silk-chest1',
1670: 'silk-chest2',
1671: 'silk-chest3',
1672: 'silk-chest4',
1673: 'silk-chest5',
1674: 'silk-chest1-random1',
1675: 'silk-chest2-random1',
1676: 'silk-chest3-random1',
1677: 'silk-chest4-random1',
1678: 'silk-chest5-random1',
1679: 'silk-chest1-random2',
1680: 'silk-chest2-random2',
1681: 'silk-chest3-random2',
1682: 'silk-chest4-random2',
1683: 'silk-chest5-random2',
1684: 'silk-chest1-random3',
1685: 'silk-chest2-random3',
1686: 'silk-chest3-random3',
1687: 'silk-chest4-random3',
1688: 'silk-chest5-random3',
1689: 'silk-chest1-random4',
1690: 'silk-chest2-random4',
1691: 'silk-chest3-random4',
1692: 'silk-chest4-random4',
1693: 'silk-chest5-random4',
1694: 'silk-shoulder1',
1695: 'silk-shoulder2',
1696: 'silk-shoulder3',
1697: 'silk-shoulder4',
1698: 'silk-shoulder5',
1699: 'silk-shoulder1-random1',
1700: 'silk-shoulder2-random1',
1701: 'silk-shoulder3-random1',
1702: 'silk-shoulder4-random1',
1703: 'silk-shoulder5-random1',
1704: 'silk-shoulder1-random2',
1705: 'silk-shoulder2-random2',
1706: 'silk-shoulder3-random2',
1707: 'silk-shoulder4-random2',
1708: 'silk-shoulder5-random2',
1709: 'silk-shoulder1-random3',
1710: 'silk-shoulder2-random3',
1711: 'silk-shoulder3-random3',
1712: 'silk-shoulder4-random3',
1713: 'silk-shoulder5-random3',
1714: 'silk-shoulder1-random4',
1715: 'silk-shoulder2-random4',
1716: 'silk-shoulder3-random4',
1717: 'silk-shoulder4-random4',
1718: 'silk-shoulder5-random4',
1719: 'silk-foot1',
1720: 'silk-foot2',
1721: 'silk-foot3',
1722: 'silk-foot4',
1723: 'silk-foot5',
1724: 'silk-foot1-random1',
1725: 'silk-foot2-random1',
1726: 'silk-foot3-random1',
1727: 'silk-foot4-random1',
1728: 'silk-foot5-random1',
1729: 'silk-foot1-random2',
1730: 'silk-foot2-random2',
1731: 'silk-foot3-random2',
1732: 'silk-foot4-random2',
1733: 'silk-foot5-random2',
1734: 'silk-foot1-random3',
1735: 'silk-foot2-random3',
1736: 'silk-foot3-random3',
1737: 'silk-foot4-random3',
1738: 'silk-foot5-random3',
1739: 'silk-foot1-random4',
1740: 'silk-foot2-random4',
1741: 'silk-foot3-random4',
1742: 'silk-foot4-random4',
1743: 'silk-foot5-random4',
1744: 'silk-hand1',
1745: 'silk-hand2',
1746: 'silk-hand3',
1747: 'silk-hand4',
1748: 'silk-hand5',
1749: 'silk-hand1-random1',
1750: 'silk-hand2-random1',
1751: 'silk-hand3-random1',
1752: 'silk-hand4-random1',
1753: 'silk-hand5-random1',
1754: 'silk-hand1-random2',
1755: 'silk-hand2-random2',
1756: 'silk-hand3-random2',
1757: 'silk-hand4-random2',
1758: 'silk-hand5-random2',
1759: 'silk-hand1-random3',
1760: 'silk-hand2-random3',
1761: 'silk-hand3-random3',
1762: 'silk-hand4-random3',
1763: 'silk-hand5-random3',
1764: 'silk-hand1-random4',
1765: 'silk-hand2-random4',
1766: 'silk-hand3-random4',
1767: 'silk-hand4-random4',
1768: 'silk-hand5-random4',
1769: 'iron-helmet',
1770: 'iron-axe1',
1771: 'iron-axe2',
1772: 'iron-axe3',
1773: 'iron-axe4',
1774: 'iron-axe5',
1775: 'iron-axe1-random1',
1776: 'iron-axe2-random1',
1777: 'iron-axe3-random1',
1778: 'iron-axe4-random1',
1779: 'iron-axe5-random1',
1780: 'iron-axe1-random2',
1781: 'iron-axe2-random2',
1782: 'iron-axe3-random2',
1783: 'iron-axe4-random2',
1784: 'iron-axe5-random2',
1785: 'iron-axe1-random3',
1786: 'iron-axe2-random3',
1787: 'iron-axe3-random3',
1788: 'iron-axe4-random3',
1789: 'iron-axe5-random3',
1790: 'iron-axe1-random4',
1791: 'iron-axe2-random4',
1792: 'iron-axe3-random4',
1793: 'iron-axe4-random4',
1794: 'iron-axe5-random4',
1795: 'iron-axe1-random5',
1796: 'iron-axe2-random5',
1797: 'iron-axe3-random5',
1798: 'iron-axe4-random5',
1799: 'iron-axe5-random5',
1800: 'iron-axe1-random6',
1801: 'iron-axe2-random6',
1802: 'iron-axe3-random6',
1803: 'iron-axe4-random6',
1804: 'iron-axe5-random6',
1805: 'iron-axe1-random7',
1806: 'iron-axe2-random7',
1807: 'iron-axe3-random7',
1808: 'iron-axe4-random7',
1809: 'iron-axe5-random7',
1810: 'iron-axe1-random8',
1811: 'iron-axe2-random8',
1812: 'iron-axe3-random8',
1813: 'iron-axe4-random8',
1814: 'iron-axe5-random8',
1815: 'iron-axe1-random9',
1816: 'iron-axe2-random9',
1817: 'iron-axe3-random9',
1818: 'iron-axe4-random9',
1819: 'iron-axe5-random9',
1820: 'iron-axe1-random10',
1821: 'iron-axe2-random10',
1822: 'iron-axe3-random10',
1823: 'iron-axe4-random10',
1824: 'iron-axe5-random10',
1825: 'iron-greatsword1',
1826: 'iron-greatsword2',
1827: 'iron-greatsword3',
1828: 'iron-greatsword4',
1829: 'iron-greatsword5',
1830: 'iron-greatsword1-random1',
1831: 'iron-greatsword2-random1',
1832: 'iron-greatsword3-random1',
1833: 'iron-greatsword4-random1',
1834: 'iron-greatsword5-random1',
1835: 'iron-greatsword1-random2',
1836: 'iron-greatsword2-random2',
1837: 'iron-greatsword3-random2',
1838: 'iron-greatsword4-random2',
1839: 'iron-greatsword5-random2',
1840: 'iron-greatsword1-random3',
1841: 'iron-greatsword2-random3',
1842: 'iron-greatsword3-random3',
1843: 'iron-greatsword4-random3',
1844: 'iron-greatsword5-random3',
1845: 'iron-greatsword1-random4',
1846: 'iron-greatsword2-random4',
1847: 'iron-greatsword3-random4',
1848: 'iron-greatsword4-random4',
1849: 'iron-greatsword5-random4',
1850: 'iron-greatsword1-random5',
1851: 'iron-greatsword2-random5',
1852: 'iron-greatsword3-random5',
1853: 'iron-greatsword4-random5',
1854: 'iron-greatsword5-random5',
1855: 'iron-greatsword1-random6',
1856: 'iron-greatsword2-random6',
1857: 'iron-greatsword3-random6',
1858: 'iron-greatsword4-random6',
1859: 'iron-greatsword5-random6',
1860: 'iron-greatsword1-random7',
1861: 'iron-greatsword2-random7',
1862: 'iron-greatsword3-random7',
1863: 'iron-greatsword4-random7',
1864: 'iron-greatsword5-random7',
1865: 'iron-greatsword1-random8',
1866: 'iron-greatsword2-random8',
1867: 'iron-greatsword3-random8',
1868: 'iron-greatsword4-random8',
1869: 'iron-greatsword5-random8',
1870: 'iron-greatsword1-random9',
1871: 'iron-greatsword2-random9',
1872: 'iron-greatsword3-random9',
1873: 'iron-greatsword4-random9',
1874: 'iron-greatsword5-random9',
1875: 'iron-greatsword1-random10',
1876: 'iron-greatsword2-random10',
1877: 'iron-greatsword3-random10',
1878: 'iron-greatsword4-random10',
1879: 'iron-greatsword5-random10',
1880: 'iron-longsword1',
1881: 'iron-longsword2',
1882: 'iron-longsword3',
1883: 'iron-longsword4',
1884: 'iron-longsword5',
1885: 'iron-longsword1-random1',
1886: 'iron-longsword2-random1',
1887: 'iron-longsword3-random1',
1888: 'iron-longsword4-random1',
1889: 'iron-longsword5-random1',
1890: 'iron-longsword1-random2',
1891: 'iron-longsword2-random2',
1892: 'iron-longsword3-random2',
1893: 'iron-longsword4-random2',
1894: 'iron-longsword5-random2',
1895: 'iron-longsword1-random3',
1896: 'iron-longsword2-random3',
1897: 'iron-longsword3-random3',
1898: 'iron-longsword4-random3',
1899: 'iron-longsword5-random3',
1900: 'iron-longsword1-random4',
1901: 'iron-longsword2-random4',
1902: 'iron-longsword3-random4',
1903: 'iron-longsword4-random4',
1904: 'iron-longsword5-random4',
1905: 'iron-longsword1-random5',
1906: 'iron-longsword2-random5',
1907: 'iron-longsword3-random5',
1908: 'iron-longsword4-random5',
1909: 'iron-longsword5-random5',
1910: 'iron-longsword1-random6',
1911: 'iron-longsword2-random6',
1912: 'iron-longsword3-random6',
1913: 'iron-longsword4-random6',
1914: 'iron-longsword5-random6',
1915: 'iron-longsword1-random7',
1916: 'iron-longsword2-random7',
1917: 'iron-longsword3-random7',
1918: 'iron-longsword4-random7',
1919: 'iron-longsword5-random7',
1920: 'iron-longsword1-random8',
1921: 'iron-longsword2-random8',
1922: 'iron-longsword3-random8',
1923: 'iron-longsword4-random8',
1924: 'iron-longsword5-random8',
1925: 'iron-longsword1-random9',
1926: 'iron-longsword2-random9',
1927: 'iron-longsword3-random9',
1928: 'iron-longsword4-random9',
1929: 'iron-longsword5-random9',
1930: 'iron-longsword1-random10',
1931: 'iron-longsword2-random10',
1932: 'iron-longsword3-random10',
1933: 'iron-longsword4-random10',
1934: 'iron-longsword5-random10',
1935: 'bone-greatsword',
1936: 'obsidian-greatsword',
1937: 'iron-greataxe1',
1938: 'iron-greataxe2',
1939: 'iron-greataxe3',
1940: 'iron-greataxe4',
1941: 'iron-greataxe5',
1942: 'iron-greataxe1-random1',
1943: 'iron-greataxe2-random1',
1944: 'iron-greataxe3-random1',
1945: 'iron-greataxe4-random1',
1946: 'iron-greataxe5-random1',
1947: 'iron-greataxe1-random2',
1948: 'iron-greataxe2-random2',
1949: 'iron-greataxe3-random2',
1950: 'iron-greataxe4-random2',
1951: 'iron-greataxe5-random2',
1952: 'iron-greataxe1-random3',
1953: 'iron-greataxe2-random3',
1954: 'iron-greataxe3-random3',
1955: 'iron-greataxe4-random3',
1956: 'iron-greataxe5-random3',
1957: 'iron-greataxe1-random4',
1958: 'iron-greataxe2-random4',
1959: 'iron-greataxe3-random4',
1960: 'iron-greataxe4-random4',
1961: 'iron-greataxe5-random4',
1962: 'iron-greataxe1-random5',
1963: 'iron-greataxe2-random5',
1964: 'iron-greataxe3-random5',
1965: 'iron-greataxe4-random5',
1966: 'iron-greataxe5-random5',
1967: 'iron-greataxe1-random6',
1968: 'iron-greataxe2-random6',
1969: 'iron-greataxe3-random6',
1970: 'iron-greataxe4-random6',
1971: 'iron-greataxe5-random6',
1972: 'iron-greataxe1-random7',
1973: 'iron-greataxe2-random7',
1974: 'iron-greataxe3-random7',
1975: 'iron-greataxe4-random7',
1976: 'iron-greataxe5-random7',
1977: 'iron-greataxe1-random8',
1978: 'iron-greataxe2-random8',
1979: 'iron-greataxe3-random8',
1980: 'iron-greataxe4-random8',
1981: 'iron-greataxe5-random8',
1982: 'iron-greataxe1-random9',
1983: 'iron-greataxe2-random9',
1984: 'iron-greataxe3-random9',
1985: 'iron-greataxe4-random9',
1986: 'iron-greataxe5-random9',
1987: 'iron-greataxe1-random10',
1988: 'iron-greataxe2-random10',
1989: 'iron-greataxe3-random10',
1990: 'iron-greataxe4-random10',
1991: 'iron-greataxe5-random10',
1992: 'bone-greataxe',
1993: 'obsidian-greataxe',
1994: 'saurian-greataxe',
1995: 'wood-greatmace02',
1996: 'iron-greatmace1',
1997: 'iron-greatmace2',
1998: 'iron-greatmace3',
1999: 'iron-greatmace4',
2000: 'iron-greatmace5',
2001: 'iron-greatmace1-random1',
2002: 'iron-greatmace2-random1',
2003: 'iron-greatmace3-random1',
2004: 'iron-greatmace4-random1',
2005: 'iron-greatmace5-random1',
2006: 'iron-greatmace1-random2',
2007: 'iron-greatmace2-random2',
2008: 'iron-greatmace3-random2',
2009: 'iron-greatmace4-random2',
2010: 'iron-greatmace5-random2',
2011: 'iron-greatmace1-random3',
2012: 'iron-greatmace2-random3',
2013: 'iron-greatmace3-random3',
2014: 'iron-greatmace4-random3',
2015: 'iron-greatmace5-random3',
2016: 'iron-greatmace1-random4',
2017: 'iron-greatmace2-random4',
2018: 'iron-greatmace3-random4',
2019: 'iron-greatmace4-random4',
2020: 'iron-greatmace5-random4',
2021: 'iron-greatmace1-random5',
2022: 'iron-greatmace2-random5',
2023: 'iron-greatmace3-random5',
2024: 'iron-greatmace4-random5',
2025: 'iron-greatmace5-random5',
2026: 'iron-greatmace1-random6',
2027: 'iron-greatmace2-random6',
2028: 'iron-greatmace3-random6',
2029: 'iron-greatmace4-random6',
2030: 'iron-greatmace5-random6',
2031: 'iron-greatmace1-random7',
2032: 'iron-greatmace2-random7',
2033: 'iron-greatmace3-random7',
2034: 'iron-greatmace4-random7',
2035: 'iron-greatmace5-random7',
2036: 'iron-greatmace1-random8',
2037: 'iron-greatmace2-random8',
2038: 'iron-greatmace3-random8',
2039: 'iron-greatmace4-random8',
2040: 'iron-greatmace5-random8',
2041: 'iron-greatmace1-random9',
2042: 'iron-greatmace2-random9',
2043: 'iron-greatmace3-random9',
2044: 'iron-greatmace4-random9',
2045: 'iron-greatmace5-random9',
2046: 'iron-greatmace1-random10',
2047: 'iron-greatmace2-random10',
2048: 'iron-greatmace3-random10',
2049: 'iron-greatmace4-random10',
2050: 'iron-greatmace5-random10',
2051: 'iron-pickaxe',
2052: 'bone-greatmace',
2053: 'obsidian-greatmace',
2054: 'obsidian-chest',
2055: 'obsidian-shoulders',
2056: 'obsidian-boot',
2057: 'obsidian-glove',
2058: 'obsidian-helmet',
2059: 'obsidian-sword',
2062: 'gold-chest',
2063: 'gold-shoulder',
2064: 'gold-boot',
2065: 'gold-glove',
2066: 'gold-helmet',
2067: 'bone-chest',
2068: 'bone-shoulders',
2069: 'bone-boot',
2070: 'bone-glove',
2072: 'bone-sword',
2073: 'bone-mace',
2074: 'bone-axe',
2075: 'cube',
2076: 'door',
2077: 'window',
2078: 'goddess2',
2079: 'artifact',
2080: 'pet-box',
2081: 'quest-item-amulet01',
2082: 'quest-item-amulet02',
2083: 'quest-item-jewel-case',
2084: 'copper-coin',
2085: 'silver-coin',
2086: 'gold-coin',
2087: 'bush',
2088: 'snow-bush',
2089: 'cobwebscrub',
2090: 'berry-bush',
2091: 'snow-berry',
2092: 'snow-berry-mash',
2093: 'scrub',
2094: 'scrub-green',
2095: 'fire-scrub',
2096: 'ginseng',
2097: 'ginseng-root',
2098: 'fir-tree',
2099: 'thorn-tree',
2100: 'gold-deposit',
2101: 'iron-deposit',
2102: 'silver-deposit',
2103: 'sandstone-deposit',
2104: 'emerald-deposit',
2105: 'sapphire-deposit',
2106: 'ruby-deposit',
2107: 'diamond-deposit',
2108: 'ice-crystal-deposit',
2109: 'scarecrow',
2110: 'aim',
2111: 'dummy',
2112: 'tree-leaves',
2113: 'vase',
2114: 'vase2',
2115: 'vase3',
2116: 'vase4',
2117: 'candle01',
2118: 'candle02',
2119: 'candle03',
2120: 'undead-candle1',
2121: 'undead-candle2',
2122: 'undead-candle3',
2123: 'character-platform',
2124: 'antique-building1',
2125: 'antique-building2',
2126: 'antique-building3',
2127: 'antique-building4',
2128: 'entrance-crypt',
2129: 'entrance-barrow',
2130: 'entrance-mine',
2131: 'entrance-antique',
2132: 'entrance-tomb',
2133: 'monster-body-reptile-shell',
2134: 'monster-body-reptile-shell-spiked',
2135: 'monster-body-reptile-crest1',
2136: 'monster-body-reptile-crest2',
2137: 'monster-body-reptile-crest3',
2138: 'monster-body-reptile-crest4',
2139: 'monster-body-reptile-crest5',
2140: 'monster-body-reptile-spiked1',
2141: 'monster-body-reptile-spiked2',
2142: 'monster-foot-reptile-normal',
2143: 'monster-foot-reptile-claw1',
2144: 'monster-foot-reptile-claw2',
2145: 'monster-head-reptile-lizard',
2146: 'monster-head-reptile-turtle',
2147: 'monster-head-reptile-horn1',
2148: 'monster-head-reptile-horn2',
2149: 'monster-head-reptile-horn3',
2150: 'monster-tail-reptile-normal',
2151: 'monster-tail-reptile-spike1',
2152: 'monster-tail-reptile-spike2',
2153: 'rune-giant-head-normal01',
2154: 'rune-giant-head-laser',
2155: 'rune-giant-body01',
2156: 'rune-giant-body02',
2157: 'rune-giant-hand01',
2158: 'rune-giant-foot01',
2159: 'cotton-plant',
2160: 'cotton',
2161: 'turtle-body',
2162: 'turtle-head',
2163: 'turtle-foot',
2164: 'egg1',
2165: 'egg1',
2166: 'egg1',
2167: 'werewolf-head',
2168: 'werewolf-hand',
2169: 'werewolf-body',
2170: 'werewolf-foot',
2171: 'vampire-head',
2172: 'vampire-eyes',
2173: 'vampire-hand',
2174: 'frame-house01',
2175: 'frame-inn',
2176: 'frame-shop',
2177: 'frame-tower',
2178: 'building-stone-well',
2179: 'building-stilt-inn1',
2180: 'building-stilt-hut3',
2181: 'building-stilt-hut2',
2182: 'building-stilt-path',
2183: 'landingplace',
2184: 'stone-bridge',
2185: 'entrance-stairs',
2186: 'carpet1',
2187: 'carpet2',
2188: 'carpet3',
2189: 'framework-base',
2190: 'framework-floor',
2191: 'framework-floor-stairs',
2192: 'framework-wall',
2193: 'framework-wall-window',
2194: 'framework-wall-door',
2195: 'framework-wall-indoor',
2196: 'framework-wall-balcony',
2197: 'framework-wall-lamp',
2198: 'framework-roof1',
2199: 'framework-roof2',
2200: 'framework-roof3',
2201: 'framework-arc',
2202: 'stone-base',
2203: 'stone-floor',
2204: 'stone-floor-stairs',
2205: 'stone-wall',
2206: 'stone-wall-window',
2207: 'stone-wall-door',
2208: 'stone-wall-indoor',
2209: 'stone-wall-balcony',
2210: 'stone-wall-lamp',
2211: 'stone-roof1',
2212: 'stone-roof2',
2213: 'stone-roof3',
2214: 'stone-arc',
2215: 'whitewood-base',
2216: 'whitewood-floor',
2217: 'whitewood-floor-stairs',
2218: 'whitewood-wall',
2219: 'whitewood-wall-window',
2220: 'whitewood-wall-door',
2221: 'whitewood-wall-indoor',
2222: 'whitewood-wall-balcony',
2223: 'whitewood-wall-lamp',
2224: 'whitewood-roof1',
2225: 'whitewood-roof2',
2226: 'whitewood-roof3',
2227: 'whitewood-arc',
2228: 'clay-base',
2229: 'clay-floor',
2230: 'clay-floor-stairs',
2231: 'clay-wall',
2232: 'clay-wall-window',
2233: 'clay-wall-door',
2234: 'clay-wall-indoor',
2235: 'clay-wall-balcony',
2236: 'clay-wall-lamp',
2237: 'clay-roof1',
2238: 'clay-roof2',
2239: 'clay-roof3',
2240: 'clay-arc',
2241: 'clay-entrance-stairs',
2242: 'whiteclay-entrance-stairs',
2243: 'whiteclay-base',
2244: 'whiteclay-floor',
2245: 'whiteclay-floor-stairs',
2246: 'whiteclay-wall',
2247: 'whiteclay-wall-window',
2248: 'whiteclay-wall-door',
2249: 'whiteclay-wall-indoor',
2250: 'whiteclay-wall-balcony',
2251: 'whiteclay-wall-lamp',
2252: 'whiteclay-roof1',
2253: 'whiteclay-roof2',
2254: 'whiteclay-roof3',
2255: 'whiteclay-arc',
2256: 'antiqueruin-entrance-stairs',
2257: 'antiqueruin-base',
2258: 'antiqueruin-floor',
2259: 'antiqueruin-floor-stairs',
2260: 'antiqueruin-wall',
2261: 'antiqueruin-roof1',
2262: 'antiqueruin-roof2',
2263: 'antiqueruin-roof3',
2264: 'antiqueruin-arc',
2265: 'jungleruin-entrance-stairs',
2266: 'jungleruin-base',
2267: 'jungleruin-floor',
2268: 'jungleruin-floor-stairs',
2269: 'jungleruin-wall',
2270: 'jungleruin-wall-door',
2271: 'jungleruin-wall-indoor',
2272: 'jungleruin-roof1',
2273: 'jungleruin-roof2',
2274: 'jungleruin-roof3',
2275: 'jungleruin-arc',
2276: 'desertruin-entrance-stairs',
2277: 'desertruin-base',
2278: 'desertruin-floor',
2279: 'desertruin-floor-stairs',
2280: 'desertruin-wall',
2281: 'desertruin-wall-door',
2282: 'desertruin-wall-indoor',
2283: 'desertruin-roof1',
2284: 'desertruin-roof2',
2285: 'desertruin-roof3',
2286: 'desertruin-arc',
2287: 'wood-base',
2288: 'wood-floor',
2289: 'wood-floor-stairs',
2290: 'wood-wall',
2291: 'wood-wall-window',
2292: 'wood-wall-door',
2293: 'wood-wall-indoor',
2294: 'wood-wall-balcony',
2295: 'wood-wall-lamp',
2296: 'wood-roof1',
2297: 'wood-roof2',
2298: 'wood-roof3',
2299: 'wood-arc',
2300: 'palm-leaf',
2301: 'palm-leaf-diagonal',
2302: 'white-castle-round-tower',
2303: 'building-warrior',
2304: 'building-ranger',
2305: 'building-rogue',
2306: 'building-mage',
2307: 'building-smithy',
2308: 'building-carpentersshop',
2309: 'building-weavingmill',
2310: 'cactus1',
2311: 'cactus2',
2313: 'painting01',
2314: 'painting02',
2315: 'icon-talk',
2316: 'icon-analyze',
2317: 'icon-vendor',
2318: 'digested-leftovers01',
2319: 'digested-leftovers02',
2320: 'digested-leftovers03',
2321: 'digested-leftovers04',
2322: 'flower-vase1',
2323: 'flower-vase2',
2324: 'flower-vase3',
2325: 'flower-vase4',
2326: 'flower-vase5',
2327: 'flower-vase6',
2328: 'flower-vase7',
2329: 'flower-vase8',
2330: 'flower-vase9',
2331: 'pitchfork',
2332: 'pumpkin',
2333: 'pumpkin-muffin',
2334: 'pineapple',
2335: 'pineapple-slice',
2336: 'spiribit',
2337: 'iron-nugget',
2338: 'silver-nugget',
2339: 'gold-nugget',
2340: 'emerald-nugget',
2341: 'sapphire-nugget',
2342: 'ruby-nugget',
2343: 'diamond-nugget',
2344: 'iron-cube',
2345: 'silver-cube',
2346: 'gold-cube',
2347: 'wood-cube',
2348: 'fire-cube',
2349: 'ice-cube',
2350: 'unholy-cube',
2351: 'wind-cube',
2352: 'cobweb',
2357: 'ice-crystal',
2358: 'ice-crystal-helmet',
2359: 'ice-crystal-shoulder',
2360: 'ice-crystal-chest',
2361: 'ice-crystal-boot',
2362: 'ice-crystal-glove',
2363: 'ice-coated-yarn',
2364: 'sandstone',
2365: 'wood-log',
2366: 'parrot-feather',
2367: 'parrot-beak',
2368: 'parrot-glove',
2369: 'parrot-chest',
2370: 'parrot-boot',
2371: 'parrot-helmet',
2372: 'parrot-shoulder',
2373: 'mammoth-hair',
2374: 'mammoth-glove',
2375: 'mammoth-chest',
2376: 'mammoth-boot',
2377: 'mammoth-helmet',
2378: 'mammoth-shoulder',
2379: 'bullterrier-head1',
2380: 'bullterrier-head2',
2381: 'bullterrier-head3',
2382: 'gnobold-foot',
2383: 'gnobold-head',
2384: 'gnobold-hand',
2385: 'insectoid-foot',
2386: 'insectoid-head',
2387: 'insectoid-hand',
2388: 'insectoid-body',
2389: 'flask',
2390: 'water-flask',
2391: 'radish-slice',
2392: 'onion-slice',
2393: 'mushroom-spit',
2394: 'plant-fiber',
2395: 'soup-ginseng',
2396: 'unknown',
2397: 'desert-rib',
2398: 'desert-skull',
2399: 'slime-green',
2400: 'slime-pink',
2401: 'slime-yellow',
2402: 'slime-blue',
2403: 'frightener-head',
2404: 'frightener-eyes',
2405: 'sand-horror-head',
2406: 'sand-horror-hand',
2407: 'seastar',
2408: 'bread',
2409: 'sandwich',
2410: 'fish1',
2411: 'fish2',
2412: 'shark',
2413: 'lantern-fish',
2414: 'lantern-fish-eyes',
2415: 'mawfish',
2416: 'piranha',
2417: 'blowfish',
2418: 'seahorse',
2421: 'formula',
2422: 'key1',
2423: 'flowers2',
2424: 'flowers',
2425: 'grass',
2426: 'grass2',
2427: 'grass3',
2428: 'lava-flower',
2429: 'lava-grass',
2430: 'thorn-plant',
2431: 'echinacea2',
2432: 'leaf',
2433: 'lantern02',
2434: 'torch',
2435: 'stone',
2436: 'stone2',
2437: 'tendril',
2438: 'tulips-colorful',
2439: 'cornflower',
2440: 'reed',
2441: 'pumpkin-leaves',
2442: 'pineapple-leaves',
2443: 'sunflower',
2444: 'bean-tendril',
2445: 'desert-flower01',
2446: 'desert-flower02',
2447: 'wheat',
2448: 'corn',
2449: 'water-lily01',
2450: 'water-lily02',
2451: 'inn-sign',
2452: 'shop-sign',
2453: 'weapon-shop-sign',
2454: 'armor-shop-sign',
2455: 'identifier-sign',
2456: 'smithy-sign',
2457: 'carpentersshop-sign',
2458: 'weavingmill-sign',
2459: 'ivy',
2460: 'wall-roses-red',
2461: 'wall-roses-white',
2462: 'christmas-tree',
2463: 'underwater-plant',
2464: 'alga',
2465: 'coral',
2466: 'inca-art1',
2467: 'inca-art2',
2468: 'inca-art3',
2469: 'inca-art4',
2470: 'crest1',
2471: 'castle-chain',
2472: 'wall-skull',
2473: 'torch-read',
2474: 'torch-green',
2475: 'torch-blue',
2476: 'torch-yellow',
2477: 'liana',
2478: 'chandelier',
2479: 'cobwebs',
2480: 'cobwebs2',
2481: 'goddess2',
2482: 'door',
2483: 'big-door',
2484: 'window',
2485: 'castle-window',
2486: 'gate',
2487: 'spike-trap',
2488: 'stomp-trap',
2489: 'lever',
2490: 'chest-base02',
2491: 'chest-top02',
2492: 'table',
2493: 'stone-table',
2494: 'sandstone-table',
2495: 'stone-stool',
2496: 'sandstone-stool',
2497: 'stool',
2498: 'bench',
2499: 'bed',
2500: 'bedtable',
2501: 'cupboard',
2502: 'market-stand1',
2503: 'market-stand2',
2504: 'market-stand3',
2505: 'barrel',
2506: 'crate',
2507: 'open-crate',
2508: 'sack',
2509: 'shelter',
2510: 'desktop',
2511: 'counter',
2512: 'shelf1',
2513: 'shelf2',
2514: 'shelf3',
2515: 'castle-shelf1',
2516: 'castle-shelf2',
2517: 'castle-shelf3',
2518: 'stone-shelf1',
2519: 'stone-shelf2',
2520: 'stone-shelf3',
2521: 'sandstone-shelf1',
2522: 'sandstone-shelf2',
2523: 'sandstone-shelf3',
2524: 'corpse',
2525: 'runestone',
2526: 'flower-box01',
2527: 'flower-box02',
2528: 'flower-box03',
2529: 'street-light02',
2530: 'street-light01',
2531: 'fence01',
2532: 'fence02',
2533: 'fence03',
2534: 'fence04',
2535: 'furnace',
2536: 'anvil',
2537: 'sawbench',
2538: 'workbench',
2539: 'customization-bench',
2540: 'spinningwheel',
2541: 'loom',
2542: 'overground-dungeon01',
2543: 'underground-dungeon01',
2544: 'castle-arc',
2545: 'temple-arc',
2546: 'bomb1',
2547: 'glider',
2548: 'boat2',
2549: 'stars',
2550: 'plaster',
2551: 'zzz',
2552: 'heart',
2553: 'angry',
2554: 'campfire',
2555: 'tent',
2556: 'beach-umbrella',
2557: 'beach-towel',
2558: 'wood-mat',
2559: 'compass2',
2560: 'mission',
2561: 'city',
2562: 'skull',
2563: 'airship',
2564: 'sphinx01',
2565: 'obelisk',
2566: 'manacube',
2567: 'maptile',
2568: 'cloud02'
}
MODEL_IDS = {v: k for k, v in MODEL_NAMES.items()}
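# Added note: several model names above are shared by more than one id (for
# example 'egg1', 'goddess2', 'door', 'window' and 'mermaid-hand'), so this
# inverted lookup, like the other *_IDS maps below, can only keep a single id
# per name, namely whichever entry the dict happens to yield last while iterating.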
ITEM_NAMES = {
(1, 0): 'Cookie',
(1, 1): 'LifePotion',
(1, 2): 'CactusPotion',
(1, 3): 'ManaPotion',
(1, 4): 'GinsengSoup',
(1, 5): 'SnowBerryMash',
(1, 6): 'MushroomSpit',
(1, 7): 'Bomb',
(1, 8): 'PineappleSlice',
(1, 9): 'PumpkinMuffin',
(2, 0): 'Formula',
(3, 0): 'Sword',
(3, 1): 'Axe',
(3, 2): 'Mace',
(3, 3): 'Dagger',
(3, 4): 'Fist',
(3, 5): 'Longsword',
(3, 6): 'Bow',
(3, 7): 'Crossbow',
(3, 8): 'Boomerang',
(3, 9): 'Arrow',
(3, 10): 'Staff',
(3, 11): 'Wand',
(3, 12): 'Bracelet',
(3, 13): 'Shield',
(3, 14): 'Arrows',
(3, 15): 'Greatsword',
(3, 16): 'Greataxe',
(3, 17): 'Greatmace',
(3, 20): 'Torch',
(4, 0): 'ChestArmor',
(5, 0): 'Gloves',
(6, 0): 'Boots',
(7, 0): 'ShoulderArmor',
(8, 0): 'Amulet',
(9, 0): 'Ring',
(11, 0): 'Nugget',
(11, 1): 'Log',
(11, 2): 'Feather',
(11, 3): 'Horn',
(11, 4): 'Claw',
(11, 5): 'Fiber',
(11, 6): 'Cobweb',
(11, 7): 'Hair',
(11, 8): 'Crystal',
(11, 9): 'Yarn',
(11, 10): 'Cube',
(11, 11): 'Capsule',
(11, 12): 'Flask',
(11, 13): 'Orb',
(11, 14): 'Spirit',
(11, 15): 'Mushroom',
(11, 16): 'Pumpkin',
(11, 17): 'Pineapple',
(11, 18): 'RadishSlice',
(11, 19): 'ShimmerMushroom',
(11, 20): 'GinsengRoot',
(11, 21): 'OnionSlice',
(11, 22): 'Heartflower',
(11, 23): 'PricklyPear',
(11, 24): 'FrozenHeartflower',
(11, 25): 'Soulflower',
(11, 26): 'WaterFlask',
(11, 27): 'SnowBerry',
(12, 0): 'Coin',
(13, 0): 'PlatinumCoin',
(14, 0): 'Leftovers',
(15, 0): 'Beak',
(16, 0): 'Painting',
(18, 0): 'Candle',
(18, 1): 'Candle',
(19, 0): 'Pet',
(20, 0): 'Bait',
(20, 1): 'Bait',
(20, 2): 'Bait',
(20, 3): 'Bait',
(20, 4): 'Bait',
(20, 5): 'Bait',
(20, 6): 'Bait',
(20, 7): 'Bait',
(20, 8): 'Bait',
(20, 9): 'Bait',
(20, 10): 'Bait',
(20, 11): 'Bait',
(20, 12): 'Bait',
(20, 13): 'Bait',
(20, 14): 'Bait',
(20, 15): 'Bait',
(20, 16): 'Bait',
(20, 17): 'Bait',
(20, 18): 'Bait',
(20, 19): 'BubbleGum',
(20, 20): 'Bait',
(20, 21): 'Bait',
(20, 22): 'VanillaCupcake',
(20, 23): 'ChocolateCupcake',
(20, 24): 'Bait',
(20, 25): 'CinnamonRole',
(20, 26): 'Waffle',
(20, 27): 'Croissant',
(20, 28): 'Bait',
(20, 29): 'Bait',
(20, 30): 'Candy',
(20, 31): 'Bait',
(20, 32): 'Bait',
(20, 33): 'PumpkinMash',
(20, 34): 'CottonCandy',
(20, 35): 'Carrot',
(20, 36): 'BlackberryMarmelade',
(20, 37): 'GreenJelly',
(20, 38): 'PinkJelly',
(20, 39): 'YellowJelly',
(20, 40): 'BlueJelly',
(20, 41): 'Bait',
(20, 42): 'Bait',
(20, 43): 'Bait',
(20, 44): 'Bait',
(20, 45): 'Bait',
(20, 46): 'Bait',
(20, 47): 'Bait',
(20, 48): 'Bait',
(20, 49): 'Bait',
(20, 50): 'BananaSplit',
(20, 51): 'Bait',
(20, 52): 'Bait',
(20, 53): 'Popcorn',
(20, 54): 'Bait',
(20, 55): 'LicoriceCandy',
(20, 56): 'CerealBar',
(20, 57): 'SaltedCaramel',
(20, 58): 'GingerTartlet',
(20, 59): 'MangoJuice',
(20, 60): 'FruitBasket',
(20, 61): 'MelonIceCream',
(20, 62): 'BloodOrangeJuice',
(20, 63): 'MilkChocolateBar',
(20, 64): 'MintChocolateBar',
(20, 65): 'WhiteChocolateBar',
(20, 66): 'CaramelChocolateBar',
(20, 67): 'ChocolateCookie',
(20, 68): 'Bait',
(20, 69): 'Bait',
(20, 70): 'Bait',
(20, 71): 'Bait',
(20, 72): 'Bait',
(20, 73): 'Bait',
(20, 74): 'SugarCandy',
(20, 75): 'AppleRing',
(20, 76): 'Bait',
(20, 77): 'Bait',
(20, 78): 'Bait',
(20, 79): 'Bait',
(20, 80): 'Bait',
(20, 81): 'Bait',
(20, 82): 'Bait',
(20, 83): 'Bait',
(20, 84): 'Bait',
(20, 85): 'Bait',
(20, 86): 'WaterIce',
(20, 87): 'ChocolateDonut',
(20, 88): 'Pancakes',
(20, 89): 'Bait',
(20, 90): 'StrawberryCake',
(20, 91): 'ChocolateCake',
(20, 92): 'Lollipop',
(20, 93): 'Softice',
(20, 94): 'Bait',
(20, 95): 'Bait',
(20, 96): 'Bait',
(20, 97): 'Bait',
(20, 98): 'CandiedApple',
(20, 99): 'DateCookie',
(20, 100): 'Bait',
(20, 101): 'Bait',
(20, 102): 'Bread',
(20, 103): 'Curry',
(20, 104): 'Lolly',
(20, 105): 'LemonTart',
(20, 106): 'StrawberryCocktail',
(20, 107): 'Bait',
(20, 108): 'Bait',
(20, 109): 'Bait',
(20, 110): 'Bait',
(20, 111): 'Bait',
(20, 112): 'Bait',
(20, 113): 'Bait',
(20, 114): 'Bait',
(20, 115): 'Bait',
(20, 116): 'Bait',
(20, 117): 'Bait',
(20, 118): 'Bait',
(20, 119): 'Bait',
(20, 120): 'Bait',
(20, 121): 'Bait',
(20, 122): 'Bait',
(20, 123): 'Bait',
(20, 124): 'Bait',
(20, 125): 'Bait',
(20, 126): 'Bait',
(20, 127): 'Bait',
(20, 128): 'Bait',
(20, 129): 'Bait',
(20, 130): 'Bait',
(20, 131): 'Bait',
(20, 132): 'Bait',
(20, 133): 'Bait',
(20, 134): 'Bait',
(20, 135): 'Bait',
(20, 136): 'Bait',
(20, 137): 'Bait',
(20, 138): 'Bait',
(20, 139): 'Bait',
(20, 140): 'Bait',
(20, 141): 'Bait',
(20, 142): 'Bait',
(20, 143): 'Bait',
(20, 144): 'Bait',
(20, 145): 'Bait',
(20, 146): 'Bait',
(20, 147): 'Bait',
(20, 148): 'Bait',
(20, 149): 'Bait',
(20, 150): 'Bait',
(20, 151): 'BiscuitRole',
(20, 152): 'Bait',
(20, 153): 'Bait',
(20, 154): 'Bait',
(20, 155): 'Bait',
(21, 0): 'Amulet1',
(21, 1): 'Amulet2',
(21, 2): 'JewelCase',
(21, 3): 'Key',
(21, 4): 'Medicine',
(21, 5): 'Antivenom',
(21, 6): 'BandAid',
(21, 7): 'Crutch',
(21, 8): 'Bandage',
(21, 9): 'Salve',
(23, 0): 'HangGlider',
(23, 1): 'Boat',
(24, 0): 'Lamp',
(25, 0): 'ManaCube'
}
ITEM_IDS = {v: k for k, v in ITEM_NAMES.items()}
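# Added note: ITEM_NAMES is keyed by (type, subtype) tuples, and placeholder
# names such as 'Bait' and 'Candle' cover many subtypes, so a lookup like
# ITEM_IDS['Bait'] resolves to just one of those (type, subtype) pairs.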
STATIC_NAMES = {
0: 'Statue',
1: 'Door',
2: 'BigDoor',
3: 'Window',
4: 'CastleWindow',
5: 'Gate',
6: 'FireTrap',
7: 'SpikeTrap',
8: 'StompTrap',
9: 'Lever',
10: 'Chest',
11: 'ChestTop02',
12: 'Table',
13: 'Table',
14: 'Table',
15: 'Stool',
16: 'Stool',
17: 'Stool',
18: 'Bench',
19: 'Bed',
20: 'BedTable',
21: 'MarketStand1',
22: 'MarketStand2',
23: 'MarketStand3',
24: 'Barrel',
25: 'Crate',
26: 'OpenCrate',
27: 'Sack',
28: 'Shelter',
29: 'Cupboard',
30: 'Desktop',
31: 'Counter',
32: 'Shelf1',
33: 'Shelf2',
34: 'Shelf3',
35: 'CastleShelf1',
36: 'CastleShelf2',
37: 'CastleShelf3',
38: 'StoneShelf1',
39: 'StoneShelf2',
40: 'StoneShelf3',
41: 'SandstoneShelf1',
42: 'SandstoneShelf2',
43: 'SandstoneShelf3',
44: 'Corpse',
45: 'RuneStone',
46: 'Artifact',
47: 'FlowerBox1',
48: 'FlowerBox2',
49: 'FlowerBox3',
50: 'StreetLight',
51: 'FireStreetLight',
52: 'Fence1',
53: 'Fence2',
54: 'Fence3',
55: 'Fence4',
56: 'Vase1',
57: 'Vase2',
58: 'Vase3',
59: 'Vase4',
60: 'Vase5',
61: 'Vase6',
62: 'Vase7',
63: 'Vase8',
64: 'Vase9',
65: 'Campfire',
66: 'Tent',
67: 'BeachUmbrella',
68: 'BeachTowel',
69: 'SleepingMat',
71: 'Furnace',
72: 'Anvil',
73: 'SpinningWheel',
74: 'Loom',
75: 'SawBench',
76: 'Workbench',
77: 'CustomizationBench'
}
STATIC_IDS = {v: k for k, v in STATIC_NAMES.items()}
STATIC_MODELS = {
0: 2481,
1: 2482,
2: 2483,
3: 2484,
4: 2485,
5: 2486,
6: None,
7: 2487,
8: 2488,
9: 2489,
10: 2490,
11: 2491,
12: 2492,
13: 2493,
14: None,
15: 2495,
16: 2497,
17: 2496,
18: 2498,
19: 2499,
20: 2500,
21: 2502,
22: 2503,
23: 2504,
24: 2505,
25: 2506,
26: 2507,
27: 2508,
28: 2509,
29: 2501,
30: 2510,
31: 2511,
32: 2512,
33: 2513,
34: 2514,
35: 2515,
36: 2516,
37: 2517,
38: 2518,
39: 2519,
40: 2520,
41: 2521,
42: 2522,
43: 2523,
44: 2524,
45: 2525,
46: 2079,
47: 2526,
48: 2527,
49: 2528,
50: 2529,
51: 2530,
52: 2531,
53: 2532,
54: 2533,
55: 2534,
56: 2322,
57: 2323,
58: 2324,
59: 2325,
60: 2326,
61: 2327,
62: 2328,
63: 2329,
64: 2330,
65: 2554,
66: 2555,
67: 2556,
68: 2557,
69: 2558,
70: None,
71: 2535,
72: 2536,
73: 2540,
74: 2541,
75: 2537,
76: 2538,
77: 2539
}
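# Added note: STATIC_MODELS appears to map STATIC_NAMES ids to MODEL_NAMES ids
# (e.g. 1 'Door' -> 2482 'door'); a value of None presumably marks statics
# that have no dedicated model entry.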
ENTITY_NAMES = {
0: 'ElfMale',
1: 'ElfFemale',
2: 'HumanMale',
3: 'HumanFemale',
4: 'GoblinMale',
5: 'GoblinFemale',
6: 'Bullterrier',
7: 'LizardmanMale',
8: 'LizardmanFemale',
9: 'DwarfMale',
10: 'DwarfFemale',
11: 'OrcMale',
12: 'OrcFemale',
13: 'FrogmanMale',
14: 'FrogmanFemale',
15: 'UndeadMale',
16: 'UndeadFemale',
17: 'Skeleton',
18: 'OldMan',
19: 'Collie',
20: 'ShepherdDog',
21: 'SkullBull',
22: 'Alpaca',
23: 'BrownAlpaca',
24: 'Egg',
25: 'Turtle',
26: 'Terrier',
27: 'ScottishTerrier',
28: 'Wolf',
29: 'Panther',
30: 'Cat',
31: 'BrownCat',
32: 'WhiteCat',
33: 'Pig',
34: 'Sheep',
35: 'Bunny',
36: 'Porcupine',
37: 'GreenSlime',
38: 'PinkSlime',
39: 'YellowSlime',
40: 'BlueSlime',
41: 'Frightener',
42: 'SandHorror',
43: 'Wizard',
44: 'Bandit',
45: 'Witch',
46: 'Ogre',
47: 'Rockling',
48: 'Gnoll',
49: 'PolarGnoll',
50: 'Monkey',
51: 'Gnobold',
52: 'Insectoid',
53: 'Hornet',
54: 'InsectGuard',
55: 'Crow',
56: 'Chicken',
57: 'Seagull',
58: 'Parrot',
59: 'Bat',
60: 'Fly',
61: 'Midge',
62: 'Mosquito',
63: 'PlainRunner',
64: 'LeafRunner',
65: 'SnowRunner',
66: 'DesertRunner',
67: 'Peacock',
68: 'Frog',
69: 'PlantCreature',
70: 'RadishCreature',
71: 'Onionling',
72: 'DesertOnionling',
73: 'Devourer',
74: 'Duckbill',
75: 'Crocodile',
76: 'SpikeCreature',
77: 'Anubis',
78: 'Horus',
79: 'Jester',
80: 'Spectrino',
81: 'Djinn',
82: 'Minotaur',
83: 'NomadMale',
84: 'NomadFemale',
85: 'Imp',
86: 'Spitter',
87: 'Mole',
88: 'Biter',
89: 'Koala',
90: 'Squirrel',
91: 'Raccoon',
92: 'Owl',
93: 'Penguin',
94: 'Werewolf',
96: 'Zombie',
97: 'Vampire',
98: 'Horse',
99: 'Camel',
100: 'Cow',
101: 'Dragon',
102: 'BarkBeetle',
103: 'FireBeetle',
104: 'SnoutBeetle',
105: 'LemonBeetle',
106: 'Crab',
107: 'SeaCrab',
108: 'Troll',
109: 'DarkTroll',
110: 'HellDemon',
111: 'Golem',
112: 'EmberGolem',
113: 'SnowGolem',
114: 'Yeti',
115: 'Cyclops',
116: 'Mammoth',
117: 'Lich',
118: 'RuneGiant',
119: 'Saurian',
120: 'Bush',
121: 'SnowBush',
122: 'SnowBerryBush',
123: 'CottonPlant',
124: 'Scrub',
125: 'CobwebScrub',
126: 'FireScrub',
127: 'Ginseng',
128: 'Cactus',
130: 'ThornTree',
131: 'GoldDeposit',
132: 'IronDeposit',
133: 'SilverDeposit',
134: 'SandstoneDeposit',
135: 'EmeraldDeposit',
136: 'SapphireDeposit',
137: 'RubyDeposit',
138: 'DiamondDeposit',
139: 'IceCrystalDeposit',
140: 'Scarecrow',
141: 'Aim',
142: 'Dummy',
143: 'Vase',
144: 'Bomb',
145: 'SapphireFish',
146: 'LemonFish',
147: 'Seahorse',
148: 'Mermaid',
149: 'Merman',
150: 'Shark',
151: 'Bumblebee',
152: 'LanternFish',
153: 'MawFish',
154: 'Piranha',
155: 'Blowfish'
}
ENTITY_IDS = {v: k for k, v in ENTITY_NAMES.items()}
LOCATION_NAMES = {
(1, 0): 'Village',
(1, 1): 'Village',
(1, 2): 'Village',
(1, 3): 'Village',
(1, 4): 'Village',
(1, 5): 'Village',
(1, 6): 'Village',
(1, 7): 'Village',
(1, 8): 'Village',
(1, 9): 'Village',
(2, 0): 'Mountain',
(2, 1): 'Mountain',
(2, 2): 'Mountain',
(2, 3): 'Mountain',
(2, 4): 'Mountain',
(2, 5): 'Mountain',
(2, 6): 'Mountain',
(2, 7): 'Mountain',
(2, 8): 'Mountain',
(2, 9): 'Mountain',
(3, 0): 'Forest',
(3, 1): 'Forest',
(3, 2): 'Forest',
(3, 3): 'Forest',
(3, 4): 'Forest',
(3, 5): 'Forest',
(3, 6): 'Forest',
(3, 7): 'Forest',
(3, 8): 'Forest',
(3, 9): 'Forest',
(4, 0): 'Lake',
(4, 1): 'Lake',
(4, 2): 'Lake',
(4, 3): 'Lake',
(4, 4): 'Lake',
(4, 5): 'Lake',
(4, 6): 'Lake',
(4, 7): 'Lake',
(4, 8): 'Lake',
(4, 9): 'Lake',
(5, 0): 'Ruins',
(5, 1): 'Ruins',
(5, 2): 'Ruins',
(5, 3): 'Ruins',
(5, 4): 'Gravesite',
(6, 0): 'Canyon',
(6, 1): 'Canyon',
(6, 2): 'Canyon',
(6, 3): 'Canyon',
(6, 4): 'Canyon',
(6, 5): 'Canyon',
(6, 6): 'Canyon',
(6, 7): 'Canyon',
(6, 8): 'Canyon',
(6, 9): 'Canyon',
(7, 0): 'Valley',
(7, 1): 'Valley',
(7, 2): 'Valley',
(7, 3): 'Valley',
(7, 4): 'Valley',
(7, 5): 'Valley',
(7, 6): 'Valley',
(7, 7): 'Valley',
(7, 8): 'Valley',
(7, 9): 'Valley',
(8, 0): 'Crater',
(8, 1): 'Crater',
(8, 2): 'Crater',
(8, 3): 'Crater',
(8, 4): 'Crater',
(8, 5): 'Crater',
(8, 6): 'Crater',
(8, 7): 'Crater',
(8, 8): 'Crater',
(8, 9): 'Crater',
(9, 0): 'Cave',
(10, 0): 'Portal',
(11, 0): 'Rock',
(11, 1): 'Rock',
(11, 2): 'Rock',
(11, 3): 'Rock',
(11, 4): 'Rock',
(11, 5): 'Rock',
(11, 6): 'Rock',
(11, 7): 'Rock',
(11, 8): 'Rock',
(11, 9): 'Rock',
(12, 0): 'Tree',
(12, 1): 'Tree',
(12, 2): 'Tree',
(12, 3): 'Tree',
(12, 4): 'Tree',
(12, 5): 'Tree',
(12, 6): 'Tree',
(12, 7): 'Tree',
(12, 8): 'Tree',
(12, 9): 'Tree',
(13, 0): 'Peak',
(13, 1): 'Peak',
(13, 2): 'Peak',
(13, 3): 'Peak',
(13, 4): 'Peak',
(13, 5): 'Peak',
(13, 6): 'Peak',
(13, 7): 'Peak',
(13, 8): 'Peak',
(13, 9): 'Peak',
(14, 0): 'Castle',
(14, 1): 'Ruins',
(14, 2): 'Catacombs',
(14, 3): 'Palace',
(14, 4): 'Temple',
(14, 5): 'Pyramid',
(15, 0): 'Island',
(15, 1): 'Island',
(15, 2): 'Island',
(15, 3): 'Island',
(15, 4): 'Island',
(15, 5): 'Island',
(15, 6): 'Island',
(15, 7): 'Island',
(15, 8): 'Island',
(15, 9): 'Island'
}
LOCATION_IDS = {v: k for k, v in LOCATION_NAMES.items()}
QUARTER_NAMES = {
(1, 1): 'Trade Quarter',
(1, 2): 'Crafting Quarter',
(1, 3): 'Class Quarter',
(1, 4): 'Pet Quarter',
(4, 0): 'Portal',
(5, 0): 'Palace'
}
QUARTER_IDS = {v: k for k, v in QUARTER_NAMES.items()}
SKILL_NAMES = {
0: 'PetTaming',
1: 'PetRiding',
2: 'Climbing',
3: 'HangGliding',
4: 'Swimming',
5: 'BoatDriving',
6: 'Ability1',
7: 'Ability2',
8: 'Ability3',
9: 'Ability4',
10: 'Ability5'
}
SKILL_IDS = {v: k for k, v in SKILL_NAMES.items()}
ABILITY_NAMES = {
21: 'RangerKick',
34: 'HealingStream',
48: 'Intercept',
49: 'Teleport',
50: 'Retreat',
54: 'Smash',
79: 'Sneak',
86: 'Cyclone',
88: 'FireExplosion',
96: 'Shuriken',
97: 'Camouflage',
99: 'Aim',
100: 'Swiftness',
101: 'Bulwark',
102: 'WarFrenzy',
103: 'ManaShield'
}
ABILITY_IDS = {v: k for k, v in ABILITY_NAMES.items()}
| matpow2/cuwo | cuwo/strings.py | Python | gpl-3.0 | 94,588 | ["CRYSTAL"] | 8293c9a5ac318d43f9bb84080a6ad63a88be09599c0155256a351a8f58f4ccd2 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
freeseer - vga/presentation capture software
Copyright (C) 2011 Free and Open Source Software Learning Centre
http://fosslc.org
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
For support, questions, suggestions or any other inquiries, visit:
http://wiki.github.com/Freeseer/freeseer/
@author: Thanh Ha
'''
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
from freeseer.frontend.qtcommon.dpi_adapt_qtgui import QWidgetWithDpi
from freeseer.frontend.qtcommon import resource # noqa
class ConfigToolWidget(QWidgetWithDpi):
'''
Main widget for the Freeseer config tool: an options tree on the left and
a panel area on the right that hosts the selected settings page.
'''
def __init__(self, parent=None):
'''
Constructor
'''
super(ConfigToolWidget, self).__init__(parent)
self.setMinimumSize(800, 460)
self.mainLayout = QtGui.QHBoxLayout()
self.setLayout(self.mainLayout)
#
# Left panel
#
self.leftPanelLayout = QtGui.QVBoxLayout()
self.mainLayout.addLayout(self.leftPanelLayout)
self.optionsTreeWidget = QtGui.QTreeWidget()
self.optionsTreeWidget.setSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Minimum)
self.optionsTreeWidget.setHeaderHidden(True)
self.optionsTreeWidget.headerItem().setText(0, "1")
# General
QtGui.QTreeWidgetItem(self.optionsTreeWidget)
self.optionsTreeWidget.topLevelItem(0).setText(0, "General")
# AV
QtGui.QTreeWidgetItem(self.optionsTreeWidget)
self.optionsTreeWidget.topLevelItem(1).setText(0, "AV Config")
# Plugins
QtGui.QTreeWidgetItem(self.optionsTreeWidget)
self.optionsTreeWidget.topLevelItem(2).setText(0, "Plugins")
# About
QtGui.QTreeWidgetItem(self.optionsTreeWidget)
self.optionsTreeWidget.topLevelItem(3).setText(0, "About")
closeIcon = QtGui.QIcon.fromTheme("application-exit")
self.closePushButton = QtGui.QPushButton("Close")
self.closePushButton.setIcon(closeIcon)
self.leftPanelLayout.addWidget(self.optionsTreeWidget)
self.leftPanelLayout.addWidget(self.closePushButton)
self.optionsTreeWidget.expandAll()
#
# Right panel
#
self.rightPanelWidget = QtGui.QWidget()
self.rightPanelWidget.setSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.MinimumExpanding)
self.mainLayout.addWidget(self.rightPanelWidget)
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
main = ConfigToolWidget()
main.show()
sys.exit(app.exec_())
| Freeseer/freeseer | src/freeseer/frontend/configtool/ConfigToolWidget.py | Python | gpl-3.0 | 3,246 | ["VisIt"] | 3bfddbbf32698ddfb9f0faa63c0dffa88292714207a17d329800625edfb4c5d3 |
"""Contains the drivers and interface code for pinball machines which
use the Multimorphic P3-ROC hardware controllers.
This code can be used with P-ROC driver boards, or with Stern SAM, Stern
Whitestar, Williams WPC, or Williams WPC95 driver boards.
Much of this code is from the P-ROC drivers section of the pyprocgame project,
written by Adam Preble and Gerry Stellenberg. It was originally released under
the MIT license and is released here under the MIT License.
More info on the P3-ROC hardware platform: http://pinballcontrollers.com/
Original code source on which this module was based:
https://github.com/preble/pyprocgame
If you want to use the Mission Pinball Framework with P3-ROC hardware, you also
need libpinproc and pypinproc. More info:
http://www.pinballcontrollers.com/forum/index.php?board=10.0
"""
# p_roc.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
import re
import time
import sys
try:
import pinproc
pinproc_imported = True
except ImportError:
pinproc_imported = False
from mpf.system.platform import Platform
proc_output_module = 3
proc_pdb_bus_addr = 0xC00
class HardwarePlatform(Platform):
"""Platform class for the P3-ROC hardware controller.
Args:
machine: The MachineController instance.
Attributes:
machine: The MachineController instance.
proc: The P3-ROC pinproc.PinPROC device.
machine_type: Constant of the pinproc.MachineType
"""
def __init__(self, machine):
super(HardwarePlatform, self).__init__(machine)
self.log = logging.getLogger('P3-ROC')
self.log.debug("Configuring P3-ROC hardware.")
if not pinproc_imported:
self.log.error('Could not import "pinproc". Most likely you do not '
'have libpinproc and/or pypinproc installed. You can'
' run MPF in software-only "virtual" mode by using '
'the -x command line option for now instead.')
sys.exit()
# ----------------------------------------------------------------------
# Platform-specific hardware features. WARNING: Do not edit these. They
# are based on what the P3-ROC hardware can and cannot do.
self.features['max_pulse'] = 255
self.features['hw_timer'] = False
self.features['hw_rule_coil_delay'] = False
self.features['variable_recycle_time'] = False
self.features['variable_debounce_time'] = False
self.features['hw_enable_auto_disable'] = False
self.features['hw_led_fade'] = True
# todo need to add differences between patter and pulsed_patter
# Make the platform features available to everyone
self.machine.config['platform'] = self.features
# ----------------------------------------------------------------------
self.machine_type = pinproc.normalize_machine_type(
self.machine.config['hardware']['driverboards'])
# Connect to the P3-ROC. Keep trying if it doesn't work the first time.
self.proc = None
self.log.info("Connecting to P3-ROC")
while not self.proc:
try:
self.proc = pinproc.PinPROC(self.machine_type)
self.proc.reset(1)
except IOError:
print "Retrying..."
self.log.info("Succefully connected to P3-ROC")
'''
Since the P3-ROC has no Aux port, this code will break it.
# Clear out the default program for the aux port since we might need it
# for a 9th column. Details:
# http://www.pinballcontrollers.com/forum/index.php?topic=1360
commands = []
commands += [pinproc.aux_command_disable()]
for i in range(1, 255):
commands += [pinproc.aux_command_jump(0)]
self.proc.aux_send_commands(0, commands)
# End of the clear out the default program for the aux port.
'''
# Because PDBs can be configured in many different ways, we need to
# traverse the YAML settings to see how many PDBs are being used.
# Then we can configure the P3-ROC appropriately to use those PDBs.
# Only then can we relate the YAML coil/light #'s to P3-ROC numbers for
# the collections.
self.log.debug("Configuring P3-ROC for PDBs (P-ROC driver boards).")
self.pdbconfig = PDBConfig(self.proc, self.machine.config)
self.polarity = self.machine_type == pinproc.MachineTypeSternWhitestar\
or self.machine_type == pinproc.MachineTypeSternSAM\
or self.machine_type == pinproc.MachineTypePDB
def configure_driver(self, config, device_type='coil'):
""" Creates a P3-ROC driver.
Typically drivers are coils or flashers, but for the P3-ROC this is
also used for matrix-based lights.
Args:
config: Dictionary of settings for the driver.
device_type: String with value of either 'coil' or 'switch'.
Returns:
A reference to the PROCDriver object which is the actual object you
can use to pulse(), patter(), enable(), etc.
"""
# todo need to add Aux Bus support
# todo need to add virtual driver support for driver counts > 256
# Find the P3-ROC number for each driver. For P3-ROC driver boards, the
# P3-ROC number is specified via the Ax-By-C format.
proc_num = self.pdbconfig.get_proc_number(device_type,
str(config['number']))
if proc_num == -1:
self.log.error("Coil cannot be controlled by the P3-ROC. "
"Ignoring.")
return
if device_type == 'coil':
proc_driver_object = PROCDriver(proc_num, self.proc)
elif device_type == 'light':
proc_driver_object = PROCMatrixLight(proc_num, self.proc)
if 'polarity' in config:
state = proc_driver_object.proc.driver_get_state(config['number'])
state['polarity'] = config['polarity']
proc_driver_object.proc.driver_update_state(state)
return proc_driver_object, config['number']
def configure_switch(self, config):
"""Configures a P3-ROC switch.
Args:
config: Dictionary of settings for the switch. In the case
of the P3-ROC, it uses the following:
number : The number (or number string) for the switch as specified
in the machine configuration file.
debounce : Boolean which specifies whether the P3-ROC should
debounce this switch first before sending open and close
notifications to the host computer.
Returns:
switch : A reference to the switch object that was just created.
proc_num : Integer of the actual hardware switch number the P3-ROC
uses to refer to this switch. Typically your machine
configuration files would specify a switch number like `SD12` or
`7/5`. This `proc_num` is an int between 0 and 255.
state : An integer of the current hardware state of the switch, used
to set the initial state in the machine. A value of 0
means the switch is open, and 1 means it's closed. Note this
state is the physical state of the switch, so if you configure
the switch to be normally-closed (i.e. "inverted"), then your code
will have to invert it too. MPF handles this automatically if
the switch type is 'NC'.
"""
if self.machine_type == pinproc.MachineTypePDB:
proc_num = self.pdbconfig.get_proc_number('switch',
str(config['number']))
if proc_num == -1:
self.log.error("Switch cannot be controlled by the P3-ROC. "
"Ignoring.")
return
else:
proc_num = pinproc.decode(self.machine_type, str(config['number']))
switch = PROCSwitch(proc_num)
# The P3-ROC needs to be configured to notify the host computers of
# switch events. (That notification can be for open or closed,
# debounced or nondebounced.)
self.log.debug("Configuring switch's host notification settings. P3-ROC"
"number: %s, debounce: %s", proc_num,
config['debounce'])
if config['debounce'] is False or \
proc_num >= pinproc.SwitchNeverDebounceFirst:
self.proc.switch_update_rule(proc_num, 'closed_nondebounced',
{'notifyHost': True,
'reloadActive': False}, [], False)
self.proc.switch_update_rule(proc_num, 'open_nondebounced',
{'notifyHost': True,
'reloadActive': False}, [], False)
else:
self.proc.switch_update_rule(proc_num, 'closed_debounced',
{'notifyHost': True,
'reloadActive': False}, [], False)
self.proc.switch_update_rule(proc_num, 'open_debounced',
{'notifyHost': True,
'reloadActive': False}, [], False)
# Read in and set the initial switch state
# The P3-ROC uses the following values for hw switch states:
# 1 - closed (debounced)
# 2 - open (debounced)
# 3 - closed (not debounced)
# 4 - open (not debounced)
states = self.proc.switch_get_states()
if states[proc_num] == 1 or states[proc_num] == 3:
state = 1
else:
state = 0
# Return the switch object and an integer of its current state.
# 1 = active, 0 = inactive
return switch, proc_num, state
def configure_led(self, config):
""" Configures a P3-ROC RGB LED controlled via a PD-LED."""
# todo add polarity
# split the number (which comes in as a string like w-x-y-z) into parts
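        # e.g. a hypothetical number_str of '2-0-1-2' yields board 2 with the
        # R, G and B elements on PD-LED outputs 0, 1 and 2.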
config['number'] = config['number_str'].split('-')
if 'polarity' in config:
invert = not config['polarity']
else:
invert = False
return PDBLED(board=int(config['number'][0]),
address=[int(config['number'][1]),
int(config['number'][2]),
int(config['number'][3])],
proc_driver=self.proc,
invert=invert)
def configure_matrixlight(self, config):
"""Configures a P3-ROC matrix light."""
# On the P3-ROC, matrix lights are drivers
return self.configure_driver(config, 'light')
def configure_gi(self, config):
"""Configures a P3-ROC GI string light."""
# On the P3-ROC, GI strings are drivers
return self.configure_driver(config, 'light')
def configure_dmd(self):
"""The P3-ROC does not support a physical DMD, so this method does
nothing. It's included here in case it's called by mistake.
"""
self.log.error("An attempt was made to configure a physical DMD, but "
"the P3-ROC does not support physical DMDs.")
def tick(self):
"""Checks the P3-ROC for any events (switch state changes).
Also tickles the watchdog and flushes any queued commands to the P3-ROC.
"""
# Get P3-ROC events
for event in self.proc.get_events():
event_type = event['type']
event_value = event['value']
            if event_type == 99:  # CTRL-C to quit. todo: does this go here?
self.machine.quit()
elif event_type == pinproc.EventTypeDMDFrameDisplayed:
pass
elif event_type == pinproc.EventTypeSwitchClosedDebounced:
self.machine.switch_controller.process_switch(state=1,
num=event_value)
elif event_type == pinproc.EventTypeSwitchOpenDebounced:
self.machine.switch_controller.process_switch(state=0,
num=event_value)
elif event_type == pinproc.EventTypeSwitchClosedNondebounced:
self.machine.switch_controller.process_switch(state=1,
num=event_value,
debounced=False)
elif event_type == pinproc.EventTypeSwitchOpenNondebounced:
self.machine.switch_controller.process_switch(state=0,
num=event_value,
debounced=False)
else:
self.log.warning("Received unrecognized event from the P3-ROC. "
"Type: %s, Value: %s", event_type, event_value)
self.proc.watchdog_tickle()
self.proc.flush()
def write_hw_rule(self,
sw,
sw_activity,
coil_action_ms, # 0 = disable, -1 = hold forever
coil=None,
pulse_ms=0,
pwm_on=0,
pwm_off=0,
delay=0,
recycle_time=0,
debounced=True,
drive_now=False):
"""Used to write (or update) a hardware rule to the P3-ROC.
*Hardware Rules* are used to configure the P3-ROC to automatically
change driver states based on switch changes. These rules are
completely handled by the P3-ROC hardware (i.e. with no interaction from
the Python game code). They're used for things that you want to happen
fast, like firing coils when flipper buttons are pushed, slingshots,
pop bumpers, etc.
You can overwrite existing hardware rules at any time to change or
remove them.
Args:
sw : switch object
                Which switch you're creating this rule for. The parameter is a
                reference to the switch object itself.
sw_activity : int
Do you want this coil to fire when the switch becomes active
(1) or inactive (0)
coil_action_ms : int
The total time (in ms) that this coil action should take place.
A value of -1 means it's forever.
coil : coil object
Which coil is this rule controlling
pulse_ms : int
How long should the coil be pulsed (ms)
pwm_on : int
If the coil should be held on at less than 100% duty cycle,
this is the "on" time (in ms).
pwm_off : int
If the coil should be held on at less than 100% duty cycle,
this is the "off" time (in ms).
delay : int
Not currently implemented for the P3-ROC hardware
recycle_time : int
How long (in ms) should this switch rule wait before firing
again. Put another way, what's the "fastest" this rule can
fire? This is used to prevent "machine gunning" of slingshots
                and pop bumpers. Do not use it with flippers. Note the
                P3-ROC's recycle delay is non-configurable at 125ms, so any
                non-zero value passed here results in a 125ms recycle delay.
debounced : bool
Should the P3-ROC fire this coil after the switch has been
debounced? Typically no.
drive_now : bool
Should the P3-ROC check the state of the switches when this
                rule is first applied, and fire the coils if they should be?
Typically this is True, especially with flippers because you
want them to fire if the player is holding in the buttons when
the machine enables the flippers (which is done via several
calls to this method.)
"""
self.log.debug("Setting HW Rule. Switch:%s, Action ms:%s, Coil:%s, "
"Pulse:%s, pwm_on:%s, pwm_off:%s, Delay:%s, Recycle:%s,"
"Debounced:%s, Now:%s", sw.name, coil_action_ms,
coil.name, pulse_ms, pwm_on, pwm_off, delay,
recycle_time, debounced, drive_now)
if (sw_activity == 0 and debounced):
event_type = "open_debounced"
elif (sw_activity == 0 and not debounced):
event_type = "open_nondebounced"
elif (sw_activity == 1 and debounced):
event_type = "closed_debounced"
else: # if sw_activity == 1 and not debounced:
event_type = "closed_nondebounced"
# Note the P3-ROC uses a 125ms non-configurable recycle time. So any
# non-zero value passed here will enable the 125ms recycle.
reloadActive = False
if recycle_time:
reloadActive = True
# We only want to notifyHost for debounced switch events. We use non-
# debounced for hw_rules since they're faster, but we don't want to
# notify the host on them since the host would then get two events
# one for the nondebounced followed by one for the debounced.
notifyHost = False
if debounced:
notifyHost = True
rule = {'notifyHost': notifyHost, 'reloadActive': reloadActive}
# Now let's figure out what type of P3-ROC action we need to take.
        # We're going to 'brute force' this here because it's the easiest to
        # understand. (Which makes it the most pythonic, right? :)
proc_action = 'disable'
patter = False # makes it easier to understand later...
if pwm_on and pwm_off:
patter = True
if coil_action_ms == -1: # hold coil forever
if patter:
proc_action = 'patter'
else:
proc_action = 'enable'
elif coil_action_ms > 0: # timed action of some sort
if coil_action_ms <= pulse_ms:
proc_action = 'pulse'
pulse_ms = coil_action_ms
elif patter:
if pulse_ms:
pass
# todo error, P3-ROC can't do timed patter with pulse
else: # no initial pulse
proc_action = 'pulsed_patter'
this_driver = []
final_driver = []
# The P3-ROC ties hardware rules to switches, with a list of linked
# drivers that should change state based on a switch activity.
# Since our framework applies the rules one-at-a-time, we have to read
# the existing linked drivers from the hardware for that switch, add
# our new driver to the list, then re-update the rule on the hardware.
if proc_action == 'pulse':
this_driver = [pinproc.driver_state_pulse(
coil.hw_driver.state(), pulse_ms)]
elif proc_action == 'patter':
this_driver = [pinproc.driver_state_patter(
coil.hw_driver.state(), pwm_on, pwm_off, pulse_ms, True)]
# todo above param True should not be there. Change to now?
elif proc_action == 'enable':
this_driver = [pinproc.driver_state_pulse(
coil.hw_driver.state(), 0)]
elif proc_action == 'disable':
this_driver = [pinproc.driver_state_disable(
coil.hw_driver.state())]
elif proc_action == 'pulsed_patter':
this_driver = [pinproc.driver_state_pulsed_patter(
coil.hw_driver.state(), pwm_on, pwm_off,
coil_action_ms)]
# merge in any previously-configured driver rules for this switch
final_driver = list(this_driver) # need to make an actual copy
sw_rule_string = str(sw.name)+str(event_type)
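        # e.g. a hypothetical switch named 'flipperLwL' with a
        # 'closed_nondebounced' event yields the key
        # 'flipperLwLclosed_nondebounced'.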
if sw_rule_string in self.hw_switch_rules:
for driver in self.hw_switch_rules[sw_rule_string]:
final_driver.append(driver)
self.hw_switch_rules[sw_rule_string].extend(this_driver)
else:
self.hw_switch_rules[sw_rule_string] = this_driver
self.log.debug("Writing HW rule for switch: %s, event_type: %s,"
"rule: %s, final_driver: %s, drive now: %s",
sw.number, event_type,
rule, final_driver, drive_now)
self.proc.switch_update_rule(sw.number, event_type, rule, final_driver,
drive_now)
def clear_hw_rule(self, sw_name):
"""Clears a hardware rule.
This is used if you want to remove the linkage between a switch and
some driver activity. For example, if you wanted to disable your
flippers (so that a player pushing the flipper buttons wouldn't cause
the flippers to flip), you'd call this method with your flipper button
        as the *sw_name*.
        Args:
            sw_name : str
                The name of the switch whose rule you want to clear.
        """
sw_num = self.machine.switches[sw_name].number
self.log.debug("Clearing HW rule for switch: %s", sw_num)
self.proc.switch_update_rule(sw_num, 'open_nondebounced',
{'notifyHost': False,
'reloadActive': False}, [])
self.proc.switch_update_rule(sw_num, 'closed_nondebounced',
{'notifyHost': False,
'reloadActive': False}, [])
self.proc.switch_update_rule(sw_num, 'open_debounced',
{'notifyHost': True,
'reloadActive': False}, [])
self.proc.switch_update_rule(sw_num, 'closed_debounced',
{'notifyHost': True,
'reloadActive': False}, [])
        for entry in list(self.hw_switch_rules.keys()):  # copy: we delete below
if entry.startswith(self.machine.switches.number(sw_num).name):
# disable any drivers from this rule which are active now
# todo make this an option?
for driver_dict in self.hw_switch_rules[entry]:
self.proc.driver_disable(driver_dict['driverNum'])
# Remove this rule from our list
del self.hw_switch_rules[entry]
# todo need to read in the notifyHost settings and reapply those
# appropriately.
class PDBLED(object):
"""Represents an RGB LED connected to a PD-LED board."""
def __init__(self, board, address, proc_driver, invert=False):
self.log = logging.getLogger('PDBLED')
self.board = board
self.address = address
self.proc = proc_driver
self.invert = invert
# todo make sure self.address is a 3-element list
self.log.debug("Creating PD-LED item: board: %s, "
"RGB outputs: %s", self.board,
self.address)
def color(self, color):
"""Instantly sets this LED to the color passed.
Args:
color: a 3-item list of integers representing R, G, and B values,
0-255 each.
"""
#self.log.debug("Setting Color. Board: %s, Address: %s, Color: %s",
# self.board, self.address, color)
self.proc.led_color(self.board, self.address[0],
self.normalize_color(color[0]))
self.proc.led_color(self.board, self.address[1],
self.normalize_color(color[1]))
self.proc.led_color(self.board, self.address[2],
self.normalize_color(color[2]))
    def fade(self, color, fade_ms):
        # todo: fades are not yet implemented. For now just set the color
        # immediately and ignore fade_ms.
        self.color(color)
def disable(self):
"""Disables (turns off) this LED instantly. For multi-color LEDs it
turns all elements off.
"""
self.proc.led_color(self.board, self.address[0],
self.normalize_color(0))
self.proc.led_color(self.board, self.address[1],
self.normalize_color(0))
self.proc.led_color(self.board, self.address[2],
self.normalize_color(0))
def enable(self):
"""Enables (turns on) this LED instantly. For multi-color LEDs it turns
all elements on.
"""
        self.color([255, 255, 255])
def normalize_color(self, color):
if self.invert:
return 255-color
else:
return color
class PDBSwitch(object):
"""Base class for switches connected to a P3-ROC."""
def __init__(self, pdb, number_str):
upper_str = number_str.upper()
if upper_str.startswith('SD'):
self.sw_type = 'dedicated'
self.sw_number = int(upper_str[2:])
elif '/' in upper_str:
self.sw_type = 'matrix'
self.sw_number = self.parse_matrix_num(upper_str)
else:
self.sw_type = 'proc'
self.sw_number = int(number_str)
def proc_num(self):
return self.sw_number
def parse_matrix_num(self, num_str):
cr_list = num_str.split('/')
return (32 + int(cr_list[0])*16 + int(cr_list[1]))
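    # Example: a matrix switch '3/5' decodes to 32 + 3*16 + 5 = 85.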
class PDBCoil(object):
"""Base class for coils connected to a P3-ROC that are controlled via P3-ROC
driver boards (i.e. the PD-16 board).
"""
def __init__(self, pdb, number_str):
self.pdb = pdb
upper_str = number_str.upper()
if self.is_direct_coil(upper_str):
self.coil_type = 'dedicated'
            self.banknum = (int(number_str[1:]) - 1) // 8
self.outputnum = int(number_str[1:])
elif self.is_pdb_coil(number_str):
self.coil_type = 'pdb'
(self.boardnum, self.banknum, self.outputnum) = decode_pdb_address(
number_str, self.pdb.aliases)
else:
self.coil_type = 'unknown'
def bank(self):
if self.coil_type == 'dedicated':
return self.banknum
elif self.coil_type == 'pdb':
return self.boardnum * 2 + self.banknum
else:
return -1
def output(self):
return self.outputnum
def is_direct_coil(self, string):
if len(string) < 2 or len(string) > 3:
return False
if not string[0] == 'C':
return False
if not string[1:].isdigit():
return False
return True
def is_pdb_coil(self, string):
return is_pdb_address(string, self.pdb.aliases)
class PDBLight(object):
"""Base class for lights connected to a PD-8x8 driver board."""
def __init__(self, pdb, number_str):
self.pdb = pdb
upper_str = number_str.upper()
if self.is_direct_lamp(upper_str):
self.lamp_type = 'dedicated'
self.output = int(number_str[1:])
elif self.is_pdb_lamp(number_str):
# C-Ax-By-z:R-Ax-By-z or C-x/y/z:R-x/y/z
self.lamp_type = 'pdb'
source_addr, sink_addr = self.split_matrix_addr_parts(number_str)
(self.source_boardnum, self.source_banknum, self.source_outputnum)\
= decode_pdb_address(source_addr, self.pdb.aliases)
(self.sink_boardnum, self.sink_banknum, self.sink_outputnum)\
= decode_pdb_address(sink_addr, self.pdb.aliases)
else:
self.lamp_type = 'unknown'
def source_board(self):
return self.source_boardnum
def sink_board(self):
return self.sink_boardnum
def source_bank(self):
return self.source_boardnum * 2 + self.source_banknum
def sink_bank(self):
return self.sink_boardnum * 2 + self.sink_banknum
def source_output(self):
return self.source_outputnum
def sink_output(self):
return self.sink_outputnum
def dedicated_bank(self):
return self.banknum
def dedicated_output(self):
return self.output
def is_direct_lamp(self, string):
if len(string) < 2 or len(string) > 3:
return False
if not string[0] == 'L':
return False
if not string[1:].isdigit():
return False
return True
def split_matrix_addr_parts(self, string):
""" Input is of form C-Ax-By-z:R-Ax-By-z or C-x/y/z:R-x/y/z or
aliasX:aliasY. We want to return only the address part: Ax-By-z,
x/y/z, or aliasX. That is, remove the two character prefix if present.
"""
addrs = string.rsplit(':')
        if len(addrs) != 2:
return []
addrs_out = []
for addr in addrs:
bits = addr.split('-')
            if len(bits) == 1:
addrs_out.append(addr) # Append unchanged.
else: # Generally this will be len(bits) 2 or 4.
# Remove the first bit and rejoin.
addrs_out.append('-'.join(bits[1:]))
return addrs_out
def is_pdb_lamp(self, string):
params = self.split_matrix_addr_parts(string)
if len(params) != 2:
return False
for addr in params:
if not is_pdb_address(addr, self.pdb.aliases):
return False
return True
class PROCSwitch(object):
def __init__(self, number):
self.log = logging.getLogger('PROCSwitch')
self.number = number
class PROCDriver(object):
""" Base class for drivers connected to a P3-ROC. This class is used for all
drivers, regardless of whether they're connected to a P-ROC driver board
(such as the PD-16 or PD-8x8) or an OEM driver board.
"""
def __init__(self, number, proc_driver):
self.log = logging.getLogger('PROCDriver')
self.number = number
self.proc = proc_driver
def disable(self):
"""Disables (turns off) this driver."""
self.log.debug('Disabling Driver')
self.proc.driver_disable(self.number)
def enable(self):
"""Enables (turns on) this driver."""
self.log.debug('Enabling Driver')
self.proc.driver_schedule(number=self.number, schedule=0xffffffff,
cycle_seconds=0, now=True)
def pulse(self, milliseconds=None):
"""Enables this driver for `milliseconds`.
``ValueError`` will be raised if `milliseconds` is outside of the range
0-255.
"""
if milliseconds not in range(256):
raise ValueError('milliseconds must be in range 0-255.')
self.log.debug('Pulsing Driver for %sms', milliseconds)
self.proc.driver_pulse(self.number, milliseconds)
def future_pulse(self, milliseconds=None, timestamp=0):
"""Enables this driver for `milliseconds` at P3-ROC timestamp:
`timestamp`. If no parameter is provided for `milliseconds`,
        :attr:`pulse_ms` is used. If no parameter is provided for
`timestamp`, 0 is used. ``ValueError`` will be raised if `milliseconds`
is outside of the range 0-255.
"""
if milliseconds is None:
milliseconds = self.config['pulse_ms']
if milliseconds not in range(256):
raise ValueError('milliseconds must be in range 0-255.')
self.log.debug("Driver %s - future pulse %d", self.name,
milliseconds, timestamp)
self.proc.driver_future_pulse(self.number, milliseconds, timestamp)
def pwm(self, on_ms=10, off_ms=10, original_on_ms=0, now=True):
"""Enables a pitter-patter sequence.
It starts by activating the driver for `original_on_ms` milliseconds.
Then it repeatedly turns the driver on for `on_ms` milliseconds and
off for `off_ms` milliseconds.
"""
        if original_on_ms not in range(256):
            raise ValueError('original_on_ms must be in range 0-255.')
        if on_ms not in range(128):
            raise ValueError('on_ms must be in range 0-127.')
        if off_ms not in range(128):
            raise ValueError('off_ms must be in range 0-127.')
self.log.debug("Patter on:%d, off:%d, orig_on:%d, now:%s", on_ms,
off_ms, original_on_ms, now)
self.proc.driver_patter(self.number, on_ms, off_ms, original_on_ms, now)
def timed_pwm(self, on_ms=10, off_ms=10, run_time=0, now=True):
"""Enables a pitter-patter sequence that runs for `run_time`
milliseconds.
Until it ends, the sequence repeatedly turns the driver on for
`on_ms` milliseconds and off for `off_ms` milliseconds.
"""
if run_time not in range(256):
raise ValueError('run_time must be in range 0-255.')
if on_ms not in range(128):
raise ValueError('on_ms must be in range 0-127.')
if off_ms not in range(128):
raise ValueError('off_ms must be in range 0-127.')
self.log.debug("Driver %s - pulsed patter on:%d, off:%d,"
"run_time:%d, now:%s", self.name, on_ms, off_ms,
run_time, now)
self.proc.driver_pulsed_patter(self.number, on_ms, off_ms, run_time,
now)
self.last_time_changed = time.time()
def schedule(self, schedule, cycle_seconds=0, now=True):
"""Schedules this driver to be enabled according to the given
`schedule` bitmask."""
self.log.debug("Driver %s - schedule %08x", self.name, schedule)
self.proc.driver_schedule(number=self.number, schedule=schedule,
cycle_seconds=cycle_seconds, now=now)
self.last_time_changed = time.time()
def state(self):
"""Returns a dictionary representing this driver's current
configuration state.
"""
return self.proc.driver_get_state(self.number)
def tick(self):
pass
class PROCMatrixLight(object):
def __init__(self, number, proc_driver):
self.log = logging.getLogger('PROCMatrixLight')
self.number = number
self.proc = proc_driver
def off(self):
"""Disables (turns off) this driver."""
self.proc.driver_disable(self.number)
self.last_time_changed = time.time()
def on(self, brightness=255, fade_ms=0, start=0):
"""Enables (turns on) this driver."""
if brightness >= 255:
self.proc.driver_schedule(number=self.number, schedule=0xffffffff,
cycle_seconds=0, now=True)
elif brightness == 0:
self.off()
        else:
            # todo: intermediate brightness is not implemented yet; it would
            # need patter rates of roughly 10/1 through 2/9.
            pass
self.last_time_changed = time.time()
'''
Koen's fade code he posted to pinballcontrollers:
def mode_tick(self):
if self.fade_counter % 10 == 0:
for lamp in self.game.lamps:
if lamp.name.find("gi0") == -1:
var = 4.0*math.sin(0.02*float(self.fade_counter)) + 5.0
on_time = 11-round(var)
off_time = round(var)
lamp.patter(on_time, off_time)
self.fade_counter += 1
'''
class PDBConfig(object):
""" This class is only used when the P3-ROC is configured to use P3-ROC
driver boards such as the PD-16 or PD-8x8. i.e. not when it's operating in
WPC or Stern mode.
"""
indexes = []
proc = None
aliases = None # set in __init__
def __init__(self, proc, config):
self.log = logging.getLogger('PDBConfig')
self.log.debug("Processing P3-ROC Driver Board configuration")
self.proc = proc
# Grab globals from the config data
self.get_globals(config)
# Initialize some lists for data collecting
coil_bank_list = []
lamp_source_bank_list = []
lamp_list = []
lamp_list_for_index = []
self.aliases = []
if 'PRDriverAliases' in config:
for alias_dict in config['PRDriverAliases']:
alias = DriverAlias(alias_dict['expr'], alias_dict['repl'])
self.aliases.append(alias)
# Make a list of unique coil banks
for name in config['coils']:
item_dict = config['coils'][name]
coil = PDBCoil(self, str(item_dict['number']))
if coil.bank() not in coil_bank_list:
coil_bank_list.append(coil.bank())
# Make a list of unique lamp source banks. The P3-ROC only supports 2.
# TODO: What should be done if 2 is exceeded?
if 'matrixlights' in config:
for name in config['matrixlights']:
item_dict = config['matrixlights'][name]
lamp = PDBLight(self, str(item_dict['number']))
# Catalog PDB banks
# Dedicated lamps don't use PDB banks. They use P3-ROC direct
# driver pins.
if lamp.lamp_type == 'dedicated':
pass
elif lamp.lamp_type == 'pdb':
if lamp.source_bank() not in lamp_source_bank_list:
lamp_source_bank_list.append(lamp.source_bank())
# Create dicts of unique sink banks. The source index is
# needed when setting up the driver groups.
lamp_dict = {'source_index':
lamp_source_bank_list.index(
lamp.source_bank()),
'sink_bank': lamp.sink_bank(),
'source_output': lamp.source_output()}
# lamp_dict_for_index. This will be used later when the
# p-roc numbers are requested. The requestor won't know
# the source_index, but it will know the source board.
# This is why two separate lists are needed.
lamp_dict_for_index = {'source_board': lamp.source_board(),
'sink_bank': lamp.sink_bank(),
'source_output':
lamp.source_output()}
if lamp_dict not in lamp_list:
lamp_list.append(lamp_dict)
lamp_list_for_index.append(lamp_dict_for_index)
# Create a list of indexes. The PDB banks will be mapped into this
# list. The index of the bank is used to calculate the P3-ROC driver
# number for each driver.
        num_proc_banks = pinproc.DriverCount // 8
self.indexes = [99] * num_proc_banks
self.initialize_drivers(proc)
# Set up dedicated driver groups (groups 0-3).
for group_ctr in range(0, 4):
# TODO: Fix this. PDB Banks 0-3 are also interpreted as dedicated
# bank here.
enable = group_ctr in coil_bank_list
self.log.debug("Driver group %02d (dedicated): Enable=%s",
group_ctr, enable)
proc.driver_update_group_config(group_ctr,
0,
group_ctr,
0,
0,
False,
True,
enable,
True)
group_ctr += 1
# Process lamps first. The P3-ROC can only control so many drivers
# directly. Since software won't have the speed to control lamp
# matrixes, map the lamps first. If there aren't enough P3-ROC driver
# groups for coils, the overflow coils can be controlled by software
# via VirtualDrivers (which should get set up automatically by this
# code.)
for i, lamp_dict in enumerate(lamp_list):
# If the bank is 16 or higher, the P3-ROC can't control it
# directly. Software can't really control lamp matrixes either
# (need microsecond resolution). Instead of doing crazy logic here
# for a case that probably won't happen, just ignore these banks.
if (group_ctr >= num_proc_banks or lamp_dict['sink_bank'] >= 16):
self.log.error("Lamp matrix banks can't be mapped to index "
"%d because that's outside of the banks the "
"P3-ROC can control.", lamp_dict['sink_bank'])
else:
self.log.debug("Driver group %02d (lamp sink): slow_time=%d "
"enable_index=%d row_activate_index=%d "
"row_enable_index=%d matrix=%s", group_ctr,
self.lamp_matrix_strobe_time,
lamp_dict['sink_bank'],
lamp_dict['source_output'],
lamp_dict['source_index'], True )
self.indexes[group_ctr] = lamp_list_for_index[i]
proc.driver_update_group_config(group_ctr,
self.lamp_matrix_strobe_time,
lamp_dict['sink_bank'],
lamp_dict['source_output'],
lamp_dict['source_index'],
True,
True,
True,
True)
group_ctr += 1
for coil_bank in coil_bank_list:
# If the bank is 16 or higher, the P3-ROC can't control it directly.
# Software will have do the driver logic and write any changes to
# the PDB bus. Therefore, map these banks to indexes above the
# P3-ROC's driver count, which will force the drivers to be created
# as VirtualDrivers. Appending the bank avoids conflicts when
# group_ctr gets too high.
if (group_ctr >= num_proc_banks or coil_bank >= 16):
self.log.warning("Driver group %d mapped to driver index"
"outside of P3-ROC control. These Drivers "
"will become VirtualDrivers. Note, the "
"index will not match the board/bank "
"number; so software will need to request "
"those values before updating the "
"drivers.", coil_bank)
self.indexes.append(coil_bank)
else:
self.log.debug("Driver group %02d: slow_time=%d Enable "
"Index=%d", group_ctr, 0, coil_bank)
self.indexes[group_ctr] = coil_bank
proc.driver_update_group_config(group_ctr,
0,
coil_bank,
0,
0,
False,
True,
True,
True)
group_ctr += 1
for i in range(group_ctr, 26):
self.log.debug("Driver group %02d: disabled", i)
proc.driver_update_group_config(i,
self.lamp_matrix_strobe_time,
0,
0,
0,
False,
True,
False,
True)
# Make sure there are two indexes. If not, fill them in.
while len(lamp_source_bank_list) < 2:
lamp_source_bank_list.append(0)
# Now set up globals. First disable them to allow the P3-ROC to set up
# the polarities on the Drivers. Then enable them.
self.configure_globals(proc, lamp_source_bank_list, False)
self.configure_globals(proc, lamp_source_bank_list, True)
def initialize_drivers(self, proc):
# Loop through all of the drivers, initializing them with the polarity.
for i in range(0, 208):
state = {'driverNum': i,
'outputDriveTime': 0,
'polarity': True,
'state': False,
'waitForFirstTimeSlot': False,
'timeslots': 0,
'patterOnTime': 0,
'patterOffTime': 0,
'patterEnable': False,
'futureEnable': False}
proc.driver_update_state(state)
def get_globals(self, config):
if 'PRDriverGlobals' in config and 'lamp_matrix_strobe_time' \
in config['PRDriverGlobals']:
self.lamp_matrix_strobe_time = int(config['PRDriverGlobals']
['lamp_matrix_strobe_time'])
else:
self.lamp_matrix_strobe_time = 100
if 'PRDriverGlobals' in config and 'watchdog_time' \
in config['PRDriverGlobals']:
self.watchdog_time = int(config['PRDriverGlobals']
['watchdog_time'])
else:
self.watchdog_time = 1000
if 'PRDriverGlobals' in config and 'use_watchdog' \
in config['PRDriverGlobals']:
self.use_watchdog = bool(config['PRDriverGlobals']['use_watchdog'])
else:
self.use_watchdog = True
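    # Example YAML snippet (hypothetical values) that get_globals() reads:
    #   PRDriverGlobals:
    #     lamp_matrix_strobe_time: 200
    #     watchdog_time: 1000
    #     use_watchdog: True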
def configure_globals(self, proc, lamp_source_bank_list, enable=True):
if enable:
self.log.debug("Configuring PDB Driver Globals: polarity = %s "
"matrix column index 0 = %d matrix column index "
"1 = %d", True, lamp_source_bank_list[0],
lamp_source_bank_list[1]);
proc.driver_update_global_config(enable, # Don't enable outputs yet
True, # Polarity
False, # N/A
False, # N/A
1, # N/A
lamp_source_bank_list[0],
lamp_source_bank_list[1],
False, # Active low rows? No
False, # N/A
False, # Stern? No
False, # Reset watchdog trigger
self.use_watchdog, # Enable watchdog
self.watchdog_time)
        # Now set up the same globals again, this time with outputs enabled.
        proc.driver_update_global_config(True,  # Enable outputs
True, # Polarity
False, # N/A
False, # N/A
1, # N/A
lamp_source_bank_list[0],
lamp_source_bank_list[1],
False, # Active low rows? No
False, # N/A
False, # Stern? No
False, # Reset watchdog trigger
self.use_watchdog, # Enable watchdog
self.watchdog_time)
def get_proc_number(self, device_type, number_str):
"""Returns the P3-ROC number for the requested driver string.
This method uses the driver string to look in the indexes list that
was set up when the PDBs were configured. The resulting P3-ROC index
        * 8 is the first driver number in the group, and the driver's output
        number is added to that.
"""
if device_type == 'coil':
coil = PDBCoil(self, number_str)
bank = coil.bank()
if bank == -1:
                return -1
index = self.indexes.index(coil.bank())
num = index * 8 + coil.output()
return num
if device_type == 'light':
lamp = PDBLight(self, number_str)
if lamp.lamp_type == 'unknown':
                return -1
elif lamp.lamp_type == 'dedicated':
return lamp.dedicated_output()
lamp_dict_for_index = {'source_board': lamp.source_board(),
'sink_bank': lamp.sink_bank(),
'source_output': lamp.source_output()}
if lamp_dict_for_index not in self.indexes:
return -1
index = self.indexes.index(lamp_dict_for_index)
num = index * 8 + lamp.sink_output()
return num
if device_type == 'switch':
switch = PDBSwitch(self, number_str)
num = switch.proc_num()
return num
class DriverAlias(object):
def __init__(self, key, value):
self.expr = re.compile(key)
self.repl = value
def matches(self, addr):
return self.expr.match(addr)
def decode(self, addr):
return self.expr.sub(repl=self.repl, string=addr)
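# Example (hypothetical alias): DriverAlias(r'X(\d+)', r'A0-B0-\1') rewrites
# 'X5' to 'A0-B0-5' before decode_pdb_address() parses it.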
def is_pdb_address(addr, aliases=[]):
"""Returne True if the given address is a valid PDB address."""
try:
decode_pdb_address(addr=addr, aliases=aliases)
return True
    except ValueError:
return False
def decode_pdb_address(addr, aliases=[]):
"""Decodes Ax-By-z or x/y/z into PDB address, bank number, and output
number.
Raises a ValueError exception if it is not a PDB address, otherwise returns
    a tuple of (board, bank, output).
"""
for alias in aliases:
if alias.matches(addr):
addr = alias.decode(addr)
break
if '-' in addr: # Ax-By-z form
params = addr.rsplit('-')
if len(params) != 3:
raise ValueError('pdb address must have 3 components')
board = int(params[0][1:])
bank = int(params[1][1:])
        output = int(params[2])
return (board, bank, output)
elif '/' in addr: # x/y/z form
params = addr.rsplit('/')
if len(params) != 3:
raise ValueError('pdb address must have 3 components')
board = int(params[0])
bank = int(params[1])
output = int(params[2])
return (board, bank, output)
else:
        raise ValueError('PDB address delimiter (- or /) not found.')
# The MIT License (MIT)
# Original code on which this module was based:
# Copyright (c) 2009-2011 Adam Preble and Gerry Stellenberg
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
jabdoa2/mpf
|
mpf/platform/p3_roc.py
|
Python
|
mit
| 53,581
|
[
"Brian"
] |
490fabd3d35d33da0d16f1e5e9941a3094248bcf0721b8e38760ba5b84cef0f4
|
# This file is part of Bioy
#
# Bioy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Bioy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Bioy. If not, see <http://www.gnu.org/licenses/>.
"""Provide some fasta or fastq plots for length distribution and quality scores
"""
import logging
import matplotlib
import pandas as pd
from bioy_pkg.utils import Opener
from bioy_pkg.sequtils import fastalite
log = logging.getLogger(__name__)
def build_parser(parser):
# required inputs
parser.add_argument(
'intype',
        choices=['fasta', 'csv'],
        help='plot either a fasta or a csv file')
parser.add_argument(
'infile',
type=Opener(),
        help='fasta or csv input file')
# common outputs
parser.add_argument(
'-o', '--out',
metavar='FILE',
default='plot.pdf',
help="Classification results.")
parser.add_argument(
'--column',
        help='if csv is specified, select a column to plot')
parser.add_argument(
'--xaxis',
        help='comma-separated x axis tick values')
parser.add_argument(
'--title',
help=('density plot title'))
parser.add_argument(
'--limit', type=int, help='limit number of rows read')
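# Hypothetical invocation (the exact subcommand name depends on how bioy
# registers this module):
#   bioy plot_sequence_length fasta seqs.fasta -o lengths.pdf --title 'lengths'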
def action(args):
# for debugging:
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
if args.intype == 'fasta':
fa = fastalite(args.infile, limit=args.limit)
df = pd.Series(data={f.id: f.seq for f in fa}, name='seq')
df = df.reset_index()
df = df.set_index('index')
df.index.name = 'id'
df['length'] = df['seq'].apply(len)
column = 'length'
    else:  # elif args.intype == 'csv':
df = pd.read_csv(args.infile)
column = args.column
    xticks = [float(x) for x in args.xaxis.split(',')] if args.xaxis else None
# do not display plots
    matplotlib.use('Agg')
    # plot a kernel density estimate of the selected column
pl = df[column].plot(kind='kde',
title=args.title,
xticks=xticks)
log.info('printing to {}'.format(args.out))
pl.get_figure().savefig(args.out)
|
crosenth/bioy
|
bioy_pkg/subcommands/plot_sequence_length.py
|
Python
|
gpl-3.0
| 2,686
|
[
"BLAST"
] |
3ed82286c7633c5f90a0e46a7518f10be3c59e2384e5ee19cfaf1a834668c51c
|
import six
from json import dumps
from unittest import TestCase
try:
from unittest.mock import patch, mock_open
except ImportError:
    from mock import patch, mock_open
from six.moves import builtins
from .sample_data import PARAMS, RECORD0, RECORD1, RECORD2, RECORD3, RECORD4
from dark.reads import Read, Reads
from dark.hsp import HSP
from dark.score import LowerIsBetterScore
from dark.blast.alignments import BlastReadsAlignments
from dark.titles import titleCounts, TitleAlignments, TitlesAlignments
from dark.utils import StringIO
class TestTitleCounts(TestCase):
"""
Test the titleCounts function.
"""
def testEmpty(self):
"""
If passed an empty readsAlignments, titleCounts must return an
empty dictionary.
"""
mockOpener = mock_open(read_data=dumps(PARAMS) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = BlastReadsAlignments(reads, 'file.json')
self.assertEqual({}, titleCounts(readsAlignments))
def testThreeRecords(self):
"""
If alignments for three reads are passed to titleCounts, it must
return the correct title counts.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
self.assertEqual(
{
'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99': 1,
'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.': 1,
'gi|887699|gb|DQ37780 Cowpox virus 15': 1,
'gi|887699|gb|DQ37780 Monkeypox virus 456': 1,
'gi|887699|gb|DQ37780 Squirrelpox virus 55': 1
},
titleCounts(readsAlignments))
def testDuplicatedTitle(self):
"""
If alignments for reads have a common title, the count on that title
must be correct.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
self.assertEqual(
{
'gi|887699|gb|DQ37780 Cowpox virus 15': 2,
},
titleCounts(readsAlignments))
class TestTitlesAlignments(TestCase):
"""
Test the TitlesAlignments class
"""
def testEmpty(self):
"""
An instance of TitlesAlignments must have no titles if passed an
empty readsAlignments instance.
"""
mockOpener = mock_open(read_data=(dumps(PARAMS) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
self.assertEqual([], list(titlesAlignments))
def testExpectedTitles(self):
"""
An instance of TitlesAlignments must have the expected titles.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
self.assertEqual(
[
'gi|887699|gb|DQ37780 Cowpox virus 15',
'gi|887699|gb|DQ37780 Monkeypox virus 456',
'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.',
'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99',
'gi|887699|gb|DQ37780 Squirrelpox virus 55',
],
sorted(titlesAlignments))
def testExpectedTitleDetails(self):
"""
An instance of TitleAlignments in a TitlesAlignments instance must
have the expected attributes.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
read = Read('id0', 'A' * 70)
reads.add(read)
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
title = 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99'
titleAlignments = titlesAlignments[title]
self.assertEqual(title, titleAlignments.subjectTitle)
self.assertEqual(37000, titleAlignments.subjectLength)
self.assertEqual(1, len(titleAlignments))
self.assertEqual(read, titleAlignments[0].read)
self.assertEqual(HSP(20), titleAlignments[0].hsps[0])
title = 'gi|887699|gb|DQ37780 Squirrelpox virus 55'
titleAlignments = titlesAlignments[title]
self.assertEqual(title, titleAlignments.subjectTitle)
self.assertEqual(38000, titleAlignments.subjectLength)
self.assertEqual(1, len(titleAlignments))
self.assertEqual(read, titleAlignments[0].read)
self.assertEqual(HSP(25), titleAlignments[0].hsps[0])
def testTitleCollection(self):
"""
A title that occurs in the alignments of multiple reads must have
the data from both reads collected properly.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
read2 = Read('id2', 'A' * 70)
read3 = Read('id3', 'A' * 70)
reads.add(read2)
reads.add(read3)
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
title = 'gi|887699|gb|DQ37780 Cowpox virus 15'
titleAlignments = titlesAlignments[title]
self.assertEqual(title, titleAlignments.subjectTitle)
self.assertEqual(30000, titleAlignments.subjectLength)
self.assertEqual(2, len(titleAlignments))
self.assertEqual(read2, titleAlignments[0].read)
self.assertEqual(HSP(20), titleAlignments[0].hsps[0])
self.assertEqual(read3, titleAlignments[1].read)
self.assertEqual(HSP(20), titleAlignments[1].hsps[0])
def testAddTitleRepeat(self):
"""
The addTitle function must raise a C{KeyError} if an attempt is made
to add a pre-existing title to a TitlesAlignments instance.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
title = 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99'
titleAlignments = TitleAlignments(title, 55)
error = (
"Title 'gi\\|887699\\|gb\\|DQ37780 Squirrelpox virus "
"1296/99' already present in TitlesAlignments instance\\.")
six.assertRaisesRegex(
self, KeyError, error, titlesAlignments.addTitle, title,
titleAlignments)
def testAddTitle(self):
"""
The addTitle function must add a title to the TitlesAlignments
instance.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
title = 'gi|887699|gb|DQ37780 Squirrelpox virus 23'
titleAlignments = TitleAlignments(title, 55)
self.assertTrue(title not in titlesAlignments)
titlesAlignments.addTitle(title, titleAlignments)
self.assertTrue(title in titlesAlignments)
def testHsps(self):
"""
The hsps function must yield all the hsps for all titles in a
TitlesAlignments instance.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = list(titlesAlignments.hsps())
self.assertEqual(
sorted([HSP(20), HSP(25), HSP(20), HSP(20), HSP(20)]),
sorted(result))
def testTwoJSONInputsWithSubjectInCommon(self):
"""
If two JSON files are passed to L{BlastReadsAlignments} with a matched
subject in common and a TitlesAlignments is made, the title in the
TitlesAlignments must have information from both reads, including the
correct HSP scores.
"""
class SideEffect(object):
def __init__(self):
self.first = True
def sideEffect(self, _ignoredFilename, **kwargs):
if self.first:
self.first = False
return StringIO(dumps(PARAMS) + '\n' +
dumps(RECORD2) + '\n')
else:
return StringIO(dumps(PARAMS) + '\n' +
dumps(RECORD4) + '\n')
title = 'gi|887699|gb|DQ37780 Cowpox virus 15'
sideEffect = SideEffect()
with patch.object(builtins, 'open') as mockMethod:
mockMethod.side_effect = sideEffect.sideEffect
reads = Reads()
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id4', 'A' * 70))
readsAlignments = BlastReadsAlignments(
reads, ['file1.json', 'file2.json'])
titlesAlignments = TitlesAlignments(readsAlignments)
titleAlignments = titlesAlignments[title]
self.assertEqual(title, titleAlignments.subjectTitle)
self.assertEqual(4, titleAlignments.hspCount())
self.assertEqual('id2', titleAlignments[0].read.id)
self.assertEqual('id4', titleAlignments[1].read.id)
# First matching read has one HSP.
self.assertEqual(HSP(20), titleAlignments[0].hsps[0])
# Second matching read has three HSPs.
self.assertEqual(HSP(10), titleAlignments[1].hsps[0])
self.assertEqual(HSP(5), titleAlignments[1].hsps[1])
self.assertEqual(HSP(3), titleAlignments[1].hsps[2])
def testToDict(self):
"""
The toDict method must return the expected value.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'C' * 70))
reads.add(Read('id2', 'G' * 70))
reads.add(Read('id3', 'T' * 70))
readsAlignments = BlastReadsAlignments(
reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
self.assertEqual(
{
'scoreClass': 'LowerIsBetterScore',
'titles': {
'gi|887699|gb|DQ37780 Cowpox virus 15': {
'subjectLength': 30000,
'subjectTitle': 'gi|887699|gb|DQ37780 Cowpox virus 15',
'titleAlignments': [
{
'hsps': [
{
'identicalCount': None,
'percentIdentical': None,
'positiveCount': None,
'percentPositive': None,
'readEnd': 68,
'readEndInSubject': 1405,
'readFrame': 1,
'readMatchedSequence': (
'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA'),
'readStart': 27,
'readStartInSubject': 1334,
'score': 1e-06,
'subjectEnd': 1400,
'subjectFrame': 1,
'subjectMatchedSequence': (
'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA'),
'subjectStart': 1361}],
'read': {
'id': 'id2',
'quality': None,
'sequence': 'G' * 70,
}
},
{
'hsps': [
{
'identicalCount': None,
'percentIdentical': None,
'positiveCount': None,
'percentPositive': None,
'readEnd': 68,
'readEndInSubject': 1405,
'readFrame': 1,
'readMatchedSequence': (
'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA'),
'readStart': 27,
'readStartInSubject': 1334,
'score': 1e-05,
'subjectEnd': 1400,
'subjectFrame': 1,
'subjectMatchedSequence': (
'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA'),
'subjectStart': 1361}],
'read': {
'id': 'id3',
'quality': None,
'sequence': 'T' * 70,
},
},
],
},
'gi|887699|gb|DQ37780 Monkeypox virus 456': {
'subjectLength': 35000,
'subjectTitle': (
'gi|887699|gb|DQ37780 Monkeypox virus 456'),
'titleAlignments': [
{
'hsps': [
{
'identicalCount': None,
'percentIdentical': None,
'positiveCount': None,
'percentPositive': None,
'readEnd': 68,
'readEndInSubject': 11405,
'readFrame': 1,
'readMatchedSequence': (
'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA'),
'readStart': 27,
'readStartInSubject': 11334,
'score': 1e-08,
'subjectEnd': 11400,
'subjectFrame': 1,
'subjectMatchedSequence': (
'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA'),
'subjectStart': 11361}],
'read': {
'id': 'id1',
'quality': None,
'sequence': 'C' * 70,
},
},
],
},
'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.': {
'subjectLength': 35000,
'subjectTitle': (
'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.'),
'titleAlignments': [
{
'hsps': [
{
'identicalCount': None,
'percentIdentical': None,
'positiveCount': None,
'percentPositive': None,
'readEnd': 68,
'readEndInSubject': 10405,
'readFrame': 1,
'readMatchedSequence': (
'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA'),
'readStart': 27,
'readStartInSubject': 10334,
'score': 1e-07,
'subjectEnd': 10400,
'subjectFrame': 1,
'subjectMatchedSequence': (
'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA'),
'subjectStart': 10361}],
'read': {
'id': 'id1',
'quality': None,
'sequence': 'C' * 70,
},
},
],
},
'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99': {
'subjectLength': 37000,
'subjectTitle': (
'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99'),
'titleAlignments': [
{
'hsps': [
{
'identicalCount': None,
'percentIdentical': None,
'positiveCount': None,
'percentPositive': None,
'readEnd': 68,
'readEndInSubject': 15405,
'readFrame': 1,
'readMatchedSequence': (
'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA'),
'readStart': 27,
'readStartInSubject': 15334,
'score': 1e-11,
'subjectEnd': 15400,
'subjectFrame': 1,
'subjectMatchedSequence': (
'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA'),
'subjectStart': 15361}],
'read': {
'id': 'id0',
'quality': None,
'sequence': 'A' * 70,
},
},
],
},
'gi|887699|gb|DQ37780 Squirrelpox virus 55': {
'subjectLength': 38000,
'subjectTitle': (
'gi|887699|gb|DQ37780 Squirrelpox virus 55'),
'titleAlignments': [
{
'hsps': [
{
'identicalCount': None,
'percentIdentical': None,
'positiveCount': None,
'percentPositive': None,
'readEnd': 68,
'readEndInSubject': 12405,
'readFrame': 1,
'readMatchedSequence': (
'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA'),
'readStart': 27,
'readStartInSubject': 12334,
'score': 1e-10,
'subjectEnd': 12400,
'subjectFrame': 1,
'subjectMatchedSequence': (
'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA'),
'subjectStart': 12361}],
'read': {
'id': 'id0',
'quality': None,
'sequence': 'A' * 70,
},
},
],
},
},
},
titlesAlignments.toDict())
class TestTitlesAlignmentsFiltering(TestCase):
"""
Test the TitlesAlignments class filter function.
"""
def testFilterWithNoArguments(self):
"""
The filter function must return a TitlesAlignments instance with all
the titles of the original when called with no arguments.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter()
self.assertEqual(
[
'gi|887699|gb|DQ37780 Cowpox virus 15',
'gi|887699|gb|DQ37780 Monkeypox virus 456',
'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.',
'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99',
'gi|887699|gb|DQ37780 Squirrelpox virus 55',
],
sorted(result))
def testMinMatchingReads(self):
"""
The filter function must work correctly when passed a value for
minMatchingReads.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minMatchingReads=2)
self.assertEqual(
[
'gi|887699|gb|DQ37780 Cowpox virus 15',
],
list(result))
def testMaxMatchingReads(self):
"""
The filter function must work correctly when passed a value for
maxMatchingReads.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(maxMatchingReads=1)
# Cowpox virus 15 is not in the results as it is matched by two
# reads.
self.assertEqual(
sorted([
'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99',
'gi|887699|gb|DQ37780 Squirrelpox virus 55',
'gi|887699|gb|DQ37780 Monkeypox virus 456',
'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.'
]),
sorted(result))
def testMinMedianScore_Bits(self):
"""
The filter function must work correctly when passed a value for
minMedianScore when using bit scores.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minMedianScore=22)
self.assertEqual(
[
'gi|887699|gb|DQ37780 Squirrelpox virus 55',
],
list(result))
def testMinMedianScore_EValue(self):
"""
The filter function must work correctly when passed a value for
minMedianScore when using e values.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(
reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minMedianScore=1e-9)
self.assertEqual(
[
'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99',
'gi|887699|gb|DQ37780 Squirrelpox virus 55',
],
sorted(result))
def testWithScoreBetterThan_Bits(self):
"""
The filter function must work correctly when passed a value for
withScoreBetterThan when using bit scores.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(withScoreBetterThan=24)
self.assertEqual(
[
'gi|887699|gb|DQ37780 Squirrelpox virus 55',
],
list(result))
def testWithScoreBetterThan_EValue(self):
"""
The filter function must work correctly when passed a value for
withScoreBetterThan when using e values.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(
reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(withScoreBetterThan=1e-10)
self.assertEqual(
[
'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99',
],
list(result))
def testReadSetFilterAllowAnything(self):
"""
        The filter function must work correctly when passed a 0.0 value for
        minNewReads, i.e. one that considers any read set sufficiently novel.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minNewReads=0.0)
self.assertEqual(
[
'gi|887699|gb|DQ37780 Cowpox virus 15',
'gi|887699|gb|DQ37780 Monkeypox virus 456',
'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.',
'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99',
'gi|887699|gb|DQ37780 Squirrelpox virus 55',
],
sorted(result))
def testReadSetFilterStrict(self):
"""
The filter function must work correctly when passed a 1.0 value for
minNewReads.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minNewReads=1.0)
# Either 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.'
# invalidates 'gi|887699|gb|DQ37780 Monkeypox virus 456' or
# vice-versa. It depends on Python's dict walking order. Check
# for both, making sure just one of them is true.
mummypox = 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.'
monkeypox = 'gi|887699|gb|DQ37780 Monkeypox virus 456'
assertionCount = 0
if mummypox in result:
self.assertTrue(monkeypox in
result.readSetFilter.invalidates(mummypox))
assertionCount += 1
if monkeypox in result:
self.assertTrue(mummypox in
result.readSetFilter.invalidates(monkeypox))
assertionCount += 1
self.assertEqual(1, assertionCount)
def testCoverageExcludesAll(self):
"""
        The filter function must return a TitlesAlignments instance with
        no titles if none of its titles has sufficient coverage.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minCoverage=0.1)
self.assertEqual(0, len(result))
def testCoverageIncludesAll(self):
"""
        The filter function must return a TitlesAlignments instance with
        all titles if all of its titles have sufficient coverage.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(minCoverage=0.0)
self.assertEqual(
[
'gi|887699|gb|DQ37780 Cowpox virus 15',
'gi|887699|gb|DQ37780 Monkeypox virus 456',
'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.',
'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99',
'gi|887699|gb|DQ37780 Squirrelpox virus 55',
],
sorted(result))
def testCoverageIncludesSome(self):
"""
        The filter function must return a TitlesAlignments instance with
only the expected titles if only some of its titles have sufficient
coverage.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
# To understand why the following produces the result it does,
# you need to look at the HSP coverage in sample_data.py and
# calculate the coverage by hand.
result = titlesAlignments.filter(minCoverage=0.0011)
self.assertEqual(
[
'gi|887699|gb|DQ37780 Cowpox virus 15',
'gi|887699|gb|DQ37780 Monkeypox virus 456',
'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.',
],
sorted(result))
def testMaxTitlesNegative(self):
"""
The filter function must raise a ValueError if maxTitles is less than
zero.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
error = '^maxTitles \\(-1\\) cannot be negative\\.$'
six.assertRaisesRegex(self, ValueError, error,
titlesAlignments.filter, maxTitles=-1)
def testUnknownSortOn(self):
"""
The filter function must raise a ValueError if the passed sortOn
value isn't recognized.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
error = ('^Sort attribute must be one of "length", "maxScore", '
'"medianScore", "readCount", "title"\\.$')
six.assertRaisesRegex(self, ValueError, error,
titlesAlignments.filter, maxTitles=0,
sortOn='unknown')
def testMaxTitlesZero(self):
"""
The filter function must return an empty result when maxTitles is zero.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(maxTitles=0, sortOn='maxScore')
self.assertEqual(0, len(result))
def testMaxTitlesOne(self):
"""
The filter function must return just the best title when maxTitles
is one.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(maxTitles=1, sortOn='maxScore')
self.assertEqual(
[
'gi|887699|gb|DQ37780 Squirrelpox virus 55',
],
sorted(result))
def testMaxTitlesTwoSortOnLength(self):
"""
The filter function must return the two titles whose sequences are the
longest when maxTitles is 2 and sortOn is 'length'.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(maxTitles=2, sortOn='length')
self.assertEqual(
[
'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99',
'gi|887699|gb|DQ37780 Squirrelpox virus 55',
],
sorted(result))
def testTitleRegex(self):
"""
The filter function must return only titles that match a passed regex.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(titleRegex='squirrelpox')
self.assertEqual(
[
'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99',
'gi|887699|gb|DQ37780 Squirrelpox virus 55',
],
sorted(result))
def testTitleNegativeRegex(self):
"""
The filter function must not return titles that match a passed negative
regex.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(negativeTitleRegex='squirrelpox')
self.assertEqual(
[
'gi|887699|gb|DQ37780 Cowpox virus 15',
'gi|887699|gb|DQ37780 Monkeypox virus 456',
'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.',
],
sorted(result))
def testTitleRegexThenNegativeRegex(self):
"""
The filter function must return only titles that match a passed title
regex and are not then excluded by a subsequently applied negative regex.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.filter(
titleRegex='squirrelpox').filter(negativeTitleRegex='1296')
self.assertEqual(
[
'gi|887699|gb|DQ37780 Squirrelpox virus 55',
],
sorted(result))
class TestTitleSorting(TestCase):
"""
Tests for the L{dark.titles.TitlesAlignments.sortTitles} function.
"""
def testUnknown(self):
"""
Sorting on an unknown attribute must raise C{ValueError}.
"""
mockOpener = mock_open(read_data=dumps(PARAMS) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
self.assertRaises(ValueError, titlesAlignments.sortTitles, 'xxx')
def testEmpty(self):
"""
Sorting when there are no titles must return the empty list.
"""
mockOpener = mock_open(read_data=dumps(PARAMS) + '\n')
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('title')
self.assertEqual([], result)
def testMedianScore_Bits(self):
"""
Sorting on median score must work when scores are bit scores,
including a secondary sort on title.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n' + dumps(RECORD4) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
reads.add(Read('id4', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('medianScore')
self.assertEqual([
'gi|887699|gb|DQ37780 Squirrelpox virus 55', # 25
'gi|887699|gb|DQ37780 Monkeypox virus 456', # 20
'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', # 20
'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', # 20
'gi|887699|gb|DQ37780 Cowpox virus 15', # 20
], result)
def testMedianScore_EValue(self):
"""
Sorting on median score must work when scores are e values,
including a secondary sort on title.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n' + dumps(RECORD4) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
reads.add(Read('id4', 'A' * 70))
readsAlignments = BlastReadsAlignments(
reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('medianScore')
self.assertEqual([
'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', # 1e-11
'gi|887699|gb|DQ37780 Squirrelpox virus 55', # 1e-10
'gi|887699|gb|DQ37780 Monkeypox virus 456', # 1e-8
'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', # 1e-7
'gi|887699|gb|DQ37780 Cowpox virus 15', # worst :-)
], result)
def testMaxScore_Bits(self):
"""
Sorting on max score must work when scores are bit scores, including a
secondary sort on title.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(reads, 'file.json')
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('maxScore')
self.assertEqual([
'gi|887699|gb|DQ37780 Squirrelpox virus 55', # 25
'gi|887699|gb|DQ37780 Cowpox virus 15', # 20
'gi|887699|gb|DQ37780 Monkeypox virus 456', # 20
'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', # 20
'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', # 20
], result)
def testMaxScore_EValue(self):
"""
Sorting on max score must work when scores are e values, including a
secondary sort on title.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(
reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('maxScore')
# self.assertEqual([
# 'gi|887699|gb|DQ37780 Cowpox virus 15', # 1e-6
# 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', # 1e-7
# 'gi|887699|gb|DQ37780 Monkeypox virus 456', # 1e-8
# 'gi|887699|gb|DQ37780 Squirrelpox virus 55', # 1e-10
# 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', # 1e-11
# ], result)
self.assertEqual([
'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', # 1e-11
'gi|887699|gb|DQ37780 Squirrelpox virus 55', # 1e-10
'gi|887699|gb|DQ37780 Monkeypox virus 456', # 1e-8
'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', # 1e-7
'gi|887699|gb|DQ37780 Cowpox virus 15', # 1e-6
], result)
def testReadCount(self):
"""
Sorting on read count must work, including a secondary sort on title.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(
reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('readCount')
self.assertEqual([
'gi|887699|gb|DQ37780 Cowpox virus 15', # 3
'gi|887699|gb|DQ37780 Monkeypox virus 456', # 1
'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', # 1
'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', # 1
'gi|887699|gb|DQ37780 Squirrelpox virus 55', # 1
], result)
def testLength(self):
"""
Sorting on sequence length must work, including a secondary sort on
title.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(
reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('length')
self.assertEqual([
'gi|887699|gb|DQ37780 Squirrelpox virus 55', # 38000
'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99', # 37000
'gi|887699|gb|DQ37780 Monkeypox virus 456', # 35000
'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.', # 35000
'gi|887699|gb|DQ37780 Cowpox virus 15', # 30000
], result)
def testTitle(self):
"""
Sorting on title must work.
"""
mockOpener = mock_open(read_data=(
dumps(PARAMS) + '\n' + dumps(RECORD0) + '\n' +
dumps(RECORD1) + '\n' + dumps(RECORD2) + '\n' +
dumps(RECORD3) + '\n'))
with patch.object(builtins, 'open', mockOpener):
reads = Reads()
reads.add(Read('id0', 'A' * 70))
reads.add(Read('id1', 'A' * 70))
reads.add(Read('id2', 'A' * 70))
reads.add(Read('id3', 'A' * 70))
readsAlignments = BlastReadsAlignments(
reads, 'file.json', scoreClass=LowerIsBetterScore)
titlesAlignments = TitlesAlignments(readsAlignments)
result = titlesAlignments.sortTitles('title')
self.assertEqual([
'gi|887699|gb|DQ37780 Cowpox virus 15',
'gi|887699|gb|DQ37780 Monkeypox virus 456',
'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.',
'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99',
'gi|887699|gb|DQ37780 Squirrelpox virus 55',
], result)
|
terrycojones/dark-matter
|
test/blast/test_titles.py
|
Python
|
mit
| 55,975
|
[
"BLAST"
] |
b9c32ff5fd825683500a4cf341854cfdbc91b945eff5d7469115f84ddb103871
|
"""
Python interface to GnuPG.
This module is used to invoke GnuPG to check the digital signatures on interfaces.
@see: L{iface_cache.PendingFeed}
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _
import subprocess
import base64, re
import os
import tempfile
from logging import info, warn
from zeroinstall.support import find_in_path, basedir
from zeroinstall.injector.trust import trust_db
from zeroinstall.injector.model import SafeException
_gnupg_options = None
def _run_gpg(args, **kwargs):
global _gnupg_options
if _gnupg_options is None:
gpg_path = find_in_path('gpg') or find_in_path('gpg2') or 'gpg'
_gnupg_options = [gpg_path, '--no-secmem-warning']
if hasattr(os, 'geteuid') and os.geteuid() == 0 and 'GNUPGHOME' not in os.environ:
_gnupg_options += ['--homedir', os.path.join(basedir.home, '.gnupg')]
info(_("Running as root, so setting GnuPG home to %s"), _gnupg_options[-1])
return subprocess.Popen(_gnupg_options + args, **kwargs)
class Signature(object):
"""Abstract base class for signature check results.
@ivar status: the raw data returned by GPG
@ivar messages: any messages printed by GPG which may be relevant to this signature
"""
status = None
messages = None
def __init__(self, status):
self.status = status
def is_trusted(self, domain = None):
"""Whether this signature is trusted by the user."""
return False
def need_key(self):
"""Returns the ID of the key that must be downloaded to check this signature."""
return None
class ValidSig(Signature):
"""A valid signature check result."""
FINGERPRINT = 0
TIMESTAMP = 2
def __str__(self):
return "Valid signature from " + self.status[self.FINGERPRINT]
def is_trusted(self, domain = None):
"""Asks the L{trust.trust_db}."""
return trust_db.is_trusted(self.status[self.FINGERPRINT], domain)
def get_timestamp(self):
"""Get the time this signature was made."""
return int(self.status[self.TIMESTAMP])
fingerprint = property(lambda self: self.status[self.FINGERPRINT])
def get_details(self):
"""Call 'gpg --list-keys' and return the results split into lines and columns.
@rtype: [[str]]"""
# Note: GnuPG 2 always uses --fixed-list-mode
child = _run_gpg(['--fixed-list-mode', '--with-colons', '--list-keys', self.fingerprint], stdout = subprocess.PIPE)
cout, unused = child.communicate()
if child.returncode:
info(_("GPG exited with code %d") % child.returncode)
details = []
for line in cout.split('\n'):
details.append(line.split(':'))
return details
class BadSig(Signature):
"""A bad signature (doesn't match the message)."""
KEYID = 0
def __str__(self):
return _("BAD signature by %s (the message has been tampered with)") \
% self.status[self.KEYID]
class ErrSig(Signature):
"""Error while checking a signature."""
KEYID = 0
ALG = 1
RC = -1
def __str__(self):
msg = _("ERROR signature by %s: ") % self.status[self.KEYID]
rc = int(self.status[self.RC])
if rc == 4:
msg += _("Unknown or unsupported algorithm '%s'") % self.status[self.ALG]
elif rc == 9:
msg += _("Unknown key. Try 'gpg --recv-key %s'") % self.status[self.KEYID]
else:
msg += _("Unknown reason code %d") % rc
return msg
def need_key(self):
rc = int(self.status[self.RC])
if rc == 9:
return self.status[self.KEYID]
return None
class Key:
"""A GPG key.
@since: 0.27
@param fingerprint: the fingerprint of the key
@type fingerprint: str
@ivar name: a short name for the key, extracted from the full name
@type name: str
"""
def __init__(self, fingerprint):
self.fingerprint = fingerprint
self.name = '(unknown)'
def get_short_name(self):
return self.name.split(' (', 1)[0].split(' <', 1)[0]
def load_keys(fingerprints):
"""Load a set of keys at once.
This is much more efficient than making individual calls to L{load_key}.
@return: a dict of loaded keys, indexed by fingerprint
@rtype: {str: L{Key}}
@since: 0.27"""
import codecs
keys = {}
# Otherwise GnuPG returns everything...
if not fingerprints: return keys
for fp in fingerprints:
keys[fp] = Key(fp)
current_fpr = None
current_uid = None
child = _run_gpg(['--fixed-list-mode', '--with-colons', '--list-keys',
'--with-fingerprint', '--with-fingerprint'] + fingerprints, stdout = subprocess.PIPE)
try:
for line in child.stdout:
if line.startswith('pub:'):
current_fpr = None
current_uid = None
if line.startswith('fpr:'):
current_fpr = line.split(':')[9]
if current_fpr in keys and current_uid:
# This is probably a subordinate key, where the fingerprint
# comes after the uid, not before. Note: we assume the subkey is
# cross-certified, as recent ones always are.
try:
keys[current_fpr].name = codecs.decode(current_uid, 'utf-8')
except UnicodeDecodeError:
warn("Not UTF-8: %s", current_uid)
keys[current_fpr].name = current_uid
if line.startswith('uid:'):
assert current_fpr is not None
# Only take primary UID
if current_uid: continue
parts = line.split(':')
current_uid = parts[9]
if current_fpr in keys:
keys[current_fpr].name = current_uid
finally:
if child.wait():
warn(_("gpg --list-keys failed with exit code %d") % child.returncode)
return keys
def load_key(fingerprint):
"""Query gpg for information about this key.
@return: a new key
@rtype: L{Key}
@since: 0.27"""
return load_keys([fingerprint])[fingerprint]
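# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how a caller might look up key names with load_keys()
# and Key.get_short_name(). The fingerprints are whatever the caller obtained
# elsewhere (e.g. from a signature check); nothing here refers to a real key.
def _example_print_key_names(fingerprints):
    """Print a short, human-readable name for each fingerprint (sketch)."""
    keys = load_keys(fingerprints)
    for fingerprint, key in keys.items():
        print("%s: %s" % (fingerprint, key.get_short_name()))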
def import_key(stream):
"""Run C{gpg --import} with this stream as stdin."""
errors = tempfile.TemporaryFile()
child = _run_gpg(['--quiet', '--import', '--batch'],
stdin = stream, stderr = errors)
status = child.wait()
errors.seek(0)
error_messages = errors.read().strip()
errors.close()
if error_messages:
import codecs
decoder = codecs.lookup('utf-8')
error_messages = decoder.decode(error_messages, errors = 'replace')[0]
if status != 0:
if error_messages:
raise SafeException(_("Errors from 'gpg --import':\n%s") % error_messages)
else:
raise SafeException(_("Non-zero exit code %d from 'gpg --import'") % status)
elif error_messages:
warn(_("Warnings from 'gpg --import':\n%s") % error_messages)
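# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of feeding a key file to import_key(). The path is
# hypothetical; import_key() only needs a readable stream and raises
# SafeException when gpg reports a failure.
def _example_import_key_file(key_path):
    """Import a GPG key from a file into the user's keyring (sketch)."""
    with open(key_path, 'rb') as stream:
        try:
            import_key(stream)
        except SafeException as ex:
            warn(_("Key import failed: %s"), ex)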
def _check_xml_stream(stream):
xml_comment_start = '<!-- Base64 Signature'
data_to_check = stream.read()
last_comment = data_to_check.rfind('\n' + xml_comment_start)
if last_comment < 0:
raise SafeException(_("No signature block in XML. Maybe this file isn't signed?"))
last_comment += 1 # Include new-line in data
data = tempfile.TemporaryFile()
data.write(data_to_check[:last_comment])
data.flush()
os.lseek(data.fileno(), 0, 0)
errors = tempfile.TemporaryFile()
sig_lines = data_to_check[last_comment:].split('\n')
if sig_lines[0].strip() != xml_comment_start:
raise SafeException(_('Bad signature block: extra data on comment line'))
while sig_lines and not sig_lines[-1].strip():
del sig_lines[-1]
if sig_lines[-1].strip() != '-->':
raise SafeException(_('Bad signature block: last line is not end-of-comment'))
sig_data = '\n'.join(sig_lines[1:-1])
if re.match('^[ A-Za-z0-9+/=\n]+$', sig_data) is None:
raise SafeException(_("Invalid characters found in base 64 encoded signature"))
try:
sig_data = base64.decodestring(sig_data) # (b64decode is Python 2.4)
except Exception as ex:
raise SafeException(_("Invalid base 64 encoded signature: %s") % str(ex))
sig_fd, sig_name = tempfile.mkstemp(prefix = 'injector-sig-')
try:
sig_file = os.fdopen(sig_fd, 'w')
sig_file.write(sig_data)
sig_file.close()
# Note: Should ideally close status_r in the child, but we want to support Windows too
child = _run_gpg([# Not all versions support this:
#'--max-output', str(1024 * 1024),
'--batch',
# Windows GPG can only cope with "1" here
'--status-fd', '1',
# Don't try to download missing keys; we'll do that
'--keyserver-options', 'no-auto-key-retrieve',
'--verify', sig_name, '-'],
stdin = data,
stdout = subprocess.PIPE,
stderr = errors)
try:
sigs = _get_sigs_from_gpg_status_stream(child.stdout, child, errors)
finally:
os.lseek(stream.fileno(), 0, 0)
stream.seek(0)
finally:
os.unlink(sig_name)
return (stream, sigs)
def check_stream(stream):
"""Pass stream through gpg --decrypt to get the data, the error text,
and a list of signatures (good or bad). If stream starts with "<?xml "
then get the signature from a comment at the end instead (and the returned
data is the original stream). stream must be seekable.
@note: Stream returned may or may not be the one passed in. Be careful!
@return: (data_stream, [Signatures])"""
stream.seek(0)
start = stream.read(6)
stream.seek(0)
if start == "<?xml ":
return _check_xml_stream(stream)
elif start == '-----B':
raise SafeException(_("Plain GPG-signed feeds no longer supported"))
else:
raise SafeException(_("This is not a Zero Install feed! It should be an XML document, but it starts:\n%s") % repr(stream.read(120)))
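# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of a typical caller: verify a signed XML feed with
# check_stream(), then inspect the returned signatures. The feed path and
# the trust domain are hypothetical example values.
def _example_feed_is_trusted(feed_path, domain='example.com'):
    """Return True if the feed carries at least one trusted signature (sketch)."""
    with open(feed_path, 'rb') as raw:
        stream, sigs = check_stream(raw)
        for sig in sigs:
            if sig.is_trusted(domain):
                return True
            missing = sig.need_key()
            if missing:
                info(_("Fetch key %s to be able to check this signature"), missing)
    return False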
def _get_sigs_from_gpg_status_stream(status_r, child, errors):
"""Read messages from status_r and collect signatures from it.
When done, reap 'child'.
If there are no signatures, throw SafeException (using errors
for the error message if non-empty)."""
sigs = []
# Should we error out on bad signatures, even if there's a good
# signature too?
for line in status_r:
assert line.endswith('\n')
if not line.startswith('[GNUPG:] '):
# The docs say every line starts with this, but if auto-key-retrieve
# is on then they might not. See bug #3420548
warn("Invalid output from GnuPG: %r", line)
continue
line = line[9:-1]
split_line = line.split(' ')
code = split_line[0]
args = split_line[1:]
if code == 'VALIDSIG':
sigs.append(ValidSig(args))
elif code == 'BADSIG':
sigs.append(BadSig(args))
elif code == 'ERRSIG':
sigs.append(ErrSig(args))
child.wait() # (ignore exit status)
errors.seek(0)
error_messages = errors.read().strip()
errors.close()
if not sigs:
if error_messages:
raise SafeException(_("No signatures found. Errors from GPG:\n%s") % error_messages)
else:
raise SafeException(_("No signatures found. No error messages from GPG."))
elif error_messages:
# Attach the warnings to all the signatures, in case they're useful.
for s in sigs:
s.messages = error_messages
return sigs
|
dabrahams/zeroinstall
|
zeroinstall/injector/gpg.py
|
Python
|
lgpl-2.1
| 10,404
|
[
"VisIt"
] |
fedca3cf9e893cb558c21148e2d164a4a941c6a1bb505abe80a2df21bc3358eb
|
from collections import defaultdict
from .block import Block
from .common import fail
from .errors import Errors
from .ir import *
from .irvisitor import IRVisitor, IRTransformer
from .loop import Loop
from .scope import SymbolReplacer
from .type import Type
from logging import getLogger
logger = getLogger(__name__)
class LoopUnroller(object):
def process(self, scope):
self.scope = scope
self.unrolled = False
if self._unroll_loop_tree_leaf(scope.top_region()):
# re-order blocks
for blk in scope.traverse_blocks():
blk.order = -1
for stm in blk.stms:
assert stm.block is blk
Block.set_order(scope.entry_block, 0)
return True
return False
def _unroll_loop_tree_leaf(self, loop):
children = sorted(self.scope.child_regions(loop), key=lambda c: c.head.order)
for c in children.copy():
assert isinstance(c, Loop)
if self.scope.is_leaf_region(c):
if not c.head.synth_params['unroll']:
continue
factor = self._parse_factor(c.head.synth_params)
if self._unroll(c, factor):
return True
else:
#del c.head.synth_params['unroll']
for b in c.blocks():
del b.synth_params['unroll']
else:
if self._unroll_loop_tree_leaf(c):
return True
if c.head.synth_params['unroll']:
fail(c.head.stms[-1], Errors.RULE_UNROLL_NESTED_LOOP)
return False
def _parse_factor(self, synth_params):
if isinstance(synth_params['unroll'], str):
if synth_params['unroll'] == 'full':
factor = -1
else:
try:
factor = int(synth_params['unroll'])
except ValueError:
factor = 0
elif isinstance(synth_params['unroll'], int):
factor = synth_params['unroll']
else:
assert False, 'Invalid unroll parameter'
return factor
def _unroll(self, loop, factor):
if factor == 1:
return False
assert self.scope.is_leaf_region(loop)
assert loop.counter
assert loop.init
assert loop.update
assert loop.cond
if len(loop.bodies) > 1:
for b in loop.bodies:
if len(b.succs) > 1:
fail(loop.head.stms[-1],
Errors.RULE_UNROLL_CONTROL_BRANCH)
assert len(loop.bodies) == 1
ret = self._find_loop_range(loop)
if not ret:
fail(loop.head.stms[-1],
Errors.RULE_UNROLL_UNFIXED_LOOP)
loop_min, loop_max, loop_step = ret
if loop_max.is_a(CONST) and loop_min.is_a(CONST):
initial_trip = (((loop_max.value - 1) + loop_step) - loop_min.value) // loop_step
if initial_trip < 1:
return False
if factor == -1 or factor >= loop_max.value:
factor = initial_trip
has_unroll_remain = True if initial_trip % factor else False
is_full_unroll = factor == initial_trip
else:
initial_trip = -1
if factor == -1:
fail(loop.head.stms[-1],
Errors.RULE_UNROLL_UNFIXED_LOOP)
has_unroll_remain = True
is_full_unroll = False
#unroll_trip = initial_trip // factor
origin_body = loop.bodies[0]
defsyms = self.scope.usedef.get_syms_defined_at(loop.head)
origin_ivs = [sym for sym in defsyms if sym.is_induction()]
new_ivs = self._new_ivs(factor, origin_ivs, is_full_unroll)
if is_full_unroll:
unroll_head, iv_updates, loop_cond = self._make_full_unroll_head(loop,
new_ivs)
sym_map = {}
else:
unroll_head, iv_updates, lphis, sym_map = self._make_unroll_head(loop,
loop_max,
loop_step,
factor,
new_ivs)
defsyms = self.scope.usedef.get_syms_defined_at(origin_body)
unroll_blks = self._make_unrolling_blocks(origin_body,
defsyms,
new_ivs,
iv_updates,
sym_map,
factor,
unroll_head)
if is_full_unroll:
self._reconnect_full_unroll_blocks(loop, unroll_head, unroll_blks)
self._replace_outer_uses(loop, new_ivs, factor, {})
# emigrate unrolled blocks
parent = self.scope.parent_region(loop)
for b in [unroll_head] + unroll_blks:
parent.append_body(b)
self.scope.remove_region(loop)
self._remove_loop_condition(loop_cond)
for blk in [unroll_head] + unroll_blks:
blk.synth_params = unroll_head.preds[0].synth_params.copy()
else:
if has_unroll_remain:
remain_start_blk = Block(self.scope)
else:
remain_start_blk = None
new_loop = Loop(unroll_head, unroll_blks, [unroll_head] + unroll_blks)
self._reconnect_unroll_blocks(loop, new_loop, unroll_head, unroll_blks, lphis, remain_start_blk)
self.scope.append_sibling_region(loop, new_loop)
if has_unroll_remain:
assert loop.counter in new_ivs
origin_lphis = {s.var.symbol():s for s in loop.head.stms if s.is_a(LPHI)}
for sym, new_syms in new_ivs.items():
new_sym = new_syms[0]
lphi = origin_lphis[sym]
arg = TEMP(new_sym, Ctx.LOAD)
lphi.args[0] = arg
remain_start_blk.append_stm(EXPR(CONST(0))) # guard from reduceblk
remain_start_blk.append_stm(JUMP(loop.head))
remain_start_blk.succs = [loop.head]
loop.head.preds[0] = remain_start_blk
del loop.head.synth_params['unroll']
parent = self.scope.parent_region(loop)
parent.append_body(remain_start_blk)
else:
self.scope.remove_region(loop)
self._replace_outer_uses(loop, new_ivs, 0, sym_map)
for blk in [unroll_head] + unroll_blks:
del blk.synth_params['unroll']
return True
def _replace_jump_target(self, block, old, new):
jmp = block.stms[-1]
if jmp.is_a(JUMP):
jmp.target = new
elif jmp.is_a(CJUMP):
if jmp.true is old:
jmp.true = new
else:
assert jmp.false is old
jmp.false = new
elif jmp.is_a(MCJUMP):
for i, t in enumerate(jmp.targets):
if t is old:
jmp.targets[i] = new
else:
assert False
def _reconnect_full_unroll_blocks(self, loop, unroll_head, unroll_blks):
loop_pred = loop.head.preds[0]
loop_exit = loop.head.succs[1]
first_blk = unroll_blks[0]
last_blk = unroll_blks[-1]
# loop_pred -> unroll_head
loop_pred.replace_succ(loop.head, unroll_head)
assert unroll_head.preds[0] is loop_pred
assert not loop_pred.succs_loop
# unroll_head
assert len(unroll_head.succs) == 1 and unroll_head.succs[0] is first_blk
assert len(first_blk.preds) == 1 and first_blk.preds[0] is unroll_head
assert not unroll_head.succs_loop
# no loop-back path
unroll_head.preds = [loop_pred]
unroll_head.preds_loop = []
# last_blk -> loop_exit
last_blk.succs = [loop_exit]
last_blk.succs_loop = []
# loop exit
loop_exit.replace_pred(loop.head, last_blk)
jmp = last_blk.stms[-1]
assert jmp.is_a(JUMP)
jmp.typ = ''
jmp.target = loop_exit
def _reconnect_unroll_blocks(self, loop, new_loop, unroll_head, unroll_blks, lphis, remain_start_blk):
loop_pred = loop.head.preds[0]
if remain_start_blk:
loop_exit = remain_start_blk
loop_exit.preds = [unroll_head]
else:
loop_exit = loop.head.succs[1]
loop_exit.replace_pred(loop.head, unroll_head)
first_blk = unroll_blks[0]
last_blk = unroll_blks[-1]
# loop_pred -> unroll_head
loop_pred.replace_succ(loop.head, unroll_head)
assert not loop_pred.succs_loop
# unroll_head -> first_blk | loop_exit
assert len(unroll_head.succs) == 1 and unroll_head.succs[0] is first_blk
assert len(first_blk.preds) == 1 and first_blk.preds[0] is unroll_head
assert not unroll_head.succs_loop
unroll_head.succs.append(loop_exit)
cjmp = unroll_head.stms[-1]
assert cjmp.is_a(CJUMP)
assert cjmp.false is None
cjmp.false = loop_exit
# add loop-back path from last_blk
unroll_head.preds = [loop_pred, last_blk]
unroll_head.preds_loop = [last_blk]
# last_blk -> unroll_head
last_blk.succs = [unroll_head]
last_blk.succs_loop = [unroll_head]
jmp = last_blk.stms[-1]
assert jmp.is_a(JUMP)
assert jmp.typ == 'L'
jmp.target = unroll_head
def _make_full_unroll_head(self, loop, new_ivs):
unroll_head, stm_map = self._clone_block(loop.head, 'unroll_head')
head_stms = []
iv_updates = {}
# append initial move for each lphi
# i#2 = phi(init, i#3) -> i#2_0 = 0
# x#2 = phi(x_init, x#3) -> x#2_0 = x_init
for _, stm in stm_map.items():
if stm.is_a(LPHI):
assert len(stm.args) == 2
orig_sym = stm.var.symbol()
new_sym_0 = new_ivs[orig_sym][0]
dst = TEMP(new_sym_0, Ctx.STORE)
src = stm.args[0]
iv_updates[stm.args[1].symbol()] = new_ivs[orig_sym]
mv = MOVE(dst, src)
head_stms.append(mv)
orig_cjump_cond = unroll_head.stms[-2]
assert orig_cjump_cond.is_a(MOVE) and orig_cjump_cond.src.is_a(RELOP)
src = CONST(1)
mv = MOVE(orig_cjump_cond.dst.clone(), src)
head_stms.append(mv)
orig_cjump = unroll_head.stms[-1]
assert orig_cjump.is_a(CJUMP)
jump = JUMP(None)
jump.loc = orig_cjump.loc
head_stms.append(jump)
unroll_head.stms = []
for stm in head_stms:
unroll_head.append_stm(stm)
return unroll_head, iv_updates, orig_cjump_cond.dst.symbol()
def _make_unroll_head(self, loop, loop_max, loop_step, factor, new_ivs):
unroll_head, stm_map = self._clone_block(loop.head, 'unroll_head')
head_stms = []
iv_updates = {}
lphis = []
sym_map = {}
# append modified lphi
# i#2 = phi(init, i#3) -> i#2_0 = phi(0, i#2_n)
# x#2 = phi(x_init, x#3) -> x#2_0 = phi(x_init, x#2_n)
for _, stm in stm_map.items():
if stm.is_a(LPHI):
assert len(stm.args) == 2
orig_sym = stm.var.symbol()
new_sym_0 = new_ivs[orig_sym][0]
new_sym_n = new_ivs[orig_sym][factor]
stm.var.set_symbol(new_sym_0)
iv_updates[stm.args[1].symbol()] = new_ivs[orig_sym]
stm.args[1].set_symbol(new_sym_n)
head_stms.append(stm)
lphis.append(stm)
orig_cjump_cond = unroll_head.stms[-2]
assert orig_cjump_cond.is_a(MOVE) and orig_cjump_cond.src.is_a(RELOP)
orig_cond_sym = orig_cjump_cond.dst.symbol()
orig_cjump = unroll_head.stms[-1]
assert orig_cjump.is_a(CJUMP)
new_loop_iv = new_ivs[loop.counter][0]
tmp = self.scope.add_temp()
tmp.typ = new_loop_iv.typ.clone()
mv = MOVE(TEMP(tmp, Ctx.STORE),
BINOP('Add',
TEMP(new_loop_iv, Ctx.LOAD),
CONST((factor - 1) * loop_step)))
head_stms.append(mv)
cond_rhs = RELOP('Lt', TEMP(tmp, Ctx.LOAD), loop_max)
cond_sym = self.scope.add_condition_sym()
sym_map[orig_cond_sym] = cond_sym
cond_sym.typ = Type.bool_t
cond_lhs = TEMP(cond_sym, Ctx.STORE)
cond_stm = MOVE(cond_lhs, cond_rhs)
head_stms.append(cond_stm)
cond_exp = TEMP(cond_sym, Ctx.LOAD)
cjump = CJUMP(cond_exp, None, None)
cjump.loc = orig_cjump.loc
head_stms.append(cjump)
unroll_head.stms = []
for stm in head_stms:
unroll_head.append_stm(stm)
return unroll_head, iv_updates, lphis, sym_map
def _clone_block(self, blk, nametag):
stm_map = {}
clone_blk = blk.clone(self.scope, stm_map, nametag)
return clone_blk, stm_map
def _make_unrolling_blocks(self, origin_block, defsyms, new_ivs, iv_updates, sym_map, factor, head):
pred_blk = head
assert factor > 0
new_blks = []
for i in range(factor):
new_blk, stm_map = self._clone_block(origin_block, 'unroll_body')
new_blk.preds_loop = []
new_blk.succs_loop = []
ivreplacer = IVReplacer(self.scope, defsyms, new_ivs, iv_updates, i)
symreplacer = SymbolReplacer(sym_map)
for stm in new_blk.stms:
ivreplacer.visit(stm)
symreplacer.visit(stm)
pred_blk.succs = [new_blk]
jmp = pred_blk.stms[-1]
jmp.typ = ''
if jmp.is_a(CJUMP):
jmp.true = new_blk
else:
assert jmp.is_a(JUMP)
jmp.target = new_blk
new_blk.preds = [pred_blk]
pred_blk = new_blk
new_blks.append(new_blk)
return new_blks
def _new_ivs(self, factor, ivs, is_full_unroll):
new_iv_map = defaultdict(list)
for i in range(factor + 1):
for iv in ivs:
new_name = '{}_{}'.format(iv.name, i)
new_iv = self.scope.inherit_sym(iv, new_name)
new_iv_map[iv].append(new_iv)
if i != 0 or is_full_unroll:
new_iv.del_tag('induction')
return new_iv_map
def _replace_outer_uses(self, loop, new_ivs, index, sym_map):
for u in loop.outer_uses:
usestms = self.scope.usedef.get_stms_using(u)
for ustm in usestms:
if u in new_ivs:
ustm.replace(u, new_ivs[u][index])
if u in sym_map:
ustm.replace(u, sym_map[u])
def _remove_loop_condition(self, cond):
PHICondRemover(cond).process(self.scope)
def _find_loop_range(self, loop):
loop_min = self._find_loop_min(loop)
if loop_min is None:
return None
loop_max = self._find_loop_max(loop)
if loop_max is None:
return None
loop_step = self._find_loop_step(loop)
if not isinstance(loop_step, int):
return None
return (loop_min, loop_max, loop_step)
def _find_loop_min(self, loop):
if loop.init.is_a(CONST):
return loop.init
elif loop.init.is_a(TEMP):
return loop.init
raise NotImplementedError('unsupported loop')
def _find_loop_max(self, loop):
loop_cond_sym = loop.cond
loop_cond_defs = self.scope.usedef.get_stms_defining(loop_cond_sym)
assert len(loop_cond_defs) == 1
loop_cond_stm = list(loop_cond_defs)[0]
assert loop_cond_stm.is_a(MOVE)
loop_cond_rhs = loop_cond_stm.src
if loop_cond_rhs.is_a(RELOP):
# We focus on simple increasing loops
if loop_cond_rhs.op in ('Lt',):
if loop_cond_rhs.left.symbol() is loop.counter:
may_max = loop_cond_rhs.right
if may_max.is_a(CONST):
return may_max
elif may_max.is_a(TEMP):
return may_max
raise NotImplementedError('unsupported loop')
def _find_loop_step(self, loop):
loop_update = loop.update
assert loop_update.is_a(TEMP)
update_sym = loop_update.symbol()
update_defs = self.scope.usedef.get_stms_defining(update_sym)
assert len(update_defs) == 1
update_stm = list(update_defs)[0]
assert update_stm.is_a(MOVE)
update_rhs = update_stm.src
if update_rhs.is_a(BINOP):
if update_rhs.op == 'Add':
if update_rhs.left.symbol() is loop.counter:
may_step = update_rhs.right
if may_step.is_a(CONST):
return may_step.value
else:
fail(update_stm, Errors.RULE_UNROLL_VARIABLE_STEP)
fail(update_stm, Errors.RULE_UNROLL_UNKNOWN_STEP)
class IVReplacer(IRVisitor):
def __init__(self, scope, defsyms, new_ivs, iv_updates, idx):
self.scope = scope
self.defsyms = defsyms
self.new_ivs = new_ivs
self.iv_updates = iv_updates
self.idx = idx
def visit_TEMP(self, ir):
if ir.sym not in self.defsyms and ir.sym not in self.new_ivs.keys():
# this is loop invariant
return
if not ir.sym.typ.is_scalar():
return
if ir.sym.is_induction():
assert ir.sym in self.new_ivs.keys()
new_sym = self.new_ivs[ir.sym][self.idx]
elif ir.sym in self.iv_updates:
new_ivs = self.iv_updates[ir.sym]
new_sym = new_ivs[self.idx + 1]
else:
new_name = '{}_{}'.format(ir.sym.name, self.idx)
new_sym = self.scope.inherit_sym(ir.sym, new_name)
ir.set_symbol(new_sym)
class PHICondRemover(IRTransformer):
def __init__(self, sym):
self.sym = sym
def visit_UNOP(self, ir):
if ir.exp.is_a(TEMP) and ir.exp.symbol() is self.sym:
return CONST(1)
return ir
def visit_TEMP(self, ir):
if ir.ctx & Ctx.STORE == 0 and ir.symbol() is self.sym:
return CONST(1)
return ir
|
ktok07b6/polyphony
|
polyphony/compiler/unroll.py
|
Python
|
mit
| 18,901
|
[
"VisIt"
] |
fa3706410eb8b32e42d43466e62e2a267ed90b19601fd3f51c4aa9e84afb20a5
|
#!/usr/bin/env python
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://pythonhosted.org/setuptools/easy_install.html
"""
from glob import glob
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import (
DistutilsArgError, DistutilsOptionError,
DistutilsError, DistutilsPlatformError,
)
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from distutils import log, dir_util
from distutils.command.build_scripts import first_line_re
from distutils.spawn import find_executable
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import textwrap
import warnings
import site
import struct
import contextlib
import subprocess
import shlex
import io
from setuptools.extern import six
from setuptools.extern.six.moves import configparser, map
from setuptools import Command
from setuptools.sandbox import run_setup
from setuptools.py31compat import get_path, get_config_vars
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import PackageIndex
from setuptools.package_index import URL_SCHEME
from setuptools.command import bdist_egg, egg_info
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
import pkg_resources
# Turn on PEP440Warnings
warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
__all__ = [
'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
'main', 'get_exe_prefixes',
]
def is_64bit():
return struct.calcsize("P") == 8
def samefile(p1, p2):
"""
Determine if two paths reference the same file.
Augments os.path.samefile to work on Windows and
suppresses errors if the path doesn't exist.
"""
both_exist = os.path.exists(p1) and os.path.exists(p2)
use_samefile = hasattr(os.path, 'samefile') and both_exist
if use_samefile:
return os.path.samefile(p1, p2)
norm_p1 = os.path.normpath(os.path.normcase(p1))
norm_p2 = os.path.normpath(os.path.normcase(p2))
return norm_p1 == norm_p2
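# A tiny illustrative check (not part of the original module): samefile()
# falls back to comparing normalized paths when either path is missing, so
# it never raises for nonexistent files. The names below are hypothetical.
def _samefile_demo():
    assert samefile('pkg/__init__.py', 'pkg/../pkg/__init__.py')
    assert not samefile('pkg/a.py', 'pkg/b.py')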
if six.PY2:
def _to_ascii(s):
return s
def isascii(s):
try:
six.text_type(s, 'ascii')
return True
except UnicodeError:
return False
else:
def _to_ascii(s):
return s.encode('ascii')
def isascii(s):
try:
s.encode('ascii')
return True
except UnicodeError:
return False
_one_liner = lambda text: textwrap.dedent(text).strip().replace('\n', '; ')
class easy_install(Command):
"""Manage a download/build/install process"""
description = "Find/get/install Python packages"
command_consumes_arguments = True
user_options = [
('prefix=', None, "installation prefix"),
("zip-ok", "z", "install package as a zipfile"),
("multi-version", "m", "make apps have to require() a version"),
("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
("install-dir=", "d", "install package to DIR"),
("script-dir=", "s", "install scripts to DIR"),
("exclude-scripts", "x", "Don't install scripts"),
("always-copy", "a", "Copy all needed packages to install dir"),
("index-url=", "i", "base URL of Python Package Index"),
("find-links=", "f", "additional URL(s) to search for packages"),
("build-directory=", "b",
"download/extract/build in DIR; keep the results"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('record=', None,
"filename in which to record list of installed files"),
('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
('site-dirs=', 'S', "list of directories where .pth files work"),
('editable', 'e', "Install specified packages in editable form"),
('no-deps', 'N', "don't install dependencies"),
('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
('local-snapshots-ok', 'l',
"allow building eggs from local checkouts"),
('version', None, "print version information and exit"),
('install-layout=', None, "installation layout to choose (known values: deb)"),
('force-installation-into-system-dir', '0', "force installation into /usr"),
('no-find-links', None,
"Don't load find-links defined in packages being installed")
]
boolean_options = [
'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
'editable',
'no-deps', 'local-snapshots-ok', 'version', 'force-installation-into-system-dir'
]
if site.ENABLE_USER_SITE:
help_msg = "install in user site-package '%s'" % site.USER_SITE
user_options.append(('user', None, help_msg))
boolean_options.append('user')
negative_opt = {'always-unzip': 'zip-ok'}
create_index = PackageIndex
def initialize_options(self):
# the --user option seems to be an opt-in one,
# so the default should be False.
self.user = 0
self.zip_ok = self.local_snapshots_ok = None
self.install_dir = self.script_dir = self.exclude_scripts = None
self.index_url = None
self.find_links = None
self.build_directory = None
self.args = None
self.optimize = self.record = None
self.upgrade = self.always_copy = self.multi_version = None
self.editable = self.no_deps = self.allow_hosts = None
self.root = self.prefix = self.no_report = None
self.version = None
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.install_base = None
self.install_platbase = None
if site.ENABLE_USER_SITE:
self.install_userbase = site.USER_BASE
self.install_usersite = site.USER_SITE
else:
self.install_userbase = None
self.install_usersite = None
self.no_find_links = None
# Options not specifiable via command line
self.package_index = None
self.pth_file = self.always_copy_from = None
self.site_dirs = None
self.installed_projects = {}
self.sitepy_installed = False
# enable custom installation, known values: deb
self.install_layout = None
self.force_installation_into_system_dir = None
self.multiarch = None
# Always read easy_install options, even if we are subclassed, or have
# an independent instance created. This ensures that defaults will
# always come from the standard configuration file(s)' "easy_install"
# section, even if this is a "develop" or "install" command, or some
# other embedding.
self._dry_run = None
self.verbose = self.distribution.verbose
self.distribution._set_command_options(
self, self.distribution.get_option_dict('easy_install')
)
def delete_blockers(self, blockers):
extant_blockers = (
filename for filename in blockers
if os.path.exists(filename) or os.path.islink(filename)
)
list(map(self._delete_path, extant_blockers))
def _delete_path(self, path):
log.info("Deleting %s", path)
if self.dry_run:
return
is_tree = os.path.isdir(path) and not os.path.islink(path)
remover = rmtree if is_tree else os.unlink
remover(path)
@staticmethod
def _render_version():
"""
Render the Setuptools version and installation details, then exit.
"""
ver = sys.version[:3]
dist = get_distribution('setuptools')
tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
print(tmpl.format(**locals()))
raise SystemExit()
def finalize_options(self):
self.version and self._render_version()
py_version = sys.version.split()[0]
prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {
'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
'py_version_nodot': py_version[0] + py_version[2],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
# Only python 3.2+ has abiflags
'abiflags': getattr(sys, 'abiflags', ''),
}
if site.ENABLE_USER_SITE:
self.config_vars['userbase'] = self.install_userbase
self.config_vars['usersite'] = self.install_usersite
self._fix_install_dir_for_user_site()
self.expand_basedirs()
self.expand_dirs()
if self.install_layout:
if not self.install_layout.lower() in ['deb']:
raise DistutilsOptionError("unknown value for --install-layout")
self.install_layout = self.install_layout.lower()
import sysconfig
if sys.version_info[:2] >= (3, 3):
self.multiarch = sysconfig.get_config_var('MULTIARCH')
self._expand(
'install_dir', 'script_dir', 'build_directory',
'site_dirs',
)
# If a non-default installation directory was specified, default the
# script directory to match it.
if self.script_dir is None:
self.script_dir = self.install_dir
if self.no_find_links is None:
self.no_find_links = False
# Let install_dir get set by install_lib command, which in turn
# gets its info from the install command, and takes into account
# --prefix and --home and all that other crud.
self.set_undefined_options(
'install_lib', ('install_dir', 'install_dir')
)
# Likewise, set default script_dir from 'install_scripts.install_dir'
self.set_undefined_options(
'install_scripts', ('install_dir', 'script_dir')
)
if self.user and self.install_purelib:
self.install_dir = self.install_purelib
self.script_dir = self.install_scripts
if self.prefix == '/usr' and not self.force_installation_into_system_dir:
raise DistutilsOptionError("""installation into /usr
Trying to install into the system-managed parts of the file system. Please
consider installing to another location, or use the option
--force-installation-into-system-dir to override this warning.
""")
# default --record from the install command
self.set_undefined_options('install', ('record', 'record'))
# Should this be moved to the if statement below? It's not used
# elsewhere
normpath = list(map(normalize_path, sys.path))  # materialized; map() is lazy here
self.all_site_dirs = get_site_dirs()
if self.site_dirs is not None:
site_dirs = [
os.path.expanduser(s.strip()) for s in
self.site_dirs.split(',')
]
for d in site_dirs:
if not os.path.isdir(d):
log.warn("%s (in --site-dirs) does not exist", d)
elif normalize_path(d) not in normpath:
raise DistutilsOptionError(
d + " (in --site-dirs) is not on sys.path"
)
else:
self.all_site_dirs.append(normalize_path(d))
if not self.editable:
self.check_site_dir()
self.index_url = self.index_url or "https://pypi.python.org/simple"
self.shadow_path = self.all_site_dirs[:]
for path_item in self.install_dir, normalize_path(self.script_dir):
if path_item not in self.shadow_path:
self.shadow_path.insert(0, path_item)
if self.allow_hosts is not None:
hosts = [s.strip() for s in self.allow_hosts.split(',')]
else:
hosts = ['*']
if self.package_index is None:
self.package_index = self.create_index(
self.index_url, search_path=self.shadow_path, hosts=hosts,
)
self.local_index = Environment(self.shadow_path + sys.path)
if self.find_links is not None:
if isinstance(self.find_links, six.string_types):
self.find_links = self.find_links.split()
else:
self.find_links = []
if self.local_snapshots_ok:
self.package_index.scan_egg_links(self.shadow_path + sys.path)
if not self.no_find_links:
self.package_index.add_find_links(self.find_links)
self.set_undefined_options('install_lib', ('optimize', 'optimize'))
if not isinstance(self.optimize, int):
try:
self.optimize = int(self.optimize)
if not (0 <= self.optimize <= 2):
raise ValueError
except ValueError:
raise DistutilsOptionError("--optimize must be 0, 1, or 2")
if self.editable and not self.build_directory:
raise DistutilsArgError(
"Must specify a build directory (-b) when using --editable"
)
if not self.args:
raise DistutilsArgError(
"No urls, filenames, or requirements specified (see --help)")
self.outputs = []
def _fix_install_dir_for_user_site(self):
"""
Fix the install_dir if "--user" was used.
"""
if not self.user or not site.ENABLE_USER_SITE:
return
self.create_home_path()
if self.install_userbase is None:
msg = "User base directory is not specified"
raise DistutilsPlatformError(msg)
self.install_base = self.install_platbase = self.install_userbase
scheme_name = os.name.replace('posix', 'unix') + '_user'
self.select_scheme(scheme_name)
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
dirs = [
'install_purelib',
'install_platlib',
'install_lib',
'install_headers',
'install_scripts',
'install_data',
]
self._expand_attrs(dirs)
def run(self):
if self.verbose != self.distribution.verbose:
log.set_verbosity(self.verbose)
try:
for spec in self.args:
self.easy_install(spec, not self.no_deps)
if self.record:
outputs = self.outputs
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in range(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
from distutils import file_util
self.execute(
file_util.write_file, (self.record, outputs),
"writing list of installed files to '%s'" %
self.record
)
self.warn_deprecated_options()
finally:
log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except Exception:
pid = random.randint(0, sys.maxsize)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
def warn_deprecated_options(self):
pass
def check_site_dir(self):
"""Verify that self.install_dir is .pth-capable dir, if needed"""
instdir = normalize_path(self.install_dir)
pth_file = os.path.join(instdir, 'easy-install.pth')
# Is it a configured, PYTHONPATH, implicit, or explicit site dir?
is_site_dir = instdir in self.all_site_dirs
if not is_site_dir and not self.multi_version:
# No? Then directly test whether it does .pth file processing
is_site_dir = self.check_pth_processing()
else:
# make sure we can write to target dir
testfile = self.pseudo_tempname() + '.write-test'
test_exists = os.path.exists(testfile)
try:
if test_exists:
os.unlink(testfile)
open(testfile, 'w').close()
os.unlink(testfile)
except (OSError, IOError):
self.cant_write_to_target()
if not is_site_dir and not self.multi_version:
# Can't install non-multi to non-site dir
raise DistutilsError(self.no_default_version_msg())
if is_site_dir:
if self.pth_file is None:
self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
else:
self.pth_file = None
PYTHONPATH = os.environ.get('PYTHONPATH', '').split(os.pathsep)
if instdir not in map(normalize_path, filter(None, PYTHONPATH)):
# only PYTHONPATH dirs need a site.py, so pretend it's there
self.sitepy_installed = True
elif self.multi_version and not os.path.exists(pth_file):
self.sitepy_installed = True # don't need site.py in this case
self.pth_file = None # and don't create a .pth file
self.install_dir = instdir
__cant_write_msg = textwrap.dedent("""
can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
""").lstrip()
__not_exists_id = textwrap.dedent("""
This directory does not currently exist. Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
""").lstrip()
__access_msg = textwrap.dedent("""
Perhaps your account does not have write access to this directory? If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account. If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
https://pythonhosted.org/setuptools/easy_install.html
Please make the appropriate changes for your system and try again.
""").lstrip()
def cant_write_to_target(self):
msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
if not os.path.exists(self.install_dir):
msg += '\n' + self.__not_exists_id
else:
msg += '\n' + self.__access_msg
raise DistutilsError(msg)
def check_pth_processing(self):
"""Empirically verify whether .pth files are supported in inst. dir"""
instdir = self.install_dir
log.info("Checking .pth file support in %s", instdir)
pth_file = self.pseudo_tempname() + ".pth"
ok_file = pth_file + '.ok'
ok_exists = os.path.exists(ok_file)
tmpl = _one_liner("""
import os
f = open({ok_file!r}, 'w')
f.write('OK')
f.close()
""") + '\n'
try:
if ok_exists:
os.unlink(ok_file)
dirname = os.path.dirname(ok_file)
if not os.path.exists(dirname):
os.makedirs(dirname)
f = open(pth_file, 'w')
except (OSError, IOError):
self.cant_write_to_target()
else:
try:
f.write(tmpl.format(**locals()))
f.close()
f = None
executable = sys.executable
if os.name == 'nt':
dirname, basename = os.path.split(executable)
alt = os.path.join(dirname, 'pythonw.exe')
use_alt = (
basename.lower() == 'python.exe' and
os.path.exists(alt)
)
if use_alt:
# use pythonw.exe to avoid opening a console window
executable = alt
from distutils.spawn import spawn
spawn([executable, '-E', '-c', 'pass'], 0)
if os.path.exists(ok_file):
log.info(
"TEST PASSED: %s appears to support .pth files",
instdir
)
return True
finally:
if f:
f.close()
if os.path.exists(ok_file):
os.unlink(ok_file)
if os.path.exists(pth_file):
os.unlink(pth_file)
if not self.multi_version:
log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
return False
def install_egg_scripts(self, dist):
"""Write all the scripts for `dist`, unless scripts are excluded"""
if not self.exclude_scripts and dist.metadata_isdir('scripts'):
for script_name in dist.metadata_listdir('scripts'):
if dist.metadata_isdir('scripts/' + script_name):
# The "script" is a directory, likely a Python 3
# __pycache__ directory, so skip it.
continue
self.install_script(
dist, script_name,
dist.get_metadata('scripts/' + script_name)
)
self.install_wrapper_scripts(dist)
def add_output(self, path):
if os.path.isdir(path):
for base, dirs, files in os.walk(path):
for filename in files:
self.outputs.append(os.path.join(base, filename))
else:
self.outputs.append(path)
def not_editable(self, spec):
if self.editable:
raise DistutilsArgError(
"Invalid argument %r: you can't use filenames or URLs "
"with --editable (except via the --find-links option)."
% (spec,)
)
def check_editable(self, spec):
if not self.editable:
return
if os.path.exists(os.path.join(self.build_directory, spec.key)):
raise DistutilsArgError(
"%r already exists in %s; can't do a checkout there" %
(spec.key, self.build_directory)
)
def easy_install(self, spec, deps=False):
tmpdir = tempfile.mkdtemp(prefix="easy_install-")
if not self.editable:
self.install_site_py()
try:
if not isinstance(spec, Requirement):
if URL_SCHEME(spec):
# It's a url, download it to tmpdir and process
self.not_editable(spec)
dl = self.package_index.download(spec, tmpdir)
return self.install_item(None, dl, tmpdir, deps, True)
elif os.path.exists(spec):
# Existing file or directory, just process it directly
self.not_editable(spec)
return self.install_item(None, spec, tmpdir, deps, True)
else:
spec = parse_requirement_arg(spec)
self.check_editable(spec)
dist = self.package_index.fetch_distribution(
spec, tmpdir, self.upgrade, self.editable,
not self.always_copy, self.local_index
)
if dist is None:
msg = "Could not find suitable distribution for %r" % spec
if self.always_copy:
msg += " (--always-copy skips system and development eggs)"
raise DistutilsError(msg)
elif dist.precedence == DEVELOP_DIST:
# .egg-info dists don't need installing, just process deps
self.process_distribution(spec, dist, deps, "Using")
return dist
else:
return self.install_item(spec, dist.location, tmpdir, deps)
finally:
if os.path.exists(tmpdir):
rmtree(tmpdir)
def install_item(self, spec, download, tmpdir, deps, install_needed=False):
# Installation is also needed if file in tmpdir or is not an egg
install_needed = install_needed or self.always_copy
install_needed = install_needed or os.path.dirname(download) == tmpdir
install_needed = install_needed or not download.endswith('.egg')
install_needed = install_needed or (
self.always_copy_from is not None and
os.path.dirname(normalize_path(download)) ==
normalize_path(self.always_copy_from)
)
if spec and not install_needed:
# at this point, we know it's a local .egg, we just don't know if
# it's already installed.
for dist in self.local_index[spec.project_name]:
if dist.location == download:
break
else:
install_needed = True # it's not in the local index
log.info("Processing %s", os.path.basename(download))
if install_needed:
dists = self.install_eggs(spec, download, tmpdir)
for dist in dists:
self.process_distribution(spec, dist, deps)
else:
dists = [self.egg_distribution(download)]
self.process_distribution(spec, dists[0], deps, "Using")
if spec is not None:
for dist in dists:
if dist in spec:
return dist
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def process_distribution(self, requirement, dist, deps=True, *info):
self.update_pth(dist)
self.package_index.add(dist)
if dist in self.local_index[dist.key]:
self.local_index.remove(dist)
self.local_index.add(dist)
self.install_egg_scripts(dist)
self.installed_projects[dist.key] = dist
log.info(self.installation_report(requirement, dist, *info))
if (dist.has_metadata('dependency_links.txt') and
not self.no_find_links):
self.package_index.add_find_links(
dist.get_metadata_lines('dependency_links.txt')
)
if not deps and not self.always_copy:
return
elif requirement is not None and dist.key != requirement.key:
log.warn("Skipping dependencies for %s", dist)
return # XXX this is not the distribution we were looking for
elif requirement is None or dist not in requirement:
# if we wound up with a different version, resolve what we've got
distreq = dist.as_requirement()
requirement = Requirement(str(distreq))
log.info("Processing dependencies for %s", requirement)
try:
distros = WorkingSet([]).resolve(
[requirement], self.local_index, self.easy_install
)
except DistributionNotFound as e:
raise DistutilsError(str(e))
except VersionConflict as e:
raise DistutilsError(e.report())
if self.always_copy or self.always_copy_from:
# Force all the relevant distros to be copied or activated
for dist in distros:
if dist.key not in self.installed_projects:
self.easy_install(dist.as_requirement())
log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
if self.zip_ok is not None:
return not self.zip_ok
if dist.has_metadata('not-zip-safe'):
return True
if not dist.has_metadata('zip-safe'):
return True
return False
def maybe_move(self, spec, dist_filename, setup_base):
dst = os.path.join(self.build_directory, spec.key)
if os.path.exists(dst):
msg = (
"%r already exists in %s; build directory %s will not be kept"
)
log.warn(msg, spec.key, self.build_directory, setup_base)
return setup_base
if os.path.isdir(dist_filename):
setup_base = dist_filename
else:
if os.path.dirname(dist_filename) == setup_base:
os.unlink(dist_filename) # get it out of the tmp dir
contents = os.listdir(setup_base)
if len(contents) == 1:
dist_filename = os.path.join(setup_base, contents[0])
if os.path.isdir(dist_filename):
# if the only thing there is a directory, move it instead
setup_base = dist_filename
ensure_directory(dst)
shutil.move(setup_base, dst)
return dst
def install_wrapper_scripts(self, dist):
if self.exclude_scripts:
return
for args in ScriptWriter.best().get_args(dist):
self.write_script(*args)
def install_script(self, dist, script_name, script_text, dev_path=None):
"""Generate a legacy script wrapper and install it"""
spec = str(dist.as_requirement())
is_script = is_python_script(script_text, script_name)
if is_script:
body = self._load_template(dev_path) % locals()
script_text = ScriptWriter.get_header(script_text) + body
self.write_script(script_name, _to_ascii(script_text), 'b')
@staticmethod
def _load_template(dev_path):
"""
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
"""
# See https://github.com/pypa/setuptools/issues/134 for info
# on script file naming and downstream issues with SVR4
name = 'script.tmpl'
if dev_path:
name = name.replace('.tmpl', ' (dev).tmpl')
raw_bytes = resource_string('setuptools', name)
return raw_bytes.decode('utf-8')
def write_script(self, script_name, contents, mode="t", blockers=()):
"""Write an executable file to the scripts directory"""
self.delete_blockers( # clean up old .py/.pyw w/o a script
[os.path.join(self.script_dir, x) for x in blockers]
)
log.info("Installing %s script to %s", script_name, self.script_dir)
target = os.path.join(self.script_dir, script_name)
self.add_output(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
if os.path.exists(target):
os.unlink(target)
with open(target, "w" + mode) as f:
f.write(contents)
chmod(target, 0o777 - mask)
def install_eggs(self, spec, dist_filename, tmpdir):
# .egg dirs or files are already built, so just return them
if dist_filename.lower().endswith('.egg'):
return [self.install_egg(dist_filename, tmpdir)]
elif dist_filename.lower().endswith('.exe'):
return [self.install_exe(dist_filename, tmpdir)]
# Anything else, try to extract and build
setup_base = tmpdir
if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
unpack_archive(dist_filename, tmpdir, self.unpack_progress)
elif os.path.isdir(dist_filename):
setup_base = os.path.abspath(dist_filename)
if (setup_base.startswith(tmpdir) # something we downloaded
and self.build_directory and spec is not None):
setup_base = self.maybe_move(spec, dist_filename, setup_base)
# Find the setup.py file
setup_script = os.path.join(setup_base, 'setup.py')
if not os.path.exists(setup_script):
setups = glob(os.path.join(setup_base, '*', 'setup.py'))
if not setups:
raise DistutilsError(
"Couldn't find a setup script in %s" %
os.path.abspath(dist_filename)
)
if len(setups) > 1:
raise DistutilsError(
"Multiple setup scripts in %s" %
os.path.abspath(dist_filename)
)
setup_script = setups[0]
# Now run it, and return the result
if self.editable:
log.info(self.report_editable(spec, setup_script))
return []
else:
return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
if os.path.isdir(egg_path):
metadata = PathMetadata(egg_path, os.path.join(egg_path,
'EGG-INFO'))
else:
metadata = EggMetadata(zipimport.zipimporter(egg_path))
return Distribution.from_filename(egg_path, metadata=metadata)
def install_egg(self, egg_path, tmpdir):
destination = os.path.join(
self.install_dir,
os.path.basename(egg_path),
)
destination = os.path.abspath(destination)
if not self.dry_run:
ensure_directory(destination)
dist = self.egg_distribution(egg_path)
if not samefile(egg_path, destination):
if os.path.isdir(destination) and not os.path.islink(destination):
dir_util.remove_tree(destination, dry_run=self.dry_run)
elif os.path.exists(destination):
self.execute(
os.unlink,
(destination,),
"Removing " + destination,
)
try:
new_dist_is_zipped = False
if os.path.isdir(egg_path):
if egg_path.startswith(tmpdir):
f, m = shutil.move, "Moving"
else:
f, m = shutil.copytree, "Copying"
elif self.should_unzip(dist):
self.mkpath(destination)
f, m = self.unpack_and_compile, "Extracting"
else:
new_dist_is_zipped = True
if egg_path.startswith(tmpdir):
f, m = shutil.move, "Moving"
else:
f, m = shutil.copy2, "Copying"
self.execute(
f,
(egg_path, destination),
(m + " %s to %s") % (
os.path.basename(egg_path),
os.path.dirname(destination)
),
)
update_dist_caches(
destination,
fix_zipimporter_caches=new_dist_is_zipped,
)
except Exception:
update_dist_caches(destination, fix_zipimporter_caches=False)
raise
self.add_output(destination)
return self.egg_distribution(destination)
def install_exe(self, dist_filename, tmpdir):
# See if it's valid, get data
cfg = extract_wininst_cfg(dist_filename)
if cfg is None:
raise DistutilsError(
"%s is not a valid distutils Windows .exe" % dist_filename
)
# Create a dummy distribution object until we build the real distro
dist = Distribution(
None,
project_name=cfg.get('metadata', 'name'),
version=cfg.get('metadata', 'version'), platform=get_platform(),
)
# Convert the .exe to an unpacked egg
egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg')
dist.location = egg_path
egg_tmp = egg_path + '.tmp'
_egg_info = os.path.join(egg_tmp, 'EGG-INFO')
pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX
self.exe_to_egg(dist_filename, egg_tmp)
# Write EGG-INFO/PKG-INFO
if not os.path.exists(pkg_inf):
f = open(pkg_inf, 'w')
f.write('Metadata-Version: 1.0\n')
for k, v in cfg.items('metadata'):
if k != 'target_version':
f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
f.close()
script_dir = os.path.join(_egg_info, 'scripts')
# delete entry-point scripts to avoid duping
self.delete_blockers([
os.path.join(script_dir, args[0])
for args in ScriptWriter.get_args(dist)
])
# Build .egg file from tmpdir
bdist_egg.make_zipfile(
egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run,
)
# install the .egg
return self.install_egg(egg_path, tmpdir)
def exe_to_egg(self, dist_filename, egg_tmp):
"""Extract a bdist_wininst to the directories an egg would use"""
# Check for .pth file and set up prefix translations
prefixes = get_exe_prefixes(dist_filename)
to_compile = []
native_libs = []
top_level = {}
def process(src, dst):
s = src.lower()
for old, new in prefixes:
if s.startswith(old):
src = new + src[len(old):]
parts = src.split('/')
dst = os.path.join(egg_tmp, *parts)
dl = dst.lower()
if dl.endswith('.pyd') or dl.endswith('.dll'):
parts[-1] = bdist_egg.strip_module(parts[-1])
top_level[os.path.splitext(parts[0])[0]] = 1
native_libs.append(src)
elif dl.endswith('.py') and old != 'SCRIPTS/':
top_level[os.path.splitext(parts[0])[0]] = 1
to_compile.append(dst)
return dst
if not src.endswith('.pth'):
log.warn("WARNING: can't process %s", src)
return None
# extract, tracking .pyd/.dll->native_libs and .py -> to_compile
unpack_archive(dist_filename, egg_tmp, process)
stubs = []
for res in native_libs:
if res.lower().endswith('.pyd'): # create stubs for .pyd's
parts = res.split('/')
resource = parts[-1]
parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
pyfile = os.path.join(egg_tmp, *parts)
to_compile.append(pyfile)
stubs.append(pyfile)
bdist_egg.write_stub(resource, pyfile)
self.byte_compile(to_compile) # compile .py's
bdist_egg.write_safety_flag(
os.path.join(egg_tmp, 'EGG-INFO'),
bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
for name in 'top_level', 'native_libs':
if locals()[name]:
txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
if not os.path.exists(txt):
f = open(txt, 'w')
f.write('\n'.join(locals()[name]) + '\n')
f.close()
__mv_warning = textwrap.dedent("""
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
""").lstrip()
__id_warning = textwrap.dedent("""
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
""")
def installation_report(self, req, dist, what="Installed"):
"""Helpful installation message for display to package users"""
msg = "\n%(what)s %(eggloc)s%(extras)s"
if self.multi_version and not self.no_report:
msg += '\n' + self.__mv_warning
if self.install_dir not in map(normalize_path, sys.path):
msg += '\n' + self.__id_warning
eggloc = dist.location
name = dist.project_name
version = dist.version
extras = '' # TODO: self.report_extras(req, dist)
return msg % locals()
__editable_msg = textwrap.dedent("""
Extracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
%(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""").lstrip()
def report_editable(self, spec, setup_script):
dirname = os.path.dirname(setup_script)
python = sys.executable
return '\n' + self.__editable_msg % locals()
def run_setup(self, setup_script, setup_base, args):
sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
sys.modules.setdefault('distutils.command.egg_info', egg_info)
args = list(args)
if self.verbose > 2:
v = 'v' * (self.verbose - 1)
args.insert(0, '-' + v)
elif self.verbose < 2:
args.insert(0, '-q')
if self.dry_run:
args.insert(0, '-n')
log.info(
"Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
)
try:
run_setup(setup_script, args)
except SystemExit as v:
raise DistutilsError("Setup script exited with %s" % (v.args[0],))
def build_and_install(self, setup_script, setup_base):
args = ['bdist_egg', '--dist-dir']
dist_dir = tempfile.mkdtemp(
prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
)
try:
self._set_fetcher_options(os.path.dirname(setup_script))
args.append(dist_dir)
self.run_setup(setup_script, setup_base, args)
all_eggs = Environment([dist_dir])
eggs = []
for key in all_eggs:
for dist in all_eggs[key]:
eggs.append(self.install_egg(dist.location, setup_base))
if not eggs and not self.dry_run:
log.warn("No eggs found in %s (setup script problem?)",
dist_dir)
return eggs
finally:
rmtree(dist_dir)
log.set_verbosity(self.verbose) # restore our log verbosity
def _set_fetcher_options(self, base):
"""
When easy_install is about to run bdist_egg on a source dist, that
source dist might have 'setup_requires' directives, requiring
additional fetching. Ensure the fetcher options given to easy_install
are available to that command as well.
"""
# find the fetch options from easy_install and write them out
# to the setup.cfg file.
ei_opts = self.distribution.get_option_dict('easy_install').copy()
fetch_directives = (
'find_links', 'site_dirs', 'index_url', 'optimize',
'site_dirs', 'allow_hosts',
)
fetch_options = {}
for key, val in ei_opts.items():
if key not in fetch_directives:
continue
fetch_options[key.replace('_', '-')] = val[1]
# create a settings dictionary suitable for `edit_config`
settings = dict(easy_install=fetch_options)
cfg_filename = os.path.join(base, 'setup.cfg')
setopt.edit_config(cfg_filename, settings)
def update_pth(self, dist):
if self.pth_file is None:
return
for d in self.pth_file[dist.key]: # drop old entries
if self.multi_version or d.location != dist.location:
log.info("Removing %s from easy-install.pth file", d)
self.pth_file.remove(d)
if d.location in self.shadow_path:
self.shadow_path.remove(d.location)
if not self.multi_version:
if dist.location in self.pth_file.paths:
log.info(
"%s is already the active version in easy-install.pth",
dist,
)
else:
log.info("Adding %s to easy-install.pth file", dist)
self.pth_file.add(dist) # add new entry
if dist.location not in self.shadow_path:
self.shadow_path.append(dist.location)
if not self.dry_run:
self.pth_file.save()
if dist.key == 'setuptools':
# Ensure that setuptools itself never becomes unavailable!
# XXX should this check for latest version?
filename = os.path.join(self.install_dir, 'setuptools.pth')
if os.path.islink(filename):
os.unlink(filename)
f = open(filename, 'wt')
f.write(self.pth_file.make_relative(dist.location) + '\n')
f.close()
def unpack_progress(self, src, dst):
# Progress filter for unpacking
log.debug("Unpacking %s to %s", src, dst)
return dst # only unpack-and-compile skips files for dry run
def unpack_and_compile(self, egg_path, destination):
to_compile = []
to_chmod = []
def pf(src, dst):
if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
to_compile.append(dst)
elif dst.endswith('.dll') or dst.endswith('.so'):
to_chmod.append(dst)
self.unpack_progress(src, dst)
return not self.dry_run and dst or None
unpack_archive(egg_path, destination, pf)
self.byte_compile(to_compile)
if not self.dry_run:
for f in to_chmod:
mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
chmod(f, mode)
def byte_compile(self, to_compile):
if sys.dont_write_bytecode:
self.warn('byte-compiling is disabled, skipping.')
return
from distutils.util import byte_compile
try:
# try to make the byte compile messages quieter
log.set_verbosity(self.verbose - 1)
byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
if self.optimize:
byte_compile(
to_compile, optimize=self.optimize, force=1,
dry_run=self.dry_run,
)
finally:
log.set_verbosity(self.verbose) # restore original verbosity
__no_default_msg = textwrap.dedent("""
bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://pythonhosted.org/setuptools/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again.""").lstrip()
def no_default_version_msg(self):
template = self.__no_default_msg
return template % (self.install_dir, os.environ.get('PYTHONPATH', ''))
def install_site_py(self):
"""Make sure there's a site.py in the target dir, if needed"""
if self.sitepy_installed:
return # already did it, or don't need to
sitepy = os.path.join(self.install_dir, "site.py")
source = resource_string("setuptools", "site-patch.py")
source = source.decode('utf-8')
current = ""
if os.path.exists(sitepy):
log.debug("Checking existing site.py in %s", self.install_dir)
with io.open(sitepy) as strm:
current = strm.read()
if not current.startswith('def __boot():'):
raise DistutilsError(
"%s is not a setuptools-generated site.py; please"
" remove it." % sitepy
)
if current != source:
log.info("Creating %s", sitepy)
if not self.dry_run:
ensure_directory(sitepy)
with io.open(sitepy, 'w', encoding='utf-8') as strm:
strm.write(source)
self.byte_compile([sitepy])
self.sitepy_installed = True
def create_home_path(self):
"""Create directories under ~."""
if not self.user:
return
home = convert_path(os.path.expanduser("~"))
for name, path in six.iteritems(self.config_vars):
if path.startswith(home) and not os.path.isdir(path):
self.debug_print("os.makedirs('%s', 0o700)" % path)
os.makedirs(path, 0o700)
if sys.version[:3] in ('2.3', '2.4', '2.5') or 'real_prefix' in sys.__dict__:
sitedir_name = 'site-packages'
else:
sitedir_name = 'dist-packages'
INSTALL_SCHEMES = dict(
posix=dict(
install_dir='$base/lib/python$py_version_short/site-packages',
script_dir='$base/bin',
),
unix_local=dict(
install_dir='$base/local/lib/python$py_version_short/%s' % sitedir_name,
script_dir='$base/local/bin',
),
posix_local=dict(
install_dir='$base/local/lib/python$py_version_short/%s' % sitedir_name,
script_dir='$base/local/bin',
),
deb_system=dict(
install_dir='$base/lib/python3/%s' % sitedir_name,
script_dir='$base/bin',
),
)
DEFAULT_SCHEME = dict(
install_dir='$base/Lib/site-packages',
script_dir='$base/Scripts',
)
def _expand(self, *attrs):
config_vars = self.get_finalized_command('install').config_vars
if self.prefix or self.install_layout:
if self.install_layout and self.install_layout in ['deb']:
scheme_name = "deb_system"
self.prefix = '/usr'
elif self.prefix or 'real_prefix' in sys.__dict__:
scheme_name = os.name
else:
scheme_name = "posix_local"
# Set default install_dir/scripts from --prefix
config_vars = config_vars.copy()
config_vars['base'] = self.prefix
scheme = self.INSTALL_SCHEMES.get(scheme_name, self.DEFAULT_SCHEME)
for attr, val in scheme.items():
if getattr(self, attr, None) is None:
setattr(self, attr, val)
from distutils.util import subst_vars
for attr in attrs:
val = getattr(self, attr)
if val is not None:
val = subst_vars(val, config_vars)
if os.name == 'posix':
val = os.path.expanduser(val)
setattr(self, attr, val)
def get_site_dirs():
# return a list of 'site' dirs
sitedirs = [_f for _f in os.environ.get('PYTHONPATH',
'').split(os.pathsep) if _f]
prefixes = [sys.prefix]
if sys.exec_prefix != sys.prefix:
prefixes.append(sys.exec_prefix)
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos'):
sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
elif os.sep == '/':
sitedirs.extend([
os.path.join(
prefix,
"local/lib",
"python" + sys.version[:3],
"dist-packages",
),
os.path.join(
prefix,
"lib",
"python" + sys.version[:3],
"dist-packages",
),
os.path.join(prefix, "lib", "site-python"),
])
else:
sitedirs.extend([
prefix,
os.path.join(prefix, "lib", "site-packages"),
])
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
home_sp = os.path.join(
home,
'Library',
'Python',
sys.version[:3],
'site-packages',
)
sitedirs.append(home_sp)
lib_paths = get_path('purelib'), get_path('platlib')
for site_lib in lib_paths:
if site_lib not in sitedirs:
sitedirs.append(site_lib)
if site.ENABLE_USER_SITE:
sitedirs.append(site.USER_SITE)
try:
sitedirs.extend(site.getsitepackages())
except AttributeError:
pass
sitedirs = list(map(normalize_path, sitedirs))
return sitedirs
def expand_paths(inputs):
"""Yield sys.path directories that might contain "old-style" packages"""
seen = {}
for dirname in inputs:
dirname = normalize_path(dirname)
if dirname in seen:
continue
seen[dirname] = 1
if not os.path.isdir(dirname):
continue
files = os.listdir(dirname)
yield dirname, files
for name in files:
if not name.endswith('.pth'):
# We only care about the .pth files
continue
if name in ('easy-install.pth', 'setuptools.pth'):
# Ignore .pth files that we control
continue
# Read the .pth file
f = open(os.path.join(dirname, name))
lines = list(yield_lines(f))
f.close()
# Yield existing non-dupe, non-import directory lines from it
for line in lines:
if not line.startswith("import"):
line = normalize_path(line.rstrip())
if line not in seen:
seen[line] = 1
if not os.path.isdir(line):
continue
yield line, os.listdir(line)
def extract_wininst_cfg(dist_filename):
"""Extract configuration data from a bdist_wininst .exe
Returns a configparser.RawConfigParser, or None
"""
f = open(dist_filename, 'rb')
try:
endrec = zipfile._EndRecData(f)
if endrec is None:
return None
prepended = (endrec[9] - endrec[5]) - endrec[6]
if prepended < 12: # no wininst data here
return None
f.seek(prepended - 12)
tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
if tag not in (0x1234567A, 0x1234567B):
return None # not a valid tag
f.seek(prepended - (12 + cfglen))
init = {'version': '', 'target_version': ''}
cfg = configparser.RawConfigParser(init)
try:
part = f.read(cfglen)
# Read up to the first null byte.
config = part.split(b'\0', 1)[0]
# Now the config is in bytes, but for RawConfigParser, it should
# be text, so decode it.
config = config.decode(sys.getfilesystemencoding())
cfg.readfp(six.StringIO(config))
except configparser.Error:
return None
if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
return None
return cfg
finally:
f.close()
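# Illustrative usage (a sketch; the filename below is hypothetical):
#   cfg = extract_wininst_cfg('example-1.0.win32-py2.7.exe')
#   if cfg is not None:
#       name = cfg.get('metadata', 'name')
#       version = cfg.get('metadata', 'version')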
def get_exe_prefixes(exe_filename):
"""Get exe->egg path translations for a given .exe file"""
prefixes = [
('PURELIB/', ''),
('PLATLIB/pywin32_system32', ''),
('PLATLIB/', ''),
('SCRIPTS/', 'EGG-INFO/scripts/'),
('DATA/lib/site-packages', ''),
]
z = zipfile.ZipFile(exe_filename)
try:
for info in z.infolist():
name = info.filename
parts = name.split('/')
if len(parts) == 3 and parts[2] == 'PKG-INFO':
if parts[1].endswith('.egg-info'):
prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
break
if len(parts) != 2 or not name.endswith('.pth'):
continue
if name.endswith('-nspkg.pth'):
continue
if parts[0].upper() in ('PURELIB', 'PLATLIB'):
contents = z.read(name)
if six.PY3:
contents = contents.decode()
for pth in yield_lines(contents):
pth = pth.strip().replace('\\', '/')
if not pth.startswith('import'):
prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
finally:
z.close()
prefixes = [(x.lower(), y) for x, y in prefixes]
prefixes.sort()
prefixes.reverse()
return prefixes
def parse_requirement_arg(spec):
try:
return Requirement.parse(spec)
except ValueError:
raise DistutilsError(
"Not a URL, existing file, or requirement spec: %r" % (spec,)
)
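# Illustrative usage (a sketch): a plain requirement string parses into a
# Requirement object, while an unparseable spec is reported via the
# DistutilsError raised above.
#   req = parse_requirement_arg('SQLAlchemy>=1.0')
#   req.project_name   # -> 'SQLAlchemy'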
class PthDistributions(Environment):
"""A .pth file with Distribution paths in it"""
dirty = False
def __init__(self, filename, sitedirs=()):
self.filename = filename
self.sitedirs = list(map(normalize_path, sitedirs))
self.basedir = normalize_path(os.path.dirname(self.filename))
self._load()
Environment.__init__(self, [], None, None)
for path in yield_lines(self.paths):
list(map(self.add, find_distributions(path, True)))
def _load(self):
self.paths = []
saw_import = False
seen = dict.fromkeys(self.sitedirs)
if os.path.isfile(self.filename):
f = open(self.filename, 'rt')
for line in f:
if line.startswith('import'):
saw_import = True
continue
path = line.rstrip()
self.paths.append(path)
if not path.strip() or path.strip().startswith('#'):
continue
# skip non-existent paths, in case somebody deleted a package
# manually, and duplicate paths as well
path = self.paths[-1] = normalize_path(
os.path.join(self.basedir, path)
)
if not os.path.exists(path) or path in seen:
self.paths.pop() # skip it
self.dirty = True # we cleaned up, so we're dirty now :)
continue
seen[path] = 1
f.close()
if self.paths and not saw_import:
self.dirty = True # ensure anything we touch has import wrappers
while self.paths and not self.paths[-1].strip():
self.paths.pop()
def save(self):
"""Write changed .pth file back to disk"""
if not self.dirty:
return
rel_paths = list(map(self.make_relative, self.paths))
if rel_paths:
log.debug("Saving %s", self.filename)
lines = self._wrap_lines(rel_paths)
data = '\n'.join(lines) + '\n'
if os.path.islink(self.filename):
os.unlink(self.filename)
with open(self.filename, 'wt') as f:
f.write(data)
elif os.path.exists(self.filename):
log.debug("Deleting empty %s", self.filename)
os.unlink(self.filename)
self.dirty = False
@staticmethod
def _wrap_lines(lines):
return lines
def add(self, dist):
"""Add `dist` to the distribution map"""
new_path = (
dist.location not in self.paths and (
dist.location not in self.sitedirs or
# account for '.' being in PYTHONPATH
dist.location == os.getcwd()
)
)
if new_path:
self.paths.append(dist.location)
self.dirty = True
Environment.add(self, dist)
def remove(self, dist):
"""Remove `dist` from the distribution map"""
while dist.location in self.paths:
self.paths.remove(dist.location)
self.dirty = True
Environment.remove(self, dist)
def make_relative(self, path):
npath, last = os.path.split(normalize_path(path))
baselen = len(self.basedir)
parts = [last]
sep = os.altsep == '/' and '/' or os.sep
while len(npath) >= baselen:
if npath == self.basedir:
parts.append(os.curdir)
parts.reverse()
return sep.join(parts)
npath, last = os.path.split(npath)
parts.append(last)
else:
return path
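# Illustrative example (a sketch): with a .pth file living in
# /usr/lib/python3.5/site-packages, make_relative() turns an installed egg's
# absolute location into a path relative to that directory and leaves paths
# outside basedir untouched:
#   self.make_relative('/usr/lib/python3.5/site-packages/Foo-1.0-py3.5.egg')
#   # -> './Foo-1.0-py3.5.egg'
#   self.make_relative('/opt/other/Bar-2.0.egg')   # -> '/opt/other/Bar-2.0.egg'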
class RewritePthDistributions(PthDistributions):
@classmethod
def _wrap_lines(cls, lines):
yield cls.prelude
for line in lines:
yield line
yield cls.postlude
prelude = _one_liner("""
import sys
sys.__plen = len(sys.path)
""")
postlude = _one_liner("""
import sys
new = sys.path[sys.__plen:]
del sys.path[sys.__plen:]
p = getattr(sys, '__egginsert', 0)
sys.path[p:p] = new
sys.__egginsert = p + len(new)
""")
if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite':
PthDistributions = RewritePthDistributions
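# Illustrative sketch (assuming _one_liner collapses each snippet above onto a
# single line, since .pth entries must be one-liners): with
# SETUPTOOLS_SYS_PATH_TECHNIQUE=rewrite the saved easy-install.pth brackets its
# entries so that freshly added paths are spliced toward the front of sys.path:
#   import sys; sys.__plen = len(sys.path)
#   ./Foo-1.0-py3.5.egg
#   import sys; new = sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p = getattr(sys, '__egginsert', 0); sys.path[p:p] = new; sys.__egginsert = p + len(new)
# The default 'raw' technique writes the entries without this wrapper.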
def _first_line_re():
"""
Return a regular expression based on first_line_re suitable for matching
strings.
"""
if isinstance(first_line_re.pattern, str):
return first_line_re
# first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
return re.compile(first_line_re.pattern.decode())
def auto_chmod(func, arg, exc):
if func is os.remove and os.name == 'nt':
chmod(arg, stat.S_IWRITE)
return func(arg)
et, ev, _ = sys.exc_info()
six.reraise(et, (ev[0], ev[1] + (" %s %s" % (func, arg))))
def update_dist_caches(dist_path, fix_zipimporter_caches):
"""
Fix any globally cached `dist_path` related data
`dist_path` should be a path of a newly installed egg distribution (zipped
or unzipped).
sys.path_importer_cache contains finder objects that have been cached when
importing data from the original distribution. Any such finders need to be
cleared since the replacement distribution might be packaged differently,
e.g. a zipped egg distribution might get replaced with an unzipped egg
folder or vice versa. Having the old finders cached may then cause Python
to attempt loading modules from the replacement distribution using an
incorrect loader.
zipimport.zipimporter objects are Python loaders charged with importing
data packaged inside zip archives. If stale loaders referencing the
original distribution are left behind, they can fail to load modules from
the replacement distribution. E.g. if an old zipimport.zipimporter instance
is used to load data from a new zipped egg archive, it may cause the
operation to attempt to locate the requested data in the wrong location -
one indicated by the original distribution's zip archive directory
information. Such an operation may then fail outright, e.g. report having
read a 'bad local file header', or even worse, it may fail silently &
return invalid data.
zipimport._zip_directory_cache contains cached zip archive directory
information for all existing zipimport.zipimporter instances and all such
instances connected to the same archive share the same cached directory
information.
If asked, and the underlying Python implementation allows it, we can fix
all existing zipimport.zipimporter instances instead of having to track
them down and remove them one by one, by updating their shared cached zip
archive directory information. This, of course, assumes that the
replacement distribution is packaged as a zipped egg.
If not asked to fix existing zipimport.zipimporter instances, we still do
our best to clear any remaining zipimport.zipimporter related cached data
that might somehow later get used when attempting to load data from the new
distribution and thus cause such load operations to fail. Note that when
tracking down such remaining stale data, we can not catch every conceivable
usage from here, and we clear only those that we know of and have found to
cause problems if left alive. Any remaining caches should be updated by
whomever is in charge of maintaining them, i.e. they should be ready to
handle us replacing their zip archives with new distributions at runtime.
"""
# There are several other known sources of stale zipimport.zipimporter
# instances that we do not clear here, but might if ever given a reason to
# do so:
# * Global setuptools pkg_resources.working_set (a.k.a. 'master working
# set') may contain distributions which may in turn contain their
# zipimport.zipimporter loaders.
# * Several zipimport.zipimporter loaders held by local variables further
# up the function call stack when running the setuptools installation.
# * Already loaded modules may have their __loader__ attribute set to the
# exact loader instance used when importing them. Python 3.4 docs state
# that this information is intended mostly for introspection and so is
# not expected to cause us problems.
normalized_path = normalize_path(dist_path)
_uncache(normalized_path, sys.path_importer_cache)
if fix_zipimporter_caches:
_replace_zip_directory_cache_data(normalized_path)
else:
# Here, even though we do not want to fix existing and now stale
# zipimporter cache information, we still want to remove it. Related to
# Python's zip archive directory information cache, we clear each of
# its stale entries in two phases:
# 1. Clear the entry so attempting to access zip archive information
# via any existing stale zipimport.zipimporter instances fails.
# 2. Remove the entry from the cache so any newly constructed
# zipimport.zipimporter instances do not end up using old stale
# zip archive directory information.
# This whole stale data removal step does not seem strictly necessary,
# but has been left in because it was done before we started replacing
# the zip archive directory information cache content if possible, and
# there are no relevant unit tests that we can depend on to tell us if
# this is really needed.
_remove_and_clear_zip_directory_cache_data(normalized_path)
def _collect_zipimporter_cache_entries(normalized_path, cache):
"""
Return zipimporter cache entry keys related to a given normalized path.
Alternative path spellings (e.g. those using different character case or
those using alternative path separators) related to the same path are
included. Any sub-path entries are included as well, i.e. those
corresponding to zip archives embedded in other zip archives.
"""
result = []
prefix_len = len(normalized_path)
for p in cache:
np = normalize_path(p)
if (np.startswith(normalized_path) and
np[prefix_len:prefix_len + 1] in (os.sep, '')):
result.append(p)
return result
def _update_zipimporter_cache(normalized_path, cache, updater=None):
"""
Update zipimporter cache data for a given normalized path.
Any sub-path entries are processed as well, i.e. those corresponding to zip
archives embedded in other zip archives.
Given updater is a callable taking a cache entry key and the original entry
(after already removing the entry from the cache), and expected to update
the entry and possibly return a new one to be inserted in its place.
Returning None indicates that the entry should not be replaced with a new
one. If no updater is given, the cache entries are simply removed without
any additional processing, the same as if the updater simply returned None.
"""
for p in _collect_zipimporter_cache_entries(normalized_path, cache):
# N.B. pypy's custom zipimport._zip_directory_cache implementation does
# not support the complete dict interface:
# * Does not support item assignment, thus not allowing this function
# to be used only for removing existing cache entries.
# * Does not support the dict.pop() method, forcing us to use the
# get/del patterns instead. For more detailed information see the
# following links:
# https://github.com/pypa/setuptools/issues/202#issuecomment-202913420
# https://bitbucket.org/pypy/pypy/src/dd07756a34a41f674c0cacfbc8ae1d4cc9ea2ae4/pypy/module/zipimport/interp_zipimport.py#cl-99
old_entry = cache[p]
del cache[p]
new_entry = updater and updater(p, old_entry)
if new_entry is not None:
cache[p] = new_entry
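# Illustrative sketch (hypothetical helper, not part of the original code): an
# updater that merely wipes the stale directory data and drops the entry, the
# same pattern used by _remove_and_clear_zip_directory_cache_data() below.
#   def _wipe(path, old_entry):
#       old_entry.clear()   # invalidate data shared by existing zipimporters
#       return None         # returning None leaves the entry out of the cache
#   _update_zipimporter_cache(normalized_path, zipimport._zip_directory_cache,
#                             updater=_wipe)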
def _uncache(normalized_path, cache):
_update_zipimporter_cache(normalized_path, cache)
def _remove_and_clear_zip_directory_cache_data(normalized_path):
def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
old_entry.clear()
_update_zipimporter_cache(
normalized_path, zipimport._zip_directory_cache,
updater=clear_and_remove_cached_zip_archive_directory_data)
# PyPy Python implementation does not allow directly writing to the
# zipimport._zip_directory_cache and so prevents us from attempting to correct
# its content. The best we can do there is clear the problematic cache content
# and have PyPy repopulate it as needed. The downside is that if there are any
# stale zipimport.zipimporter instances laying around, attempting to use them
# will fail due to not having its zip archive directory information available
# instead of being automatically corrected to use the new correct zip archive
# directory information.
if '__pypy__' in sys.builtin_module_names:
_replace_zip_directory_cache_data = \
_remove_and_clear_zip_directory_cache_data
else:
def _replace_zip_directory_cache_data(normalized_path):
def replace_cached_zip_archive_directory_data(path, old_entry):
# N.B. In theory, we could load the zip directory information just
# once for all updated path spellings, and then copy it locally and
# update its contained path strings to contain the correct
# spelling, but that seems like a way too invasive move (this cache
# structure is not officially documented anywhere and could in
# theory change with new Python releases) for no significant
# benefit.
old_entry.clear()
zipimport.zipimporter(path)
old_entry.update(zipimport._zip_directory_cache[path])
return old_entry
_update_zipimporter_cache(
normalized_path, zipimport._zip_directory_cache,
updater=replace_cached_zip_archive_directory_data)
def is_python(text, filename='<string>'):
"Is this string a valid Python script?"
try:
compile(text, filename, 'exec')
except (SyntaxError, TypeError):
return False
else:
return True
def is_sh(executable):
"""Determine if the specified executable is a .sh (contains a #! line)"""
try:
with io.open(executable, encoding='latin-1') as fp:
magic = fp.read(2)
except (OSError, IOError):
return executable
return magic == '#!'
def nt_quote_arg(arg):
"""Quote a command line argument according to Windows parsing rules"""
return subprocess.list2cmdline([arg])
def is_python_script(script_text, filename):
"""Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
"""
if filename.endswith('.py') or filename.endswith('.pyw'):
return True # extension says it's Python
if is_python(script_text, filename):
return True # it's syntactically valid Python
if script_text.startswith('#!'):
# It begins with a '#!' line, so check if 'python' is in it somewhere
return 'python' in script_text.splitlines()[0].lower()
return False # Not any Python I can recognize
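# Illustrative behaviour (a sketch):
#   is_python_script('print("hi")\n', 'hello.py')      # True: extension says Python
#   is_python_script('#!/bin/sh\necho hi\n', 'hello')  # False: not valid Python and
#                                                      # the #! line lacks 'python'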
try:
from os import chmod as _chmod
except ImportError:
# Jython compatibility
def _chmod(*args):
pass
def chmod(path, mode):
log.debug("changing mode of %s to %o", path, mode)
try:
_chmod(path, mode)
except os.error as e:
log.debug("chmod failed: %s", e)
class CommandSpec(list):
"""
A command spec for a #! header, specified as a list of arguments akin to
those passed to Popen.
"""
options = []
split_args = dict()
@classmethod
def best(cls):
"""
Choose the best CommandSpec class based on environmental conditions.
"""
return cls
@classmethod
def _sys_executable(cls):
_default = os.path.normpath(sys.executable)
return os.environ.get('__PYVENV_LAUNCHER__', _default)
@classmethod
def from_param(cls, param):
"""
Construct a CommandSpec from a parameter to build_scripts, which may
be None.
"""
if isinstance(param, cls):
return param
if isinstance(param, list):
return cls(param)
if param is None:
return cls.from_environment()
# otherwise, assume it's a string.
return cls.from_string(param)
@classmethod
def from_environment(cls):
return cls([cls._sys_executable()])
@classmethod
def from_string(cls, string):
"""
Construct a command spec from a simple string representing a command
line parseable by shlex.split.
"""
items = shlex.split(string, **cls.split_args)
return cls(items)
def install_options(self, script_text):
self.options = shlex.split(self._extract_options(script_text))
cmdline = subprocess.list2cmdline(self)
if not isascii(cmdline):
self.options[:0] = ['-x']
@staticmethod
def _extract_options(orig_script):
"""
Extract any options from the first line of the script.
"""
first = (orig_script + '\n').splitlines()[0]
match = _first_line_re().match(first)
options = match.group(1) or '' if match else ''
return options.strip()
def as_header(self):
return self._render(self + list(self.options))
@staticmethod
def _strip_quotes(item):
_QUOTES = '"\''
for q in _QUOTES:
if item.startswith(q) and item.endswith(q):
return item[1:-1]
return item
@staticmethod
def _render(items):
cmdline = subprocess.list2cmdline(
CommandSpec._strip_quotes(item.strip()) for item in items)
return '#!' + cmdline + '\n'
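# Illustrative usage (a sketch): turning a command string into a #! header.
#   spec = CommandSpec.from_string('/usr/bin/env python3 -O')
#   spec.install_options('')   # no options embedded in the script text itself
#   spec.as_header()           # -> '#!/usr/bin/env python3 -O\n'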
# For pbr compat; will be removed in a future version.
sys_executable = CommandSpec._sys_executable()
class WindowsCommandSpec(CommandSpec):
split_args = dict(posix=False)
class ScriptWriter(object):
"""
Encapsulates behavior around writing entry point scripts for console and
gui apps.
"""
template = textwrap.dedent("""
# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
__requires__ = %(spec)r
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point(%(spec)r, %(group)r, %(name)r)()
)
""").lstrip()
command_spec_class = CommandSpec
@classmethod
def get_script_args(cls, dist, executable=None, wininst=False):
# for backward compatibility
warnings.warn("Use get_args", DeprecationWarning)
writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
header = cls.get_script_header("", executable, wininst)
return writer.get_args(dist, header)
@classmethod
def get_script_header(cls, script_text, executable=None, wininst=False):
# for backward compatibility
warnings.warn("Use get_header", DeprecationWarning)
if wininst:
executable = "python.exe"
cmd = cls.command_spec_class.best().from_param(executable)
cmd.install_options(script_text)
return cmd.as_header()
@classmethod
def get_args(cls, dist, header=None):
"""
Yield write_script() argument tuples for a distribution's
console_scripts and gui_scripts entry points.
"""
if header is None:
header = cls.get_header()
spec = str(dist.as_requirement())
for type_ in 'console', 'gui':
group = type_ + '_scripts'
for name, ep in dist.get_entry_map(group).items():
cls._ensure_safe_name(name)
script_text = cls.template % locals()
args = cls._get_script_args(type_, name, header, script_text)
for res in args:
yield res
@staticmethod
def _ensure_safe_name(name):
"""
Prevent paths in *_scripts entry point names.
"""
has_path_sep = re.search(r'[\\/]', name)
if has_path_sep:
raise ValueError("Path separators not allowed in script names")
@classmethod
def get_writer(cls, force_windows):
# for backward compatibility
warnings.warn("Use best", DeprecationWarning)
return WindowsScriptWriter.best() if force_windows else cls.best()
@classmethod
def best(cls):
"""
Select the best ScriptWriter for this environment.
"""
if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
return WindowsScriptWriter.best()
else:
return cls
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
# Simply write the stub with no extension.
yield (name, header + script_text)
@classmethod
def get_header(cls, script_text="", executable=None):
"""Create a #! line, getting options (if any) from script_text"""
cmd = cls.command_spec_class.best().from_param(executable)
cmd.install_options(script_text)
return cmd.as_header()
class WindowsScriptWriter(ScriptWriter):
command_spec_class = WindowsCommandSpec
@classmethod
def get_writer(cls):
# for backward compatibility
warnings.warn("Use best", DeprecationWarning)
return cls.best()
@classmethod
def best(cls):
"""
Select the best ScriptWriter suitable for Windows
"""
writer_lookup = dict(
executable=WindowsExecutableLauncherWriter,
natural=cls,
)
# for compatibility, use the executable launcher by default
launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
return writer_lookup[launcher]
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"For Windows, add a .py extension"
ext = dict(console='.pya', gui='.pyw')[type_]
if ext not in os.environ['PATHEXT'].lower().split(';'):
msg = (
"{ext} not listed in PATHEXT; scripts will not be "
"recognized as executables."
).format(**locals())
warnings.warn(msg, UserWarning)
old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
old.remove(ext)
header = cls._adjust_header(type_, header)
blockers = [name + x for x in old]
yield name + ext, header + script_text, 't', blockers
@classmethod
def _adjust_header(cls, type_, orig_header):
"""
Make sure 'pythonw' is used for gui and 'python' is used for
console (regardless of what sys.executable is).
"""
pattern = 'pythonw.exe'
repl = 'python.exe'
if type_ == 'gui':
pattern, repl = repl, pattern
pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
new_header = pattern_ob.sub(string=orig_header, repl=repl)
return new_header if cls._use_header(new_header) else orig_header
@staticmethod
def _use_header(new_header):
"""
Should _adjust_header use the replaced header?
On non-windows systems, always use. On
Windows systems, only use the replaced header if it resolves
to an executable on the system.
"""
clean_header = new_header[2:-1].strip('"')
return sys.platform != 'win32' or find_executable(clean_header)
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"""
For Windows, add a .py extension and an .exe launcher
"""
if type_ == 'gui':
launcher_type = 'gui'
ext = '-script.pyw'
old = ['.pyw']
else:
launcher_type = 'cli'
ext = '-script.py'
old = ['.py', '.pyc', '.pyo']
hdr = cls._adjust_header(type_, header)
blockers = [name + x for x in old]
yield (name + ext, hdr + script_text, 't', blockers)
yield (
name + '.exe', get_win_launcher(launcher_type),
'b' # write in binary mode
)
if not is_64bit():
# install a manifest for the launcher to prevent Windows
# from detecting it as an installer (which it will for
# launchers like easy_install.exe). Consider only
# adding a manifest for launchers detected as installers.
# See Distribute #143 for details.
m_name = name + '.exe.manifest'
yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility
get_script_args = ScriptWriter.get_script_args
get_script_header = ScriptWriter.get_script_header
def get_win_launcher(type):
"""
Load the Windows launcher (executable) suitable for launching a script.
`type` should be either 'cli' or 'gui'
Returns the executable as a byte string.
"""
launcher_fn = '%s.exe' % type
if is_64bit():
launcher_fn = launcher_fn.replace(".", "-64.")
else:
launcher_fn = launcher_fn.replace(".", "-32.")
return resource_string('setuptools', launcher_fn)
def load_launcher_manifest(name):
manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
if six.PY2:
return manifest % vars()
else:
return manifest.decode('utf-8') % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
return shutil.rmtree(path, ignore_errors, onerror)
def current_umask():
tmp = os.umask(0o022)
os.umask(tmp)
return tmp
def bootstrap():
# This function is called when setuptools*.egg is run using /bin/sh
import setuptools
argv0 = os.path.dirname(setuptools.__path__[0])
sys.argv[0] = argv0
sys.argv.append(argv0)
main()
def main(argv=None, **kw):
from setuptools import setup
from setuptools.dist import Distribution
class DistributionWithoutHelpCommands(Distribution):
common_usage = ""
def _show_help(self, *args, **kw):
with _patch_usage():
Distribution._show_help(self, *args, **kw)
if argv is None:
argv = sys.argv[1:]
with _patch_usage():
setup(
script_args=['-q', 'easy_install', '-v'] + argv,
script_name=sys.argv[0] or 'easy_install',
distclass=DistributionWithoutHelpCommands,
**kw
)
@contextlib.contextmanager
def _patch_usage():
import distutils.core
USAGE = textwrap.dedent("""
usage: %(script)s [options] requirement_or_url ...
or: %(script)s --help
""").lstrip()
def gen_usage(script_name):
return USAGE % dict(
script=os.path.basename(script_name),
)
saved = distutils.core.gen_usage
distutils.core.gen_usage = gen_usage
try:
yield
finally:
distutils.core.gen_usage = saved
|
victor-prado/broker-manager
|
environment/lib/python3.5/site-packages/setuptools/command/easy_install.py
|
Python
|
mit
| 88,265
|
[
"VisIt"
] |
779aba165031ea23a9ef5009ac8640534d0451df4c03e060e6b52d10a4570d83
|
#!/usr/local/bin/python-2.5/bin/python
import sys, os
from getopt import gnu_getopt as getopt
Info="""
Module name: VMD-input.py
Author: (c) Andres Jaramillo-Botero
California Institute of Technology
ajaramil@wag.caltech.edu
Project: pEFF
Version: August 2009
Usage: python VMD-input.py lammps_dump_filename radii_column xpos_column
Example: python VMD-input.py dump.lammpstrj 5 6
1. Extracts the electron radii from a lammps trajectory dump into %s.out
2. Creates %s.xyz file
3. Creates appropriate %s.vmd file which can be sourced using TCL/TK in VMD
"""
from lmp2radii_col import makeradii
from lmp2xyz import lmp2xyz
def printHelp(input):
print Info%(input,input,input)
if __name__ == '__main__':
# if no input, print help and exit
if len(sys.argv) < 2:
print "Usage: python VMD-input.py lammps_dump_filename radii_column xpos_column\n"
sys.exit(1)
else:
infile=sys.argv[1]
workdir=os.getcwd()
tools=sys.argv[0].split('VMD-input.py')[0]
# set defaults
outfile = infile.split('.')[0]
print sys.argv
if len(sys.argv) == 4:
column = int(sys.argv[2])
xpos = int(sys.argv[3])
print "Assuming xpos=eradius+1"
elif len(sys.argv) == 3:
column = int(sys.argv[2])
xpos = column+1
print "Assuming xpos=eradius+1"
elif len(sys.argv) == 2:
column=5 # default = radius for dump -> id type q spin eradius x y z
xpos=6
else: print "Incorrect number of arguments"
# check for input:
opts, argv = getopt(sys.argv[1:], 'c:o:ha')
# read options
for opt, arg in opts:
if opt == '-h': # -h: print help
print Info%(input,input,input)
if opt == '-o': # output file name
outfile=arg
if opt == '-c': # select column from lammpstrj file to tabulate
column=int(arg)
print column,xpos
makeradii(infile,outfile+".out",column,True)
lmp2xyz(infile,outfile+".xyz",xpos)
print "Creating %s file ..."%(outfile+".vmd")
os.system("cat %s | sed 's/xyzfile/%s/' > %s"%(tools+"radii.vmd",outfile+".xyz","temp"))
os.system("cat %s | sed 's/radiifile/%s/' > %s; rm temp"%("temp",outfile+".out",workdir+'/'+outfile+".vmd"))
print "Done !! (you can now source %s using VMD's console) \n"%(outfile+".vmd")
print "NOTE: In VMD, set graphics representation for electrons to transparency,"
print "and change the atom types in the xyz file according to your values,"
print "for simplicity, they are set using the same mass sequence definition\nfrom your lammps data file\n"
|
quang-ha/lammps
|
tools/eff/VMD-input.py
|
Python
|
gpl-2.0
| 2,599
|
[
"LAMMPS",
"VMD"
] |
73c1eb92806b166e28e8ac7d6451d325b52a55d6f879d0d32db63638f2866c92
|
import Scientific.IO.NetCDF as nc
import numpy as np
import sys
import math
import pylab as pl
import matplotlib.colors as colors
from numpy import floor, sqrt, sin, cos, arccos, arctan2, pi
class gth_hemisphere:
"""Class implementing the gathering hemisphere."""
def __init__(self, resTheta=1, nThetaI=1, nDataLevels=1, phiRange=2.0*pi, type='Hemisphere'):
self.Type = type
self.resTheta = resTheta
self.dTheta = 0.5 * pi / float(resTheta)
self.dThetaInv = 1.0 / self.dTheta
self.nCells = 1
self.nThetaI = nThetaI
self.nLevels = nDataLevels
self.type = type
self.dPhi = np.zeros(resTheta)
self.dPhiInv = np.zeros(resTheta)
self.dA = np.zeros(resTheta)
self.mTheta = np.zeros(resTheta)
self.nPhi = np.zeros(resTheta, np.int64)
self.cIdx = np.zeros(resTheta)
self.phiRange = phiRange
dA0 = self.phiRange * (1.0 - cos(self.dTheta))
self.nPhi [0] = 1
self.cIdx [0] = 0
self.dPhi [0] = self.phiRange
self.dPhiInv [0] = 1.0 / self.phiRange
self.dA [0] = dA0
self.mTheta [0] = 0.5 * self.dTheta
for i in range(1, resTheta):
dPhi = dA0 / (cos(i * self.dTheta) - cos((i+1) * self.dTheta))
rPhi = round(self.phiRange / dPhi)
dPhi = self.phiRange / float(rPhi)
self.nPhi [i] = rPhi
self.dPhi [i] = dPhi
self.dPhiInv [i] = 1.0 / dPhi
self.dA [i] = dPhi * (cos(i * self.dTheta) - cos((i+1) * self.dTheta))
self.mTheta [i] = self.dTheta * (float(i) - 0.5)
self.cIdx [i] = self.cIdx[i-1] + self.nPhi[i-1]
self.nCells = self.nCells + rPhi
self.dAMean = self.phiRange / float(self.nCells)
self.hsData = {}
self.data = np.zeros([nDataLevels, self.nCells, nThetaI])
self.weight = np.zeros([nDataLevels, self.nCells, nThetaI])
def load(self, fName):
"""
Loads the hemisphere data from a netCDF file.
Returns: nothing
"""
try:
dFile = nc.NetCDFFile(fName, "r")
except IOError:
print "Error reading file, exiting."
sys.exit()
if "Hemisphere" not in dFile.variables.keys():
print "Error: not a proper hemisphere file."
sys.exit()
if "Elements" in dir(dFile):
self.Elements = str(dFile.Elements).split()
self.Type = dFile.Type
self.nPhi = np.array(dFile.nPhi)
self.cIdx = np.array(dFile.cIdx)
## Convert Fortran indices to numpy indices
if self.cIdx[0] == 1:
self.cIdx -= 1
self.dPhi = np.array(dFile.dPhi)
self.dPhiInv = 1.0 / self.dPhi
self.nThetaI = int(dFile.nThetaI)
self.nLevels = int(dFile.nLevels)
self.resTheta = int(dFile.nThetaE)
self.dTheta = 0.5 * math.pi / float(self.resTheta)
self.dThetaInv = 1.0/self.dTheta
self.dA = dFile.dA
self.nCells = int(dFile.nCells)
self.type = dFile.Type
try:
self.w = float(dFile.Single_scattering_albedo)
except:
pass
self.hsData['Simulation'] = np.array(dFile.variables['Hemisphere'].getValue())
self.data = np.array(dFile.variables['Hemisphere'].getValue())
dFile.close()
def divideBySolidAngle(self):
for i in range(self.resTheta):
self.data[:, self.cIdx[i] : self.cIdx[i] + self.nPhi[i], :] /= self.dA[i]
def carDirToCell(self, D):
r = sqrt ( (D**2).sum() )
theta = arccos ( D[2] / r )
phi = arctan2 ( D[1] / r, D[0] / r )
if( phi < 0.0 ):
phi = 2.0*pi + phi
t = floor( theta * self.dThetaInv )
p = floor( phi * self.dPhiInv[t] )
return self.cIdx[t] + p
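# Illustrative usage (a sketch, not part of the original file): accumulate a
# sample into the cell that a Cartesian direction vector points at.
#   hs = gth_hemisphere(resTheta=10, nThetaI=1, nDataLevels=1)
#   hs.addDataCar(np.array([0.0, 0.0, 1.0]), 1.0)   # +z maps to the polar cap cell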
def addDataCar(self, D, v, set=0, lvl=0):
c = self.carDirToCell(D)
self.data[set, c, lvl] += v
def toArray(self, set=0, lvl=0, hsDataSet='Simulation'):
"""
Unpacks the gathering hemisphere into a 2-dimensional array.
Returns: numpy.array
"""
resTheta = self.resTheta
resPhi = self.nPhi.max()
if(self.Type == 'Hemisphere'):
dp = math.pi * 2.0 / float(resPhi)
else:
dp = math.pi / float(resPhi)
data = np.zeros([resTheta, resPhi])
for i in range(resTheta):
dPhiI = dp * self.dPhiInv[i]
for j in range(resPhi):
data[i,j] = self.hsData[hsDataSet][lvl, self.cIdx[i] + int(math.floor(j * dPhiI)), set]
return data
def phiSlice(self, theta, set=0, lvl=0, hsDataSet='Simulation'):
"""
Returns: numpy.array
"""
iTheta = int(math.floor(theta * self.dThetaInv))
resPhi = self.nPhi[iTheta]
dPhi = self.dPhi[iTheta]
data = np.zeros([resPhi,2])
for i in range(resPhi):
data[i,0] = (i + 0.5) * dPhi
data[i,1] = self.rows[set][iTheta][i,lvl]
return data
def thetaSlice(self, phi, set=0, lvl=0, hsDataSet='Simulation'):
"""
Returns: numpy.array
"""
data = np.zeros([self.resTheta, 2])
for i in range(self.resTheta):
data[i,0] = (i+0.5) * self.dTheta
#data[i,1] = self.hsData[set][i][phi * self.dPhiInv[i],lvl]
data[i,1] = self.hsData[hsDataSet][lvl, self.cIdx[i] + int(math.floor(phi * self.dPhiInv[i])), set]
return data
def eval(self, thtI, thtE, phi):
#NOTE: QUICK FIX! NEEDS CORRECTING!
iThtI = int(math.floor(thtI/10.))
iThtE = int(math.floor(math.radians(thtE) * self.dThetaInv))
iPhi = int(math.floor(math.radians(phi) * self.dPhiInv[iThtE]))
x = (thtI % 10.) / 10.
if iThtI+1 < 9:
return ((1.0 - x)*self.hsData['Simulation'][0, self.cIdx[iThtE]+iPhi, iThtI] +
x*self.hsData['Simulation'][0, self.cIdx[iThtE]+iPhi, iThtI+1]) * 4.0 * math.pi
else:
return self.hsData['Simulation'][0, self.cIdx[iThtE]+iPhi, iThtI] * 4.0 * math.pi
def asArray(self, set=0, lvl=0, hsDataSet='Simulation'):
if self.type == 'Quartersphere':
data = np.zeros([self.nCells*2,4])
data[0:self.nCells,0] = self.hsData[hsDataSet][lvl, :, set]
data[self.nCells:2*self.nCells,0] = self.hsData[hsDataSet][lvl, :, set]
else:
data = np.zeros([self.nCells,4])
data[:,0] = self.hsData[hsDataSet][lvl, :, set]
for iThtE in range(self.resTheta):
if(iThtE != 0):
data[self.cIdx[iThtE] : self.cIdx[iThtE] + self.nPhi[iThtE], 1] = (iThtE + 0.5) * self.dTheta
else:
data[0, 1] = 0.0
for iPhi in range(self.nPhi[iThtE]):
data[self.cIdx[iThtE] + iPhi, 2] = pi - (iPhi + 0.5) * self.dPhi[iThtE]
data[self.cIdx[iThtE] : self.cIdx[iThtE] + self.nPhi[iThtE], 3] = self.dA[iThtE]
if self.type == 'Quartersphere':
data[self.nCells:2*self.nCells,1] = data[0:self.nCells,1]
data[self.nCells:2*self.nCells,2] = 2*pi - data[0:self.nCells,2]
data[self.nCells:2*self.nCells,3] = data[0:self.nCells,3]
return data
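# ---------------------------------------------------------------------------
# Hedged worked example (not part of the original module): the constructor
# above sizes each theta ring so every cell has roughly the same solid angle.
# A ring spanning [i*dTheta, (i+1)*dTheta] covers phiRange*(cos(i*dTheta) -
# cos((i+1)*dTheta)) steradians, and dividing by the polar-cap area dA0 gives
# the cell count, which is then rounded. The helper below re-derives nPhi for
# a small resolution as a sanity check; the function name and numbers are
# illustrative only.
def _equal_area_nphi_sketch(res_theta=4):
    from math import cos, pi
    phi_range = 2.0 * pi
    d_theta = 0.5 * pi / res_theta
    dA0 = phi_range * (1.0 - cos(d_theta))           # area of the polar cap cell
    n_phi = [1]
    for i in range(1, res_theta):
        ring_area = phi_range * (cos(i * d_theta) - cos((i + 1) * d_theta))
        n_phi.append(int(round(ring_area / dA0)))    # cells in ring i
    return n_phi                                     # -> [1, 3, 4, 5] for res_theta=4
# ---------------------------------------------------------------------------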
class xrHemisphere(gth_hemisphere):
def __init__(self, resTheta=1, nThetaI=1, nDataLevels=1):
gth_hemisphere.__init__(self, resTheta, nThetaI, nDataLevels)
def load(self, fName):
"""
Loads the hemisphere data from a netCDF file.
Returns: nothing
"""
try:
dFile = nc.NetCDFFile(fName, "r")
except IOError:
print "Error reading file, exiting."
sys.exit()
if "Hemisphere" not in dFile.variables.keys():
print "Error: not a proper hemisphere file."
sys.exit()
try:
self.Elements = str(dFile.Elements).split()
self.muPhotoIon = np.array(dFile.variables['Photoionization_coefficient'].getValue())
self.muAbs = np.array(dFile.variables['Fluorescence_line_coefficient'].getValue())
self.muAbsCDF = np.array(dFile.variables['Fluorescence_line_cdf'].getValue())
self.muRay = np.array(dFile.variables['Rayleigh_coefficient'].getValue())
self.muExt = np.array(dFile.variables['Extinction_coefficient'].getValue())
self.lEnergy = np.array(dFile.Fluorescence_line_energy)
self.eEnergy = np.array(dFile.Absorbtion_edge_energy)
self.energy = np.array(dFile.variables['Material_energy'].getValue())
self.Method = dFile.Simulation_method
print self.lEnergy[0], self.eEnergy[0]
print self.energy.min()
except (KeyError, AttributeError), e:
print "Error: Malformed input file, missing data.", e
sys.exit(1)
## Spectrum data
##
print "Loading spectrum data..."
try:
self.spcType = dFile.Spectrum_type
self.SpectrumE = np.array(dFile.variables['Spectrum_energy'].getValue())
self.Spectrum = np.array(dFile.variables['Spectrum_intensity'].getValue())
if self.spcType == 'Spectrum':
self.SpectrumCdf = np.array(dFile.variables['Spectrum_cdf'].getValue())
self.SpectrumCdfInv = np.array(dFile.variables['Spectrum_inverse_cdf'].getValue())
print "\tSpectrum data loaded."
except KeyError, e:
print "\tNo spectrum data found."
## Medium data
##
print "Loading medium data..."
try:
self.medHeightmap = np.array(dFile.variables['Medium_heightmap'].getValue())
self.medDensitymap = np.array(dFile.variables['Medium_densitymap'].getValue())
print "\tMedium data loaded."
except KeyError, e:
print "\tNo medium data found."
self.Type = dFile.Type
self.type = dFile.Type  # the inherited asArray() reads the lower-case alias
self.nPhi = np.array(dFile.nPhi)
self.cIdx = np.array(dFile.cIdx)
## Convert Fortran indices to numpy indices
if self.cIdx[0] == 1:
self.cIdx -= 1
self.dPhi = np.array(dFile.dPhi)
self.dPhiInv = 1.0 / self.dPhi
self.nThetaI = int(dFile.nThetaI)
self.thetaI = np.array(dFile.Theta_in)
self.nLevels = int(dFile.nLevels)
self.resTheta = int(dFile.nThetaE)
self.dTheta = 0.5 * math.pi / float(self.resTheta)
self.dThetaInv = 1.0/self.dTheta
self.dA = dFile.dA
self.hsData = {}
self.hsData['Simulation'] = np.array(dFile.variables['Hemisphere'].getValue())
if "Hemisphere_analytic" in dFile.variables.keys():
self.hsData['Analytic'] = np.array(dFile.variables['Hemisphere_analytic'].getValue())
dFile.close()
|
dronir/EM
|
python/gather.py
|
Python
|
gpl-3.0
| 10,232
|
[
"NetCDF"
] |
b73e4d75c9305bc3662d3c7c2b90c6a5da82ffb6f08c032dae3bf398c88ea65b
|
#!/usr/bin/env python3
# Copyright 2016-2018 Brian Warner
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# Git repo maintenance
#
# This script is responsible for cloning new repos and keeping existing repos up
# to date. It can be run as often as you want (and will detect when it's
# already running, so as not to spawn parallel processes), but once or twice per
# day should be more than sufficient. Each time it runs, it updates the repo
# and checks for any parents of HEAD that aren't already accounted for in the
# repos. It also rebuilds analysis data, checks any changed affiliations and
# aliases, and caches data for display.
import sys
import platform
import imp
import time
import datetime
import html.parser
import subprocess
import os
import getopt
import xlsxwriter
import configparser
from facade_worker.facade02utilitymethods import update_repo_log, trim_commit, store_working_author, trim_author
# if platform.python_implementation() == 'PyPy':
# import pymysql
# else:
# import MySQLdb
def nuke_affiliations(cfg):
# Delete all stored affiliations in the database. Normally when you
# add/remove/change affiliation data via the web UI, any potentially affected
# records will be deleted and then rebuilt on the next run. However, if you
# manually add affiliation records via the database or import them by some other
# means, there's no elegant way to discover which affiliations are affected. So
# this is the scorched earth way: remove them all to force a total rebuild.
# Brutal but effective.
cfg.log_activity('Info','Nuking affiliations')
nuke = ("UPDATE commits SET cmt_author_affiliation = NULL, "
"cmt_committer_affiliation = NULL")
cfg.cursor.execute(nuke)
cfg.db.commit()
cfg.log_activity('Info','Nuking affiliations (complete)')
def fill_empty_affiliations(cfg):
# When a record is added, it has no affiliation data. Also, when an affiliation
# mapping is changed via the UI, affiliation data will be set to NULL. This
# function finds any records with NULL affiliation data and fills them.
### Local helper functions ###
def update_affiliation(email_type,email,affiliation,start_date):
update = ("UPDATE commits "
"SET cmt_%s_affiliation = %%s "
"WHERE cmt_%s_email = %%s "
"AND cmt_%s_affiliation IS NULL "
"AND cmt_%s_date >= %%s" %
(email_type, email_type, email_type, email_type))
cfg.cursor.execute(update, (affiliation, email, start_date))
cfg.db.commit()
def discover_null_affiliations(attribution,email):
# Try a bunch of ways to match emails to attributions in the database. First it
# tries to match exactly. If that doesn't work, it tries to match by domain. If
# domain doesn't work, it strips subdomains from the email and tries again.
# First we see if there's an exact match. This will also catch malformed or
# intentionally mangled emails (e.g. "developer at domain.com") that have
# been added as an affiliation rather than an alias.
find_exact_match = ("SELECT ca_affiliation,ca_start_date "
"FROM contributor_affiliations "
"WHERE ca_domain = %s "
"AND ca_active = 1 "
"ORDER BY ca_start_date DESC")
cfg.cursor_people.execute(find_exact_match, (email, ))
cfg.db_people.commit()
matches = list(cfg.cursor_people)
if not matches and email.find('@') < 0:
# It's not a properly formatted email, leave it NULL and log it.
cfg.log_activity('Info','Unmatchable email: %s' % email)
return
if not matches:
# Now we go for a domain-level match. Try for an exact match.
domain = email[email.find('@')+1:]
find_exact_domain = ("SELECT ca_affiliation,ca_start_date "
"FROM contributor_affiliations "
"WHERE ca_domain= %s "
"AND ca_active = 1 "
"ORDER BY ca_start_date DESC")
cfg.cursor_people.execute(find_exact_domain, (domain, ))
cfg.db_people.commit()
matches = list(cfg.cursor_people)
if not matches:
# Then try stripping any subdomains.
find_domain = ("SELECT ca_affiliation,ca_start_date "
"FROM contributor_affiliations "
"WHERE ca_domain = %s "
"AND ca_active = 1 "
"ORDER BY ca_start_date DESC")
cfg.cursor_people.execute(find_domain, (domain[domain.rfind('.',0,domain.rfind('.',0))+1:], ))
cfg.db_people.commit()
matches = list(cfg.cursor_people)
if not matches:
# One last check to see if it's an unmatched academic domain.
if domain.endswith('.edu'):
matches.append({'ca_affiliation':'(Academic)','ca_start_date':'1970-01-01'})
# Done looking. Now we process any matches that were found.
if matches:
cfg.log_activity('Debug','Found domain match for %s' % email)
for match in matches:
update = ("UPDATE commits "
"SET cmt_%s_affiliation = %%s "
"WHERE cmt_%s_email = %%s "
"AND cmt_%s_affiliation IS NULL "
"AND cmt_%s_date::date >= %%s::date" %
(attribution, attribution, attribution, attribution))
cfg.log_activity('Info', 'attr: {} \nmatch:{}\nsql: {}'.format(attribution, match, update))
try:
cfg.cursor.execute(update, (match[0], email, match[1]))
cfg.db.commit()
except Exception as e:
cfg.log_activity('Info', 'Error encountered: {}'.format(e))
cfg.log_activity('Info', 'Affiliation insertion failed for %s ' % email)
def discover_alias(email):
# Match aliases with their canonical email
fetch_canonical = ("SELECT canonical_email "
"FROM contributors_aliases "
"WHERE alias_email=%s "
"AND cntrb_active = 1")
cfg.cursor_people.execute(fetch_canonical, (email, ))
cfg.db_people.commit()
canonical = list(cfg.cursor_people)
if canonical:
for email in canonical:
return email[0]
else:
return email
### The real function starts here ###
cfg.update_status('Filling empty affiliations')
cfg.log_activity('Info','Filling empty affiliations')
# Process any changes to the affiliations or aliases, and set any existing
# entries in commits to NULL so they are filled properly.
# First, get the time we started fetching since we'll need it later
cfg.cursor.execute("SELECT current_timestamp(6) as fetched")
affiliations_fetched = cfg.cursor.fetchone()[0]#['fetched']
# Now find the last time we worked on affiliations, to figure out what's new
affiliations_processed = cfg.get_setting('affiliations_processed')
get_changed_affiliations = ("SELECT ca_domain FROM contributor_affiliations")# WHERE "
#"ca_last_used >= timestamptz %s")
cfg.cursor_people.execute(get_changed_affiliations)#, (affiliations_processed, ))
changed_affiliations = list(cfg.cursor_people)
# Process any affiliations which changed since we last checked
for changed_affiliation in changed_affiliations:
cfg.log_activity('Debug','Resetting affiliation for %s' %
changed_affiliation[0])
set_author_to_null = ("UPDATE commits SET cmt_author_affiliation = NULL "
"WHERE cmt_author_email LIKE CONCAT('%%',%s)")
cfg.cursor.execute(set_author_to_null, (changed_affiliation[0], ))
cfg.db.commit()
set_committer_to_null = ("UPDATE commits SET cmt_committer_affiliation = NULL "
"WHERE cmt_committer_email LIKE CONCAT('%%',%s)")
cfg.cursor.execute(set_committer_to_null, (changed_affiliation[0], ))
cfg.db.commit()
# Update the last fetched date, so we know where to start next time.
update_affiliations_date = ("UPDATE settings SET value=%s "
"WHERE setting = 'affiliations_processed'")
cfg.cursor.execute(update_affiliations_date, (affiliations_fetched, ))
cfg.db.commit()
# On to the aliases, now
# First, get the time we started fetching since we'll need it later
cfg.cursor.execute("SELECT current_timestamp(6) as fetched")
aliases_fetched = cfg.cursor.fetchone()[0]#['fetched']
# Now find the last time we worked on aliases, to figure out what's new
aliases_processed = cfg.get_setting('aliases_processed')
get_changed_aliases = ("SELECT alias_email FROM contributors_aliases WHERE "
"cntrb_last_modified >= %s")
cfg.cursor_people.execute(get_changed_aliases, (aliases_processed, ))
changed_aliases = list(cfg.cursor_people)
# Process any aliases which changed since we last checked
for changed_alias in changed_aliases:
cfg.log_activity('Debug','Resetting affiliation for %s' %
changed_alias[0])
set_author_to_null = ("UPDATE commits SET cmt_author_affiliation = NULL "
"WHERE cmt_author_raw_email LIKE CONCAT('%%',%s)")
cfg.cursor.execute(set_author_to_null,(changed_alias[0], ))
cfg.db.commit()
set_committer_to_null = ("UPDATE commits SET cmt_committer_affiliation = NULL "
"WHERE cmt_committer_raw_email LIKE CONCAT('%%',%s)")
cfg.cursor.execute(set_committer_to_null, (changed_alias[0], ))
cfg.db.commit()
reset_author = ("UPDATE commits "
"SET cmt_author_email = %s "
"WHERE cmt_author_raw_email = %s")
cfg.cursor.execute(reset_author, (discover_alias(changed_alias[0]),changed_alias[0]))
cfg.db.commit()
reset_committer = ("UPDATE commits "
"SET cmt_committer_email = %s "
"WHERE cmt_committer_raw_email = %s")
cfg.cursor.execute(reset_committer, (discover_alias(changed_alias[0]),changed_alias[0]))
cfg.db.commit()
# Update the last fetched date, so we know where to start next time.
update_aliases_date = ("UPDATE settings SET value=%s "
"WHERE setting = 'aliases_processed'")
cfg.cursor.execute(update_aliases_date, (aliases_fetched, ))
cfg.db.commit()
# Now rebuild the affiliation data
working_author = cfg.get_setting('working_author')
if working_author != 'done':
cfg.log_activity('Error','Trimming author data in affiliations: %s' %
working_author)
trim_author(cfg, working_author)
# Figure out which projects have NULL affiliations so they can be recached
set_recache = ("""UPDATE repo_groups
SET rg_recache=1
FROM repo_groups x, repo y, commits z
where x.repo_group_id = y.repo_group_id
and
y."repo_id" = z.repo_id
and
(z.cmt_author_affiliation IS NULL OR
z.cmt_committer_affiliation IS NULL)""")
# ("UPDATE repo_groups p "
# "JOIN repo r ON p.repo_group_id = r.repo_group_id "
# "JOIN commits a ON r.repo_id = a.repo_id "
# "SET rg_recache=TRUE WHERE "
# "author_affiliation IS NULL OR "
# "committer_affiliation IS NULL")
cfg.cursor.execute(set_recache)
cfg.db.commit()
# Find any authors with NULL affiliations and fill them
find_null_authors = ("SELECT DISTINCT cmt_author_email AS email, "
"MIN(cmt_author_date) AS earliest "
"FROM commits "
"WHERE cmt_author_affiliation IS NULL "
"GROUP BY cmt_author_email")
cfg.cursor.execute(find_null_authors)
null_authors = list(cfg.cursor)
cfg.log_activity('Debug','Found %s authors with NULL affiliation' %
len(null_authors))
for null_author in null_authors:
email = null_author[0]
store_working_author(cfg, email)
discover_null_affiliations('author',email)
store_working_author(cfg, 'done')
# Find any committers with NULL affiliations and fill them
find_null_committers = ("SELECT DISTINCT cmt_committer_email AS email, "
"MIN(cmt_committer_date) AS earliest "
"FROM commits "
"WHERE cmt_committer_affiliation IS NULL "
"GROUP BY cmt_committer_email")
cfg.cursor.execute(find_null_committers)
null_committers = list(cfg.cursor)
cfg.log_activity('Debug','Found %s committers with NULL affiliation' %
len(null_committers))
for null_committer in null_committers:
email = null_committer[0]
store_working_author(cfg, email)
discover_null_affiliations('committer',email)
# Now that we've matched as much as possible, fill the rest as (Unknown)
fill_unknown_author = ("UPDATE commits "
"SET cmt_author_affiliation = '(Unknown)' "
"WHERE cmt_author_affiliation IS NULL")
cfg.cursor.execute(fill_unknown_author)
cfg.db.commit()
fill_unknown_committer = ("UPDATE commits "
"SET cmt_committer_affiliation = '(Unknown)' "
"WHERE cmt_committer_affiliation IS NULL")
cfg.cursor.execute(fill_unknown_committer)
cfg.db.commit()
store_working_author(cfg, 'done')
cfg.log_activity('Info','Filling empty affiliations (complete)')
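# ---------------------------------------------------------------------------
# Hedged illustration (not part of facade): discover_null_affiliations() above
# tries an exact address match, then the full domain, then the domain with
# subdomains stripped, and finally falls back to "(Academic)" for .edu
# addresses. The pure function below mirrors that cascade against a plain
# dict so the fallback order is easy to see; the helper name and the
# known_affiliations mapping are illustrative, not part of the real schema.
def _affiliation_lookup_sketch(email, known_affiliations):
    if '@' not in email:
        return None                               # unmatchable, stays NULL upstream
    if email in known_affiliations:               # exact address match
        return known_affiliations[email]
    domain = email.split('@', 1)[1]
    if domain in known_affiliations:              # full-domain match
        return known_affiliations[domain]
    parent = '.'.join(domain.split('.')[-2:])     # strip subdomains
    if parent in known_affiliations:
        return known_affiliations[parent]
    if domain.endswith('.edu'):
        return '(Academic)'
    return None
# Example: _affiliation_lookup_sketch('dev@mail.cs.example.edu',
#                                     {'example.edu': 'Example University'})
# returns 'Example University'.
# ---------------------------------------------------------------------------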
def invalidate_caches(cfg):
# Invalidate all caches
cfg.update_status('Invalidating caches')
cfg.log_activity('Info','Invalidating caches')
invalidate_cache = "UPDATE repo_groups SET rg_recache = 1"
cfg.cursor.execute(invalidate_cache)
cfg.db.commit()
cfg.log_activity('Info','Invalidating caches (complete)')
def rebuild_unknown_affiliation_and_web_caches(cfg):
# When there's a lot of analysis data, calculating display data on the fly gets
# pretty expensive. Instead, we crunch the data based upon the user's preferred
# statistics (author or committer) and store them. We also store all records
# with an (Unknown) affiliation for display to the user.
cfg.update_status('Caching data for display')
cfg.log_activity('Info','Caching unknown affiliations and web data for display')
report_date = cfg.get_setting('report_date')
report_attribution = cfg.get_setting('report_attribution')
# Clear stale caches
clear_dm_repo_group_weekly = ("""
DELETE
FROM
dm_repo_group_weekly C USING repo_groups P
WHERE
P.repo_group_id = C.repo_group_id
AND P.rg_recache = 1
""")
# ("DELETE c.* FROM dm_repo_group_weekly c "
# "JOIN repo_groups p ON c.repo_group_id = p.repo_group_id WHERE "
# "p.rg_recache=TRUE")
cfg.cursor.execute(clear_dm_repo_group_weekly)
cfg.db.commit()
clear_dm_repo_group_monthly = ("""
DELETE
FROM
dm_repo_group_monthly C USING repo_groups P
WHERE
P.repo_group_id = C.repo_group_id
AND P.rg_recache = 1
""")
# ("DELETE c.* FROM dm_repo_group_monthly c "
# "JOIN repo_groups p ON c.repo_group_id = p.repo_group_id WHERE "
# "p.rg_recache=TRUE")
cfg.cursor.execute(clear_dm_repo_group_monthly)
cfg.db.commit()
clear_dm_repo_group_annual = ("""
DELETE
FROM
dm_repo_group_annual C USING repo_groups P
WHERE
P.repo_group_id = C.repo_group_id
AND P.rg_recache = 1
""")
# ("DELETE c.* FROM dm_repo_group_annual c "
# "JOIN repo_groups p ON c.repo_group_id = p.repo_group_id WHERE "
# "p.rg_recache=TRUE")
cfg.cursor.execute(clear_dm_repo_group_annual)
cfg.db.commit()
clear_dm_repo_weekly = ("""
DELETE
FROM
dm_repo_weekly C USING repo r,
repo_groups P
WHERE
C.repo_id = r.repo_id
AND P.repo_group_id = r.repo_group_id
AND P.rg_recache = 1
""")
# ("DELETE c.* FROM dm_repo_weekly c "
# "JOIN repo r ON c.repo_id = r.repo_id "
# "JOIN repo_groups p ON r.repo_group_id = p.repo_group_id WHERE "
# "p.rg_recache=TRUE")
cfg.cursor.execute(clear_dm_repo_weekly)
cfg.db.commit()
clear_dm_repo_monthly = ("""
DELETE
FROM
dm_repo_monthly C USING repo r,
repo_groups P
WHERE
C.repo_id = r.repo_id
AND P.repo_group_id = r.repo_group_id
AND P.rg_recache = 1
""")
# ("DELETE c.* FROM dm_repo_monthly c "
# "JOIN repo r ON c.repo_id = r.repo_id "
# "JOIN repo_groups p ON r.repo_group_id = p.repo_group_id WHERE "
# "p.rg_recache=TRUE")
cfg.cursor.execute(clear_dm_repo_monthly)
cfg.db.commit()
clear_dm_repo_annual = ("""
DELETE
FROM
dm_repo_annual C USING repo r,
repo_groups P
WHERE
C.repo_id = r.repo_id
AND P.repo_group_id = r.repo_group_id
AND P.rg_recache = 1
""")
# ("DELETE c.* FROM dm_repo_annual c "
# "JOIN repo r ON c.repo_id = r.repo_id "
# "JOIN repo_groups p ON r.repo_group_id = p.repo_group_id WHERE "
# "p.rg_recache=TRUE")
cfg.cursor.execute(clear_dm_repo_annual)
cfg.db.commit()
clear_unknown_cache = ("""
DELETE
FROM
unknown_cache C USING repo_groups P
WHERE
P.repo_group_id = C.repo_group_id
AND P.rg_recache = 1
""")
# ("DELETE c.* FROM unknown_cache c "
# "JOIN repo_groups p ON c.repo_group_id = p.repo_group_id WHERE "
# "p.rg_recache=TRUE")
cfg.cursor.execute(clear_unknown_cache)
cfg.db.commit()
cfg.log_activity('Verbose','Caching unknown authors and committers')
# Cache the unknown authors
unknown_authors = ("""
INSERT INTO unknown_cache
SELECT 'author',
r.repo_group_id,
a.cmt_author_email,
SPLIT_PART(a.cmt_author_email,'@',2),
SUM(a.cmt_added),
info.a AS tool_source, info.b AS tool_version, info.c AS data_source
FROM (VALUES(%s,%s,%s)) info(a,b,c),
commits a
JOIN repo r ON r.repo_id = a.repo_id
JOIN repo_groups p ON p.repo_group_id = r.repo_group_id
WHERE a.cmt_author_affiliation = '(Unknown)'
AND p.rg_recache = 1
GROUP BY r.repo_group_id,a.cmt_author_email, info.a, info.b, info.c
""")
cfg.cursor.execute(unknown_authors, (cfg.tool_source, cfg.tool_version, cfg.data_source))
cfg.db.commit()
# Cache the unknown committers
unknown_committers = ("""INSERT INTO unknown_cache
SELECT 'committer',
r.repo_group_id,
a.cmt_committer_email,
SPLIT_PART(a.cmt_committer_email,'@',2),
SUM(a.cmt_added),
info.a AS tool_source, info.b AS tool_version, info.c AS data_source
FROM (VALUES(%s,%s,%s)) info(a,b,c),
commits a
JOIN repo r ON r.repo_id = a.repo_id
JOIN repo_groups p ON p.repo_group_id = r.repo_group_id
WHERE a.cmt_committer_affiliation = '(Unknown)'
AND p.rg_recache = 1
GROUP BY r.repo_group_id,a.cmt_committer_email, info.a, info.b, info.c """)
cfg.cursor.execute(unknown_committers, (cfg.tool_source, cfg.tool_version, cfg.data_source))
cfg.db.commit()
# Start caching by project
cfg.log_activity('Verbose','Caching projects')
cache_projects_by_week = ("""INSERT INTO dm_repo_group_weekly
SELECT r.repo_group_id AS repo_group_id,
a.cmt_%s_email AS email,
a.cmt_%s_affiliation AS affiliation,
date_part('week', TO_TIMESTAMP(a.cmt_%s_date, 'YYYY-MM-DD')) AS week,
date_part('year', TO_TIMESTAMP(a.cmt_%s_date, 'YYYY-MM-DD')) AS year,
SUM(a.cmt_added) AS added,
SUM(a.cmt_removed) AS removed,
SUM(a.cmt_whitespace) AS whitespace,
COUNT(DISTINCT a.cmt_filename) AS files,
COUNT(DISTINCT a.cmt_commit_hash) AS patches,
info.a AS tool_source, info.b AS tool_version, info.c AS data_source
FROM (VALUES(%s,%s,%s)) info(a,b,c),
commits a
JOIN repo r ON r.repo_id = a.repo_id
JOIN repo_groups p ON p.repo_group_id = r.repo_group_id
LEFT JOIN exclude e ON
(a.cmt_author_email = e.email
AND (e.projects_id = r.repo_group_id
OR e.projects_id = 0))
OR (a.cmt_author_email LIKE CONCAT('%%',e.domain)
AND (e.projects_id = r.repo_group_id
OR e.projects_id = 0))
WHERE e.email IS NULL
AND e.domain IS NULL
AND p.rg_recache = 1
GROUP BY week,
year,
affiliation,
a.cmt_%s_email,
r.repo_group_id, info.a, info.b, info.c"""
% (report_attribution,report_attribution,
report_date,report_date,
cfg.tool_source, cfg.tool_version, cfg.data_source,
report_attribution))
cfg.cursor.execute(cache_projects_by_week)
cfg.db.commit()
cache_projects_by_month = ("""INSERT INTO dm_repo_group_monthly
SELECT r.repo_group_id AS repo_group_id,
a.cmt_%s_email AS email,
a.cmt_%s_affiliation AS affiliation,
date_part('month', TO_TIMESTAMP(a.cmt_%s_date, 'YYYY-MM-DD')) AS month,
date_part('year', TO_TIMESTAMP(a.cmt_%s_date, 'YYYY-MM-DD')) AS year,
SUM(a.cmt_added) AS added,
SUM(a.cmt_removed) AS removed,
SUM(a.cmt_whitespace) AS whitespace,
COUNT(DISTINCT a.cmt_filename) AS files,
COUNT(DISTINCT a.cmt_commit_hash) AS patches,
info.a AS tool_source, info.b AS tool_version, info.c AS data_source
FROM (VALUES(%s,%s,%s)) info(a,b,c),
commits a
JOIN repo r ON r.repo_id = a.repo_id
JOIN repo_groups p ON p.repo_group_id = r.repo_group_id
LEFT JOIN exclude e ON
(a.cmt_author_email = e.email
AND (e.projects_id = r.repo_group_id
OR e.projects_id = 0))
OR (a.cmt_author_email LIKE CONCAT('%%',e.domain)
AND (e.projects_id = r.repo_group_id
OR e.projects_id = 0))
WHERE e.email IS NULL
AND e.domain IS NULL
AND p.rg_recache = 1
GROUP BY month,
year,
affiliation,
a.cmt_%s_email,
r.repo_group_id, info.a, info.b, info.c"""
% (report_attribution,report_attribution,
report_date,report_date,
cfg.tool_source, cfg.tool_version, cfg.data_source,
report_attribution))
cfg.cursor.execute(cache_projects_by_month)
cfg.db.commit()
cache_projects_by_year = ("""INSERT INTO dm_repo_group_annual
SELECT r.repo_group_id AS repo_group_id,
a.cmt_%s_email AS email,
a.cmt_%s_affiliation AS affiliation,
date_part('year', TO_TIMESTAMP(a.cmt_%s_date, 'YYYY-MM-DD')) AS year,
SUM(a.cmt_added) AS added,
SUM(a.cmt_removed) AS removed,
SUM(a.cmt_whitespace) AS whitespace,
COUNT(DISTINCT a.cmt_filename) AS files,
COUNT(DISTINCT a.cmt_commit_hash) AS patches,
info.a AS tool_source, info.b AS tool_version, info.c AS data_source
FROM (VALUES(%s,%s,%s)) info(a,b,c),
commits a
JOIN repo r ON r.repo_id = a.repo_id
JOIN repo_groups p ON p.repo_group_id = r.repo_group_id
LEFT JOIN exclude e ON
(a.cmt_author_email = e.email
AND (e.projects_id = r.repo_group_id
OR e.projects_id = 0))
OR (a.cmt_author_email LIKE CONCAT('%%',e.domain)
AND (e.projects_id = r.repo_group_id
OR e.projects_id = 0))
WHERE e.email IS NULL
AND e.domain IS NULL
AND p.rg_recache = 1
GROUP BY year,
affiliation,
a.cmt_%s_email,
r.repo_group_id, info.a, info.b, info.c"""
% (report_attribution,report_attribution,
report_date,
cfg.tool_source, cfg.tool_version, cfg.data_source, report_attribution))
cfg.cursor.execute(cache_projects_by_year)
cfg.db.commit()
# Start caching by repo
cfg.log_activity('Verbose','Caching repos')
cache_repos_by_week = ("""INSERT INTO dm_repo_weekly
SELECT a.repo_id AS repo_id,
a.cmt_%s_email AS email,
a.cmt_%s_affiliation AS affiliation,
date_part('week', TO_TIMESTAMP(a.cmt_%s_date, 'YYYY-MM-DD')) AS week,
date_part('year', TO_TIMESTAMP(a.cmt_%s_date, 'YYYY-MM-DD')) AS year,
SUM(a.cmt_added) AS added,
SUM(a.cmt_removed) AS removed,
SUM(a.cmt_whitespace) AS whitespace,
COUNT(DISTINCT a.cmt_filename) AS files,
COUNT(DISTINCT a.cmt_commit_hash) AS patches,
info.a AS tool_source, info.b AS tool_version, info.c AS data_source
FROM (VALUES(%s,%s,%s)) info(a,b,c),
commits a
JOIN repo r ON r.repo_id = a.repo_id
JOIN repo_groups p ON p.repo_group_id = r.repo_group_id
LEFT JOIN exclude e ON
(a.cmt_author_email = e.email
AND (e.projects_id = r.repo_group_id
OR e.projects_id = 0))
OR (a.cmt_author_email LIKE CONCAT('%%',e.domain)
AND (e.projects_id = r.repo_group_id
OR e.projects_id = 0))
WHERE e.email IS NULL
AND e.domain IS NULL
AND p.rg_recache = 1
GROUP BY week,
year,
affiliation,
a.cmt_%s_email,
a.repo_id, info.a, info.b, info.c"""
% (report_attribution,report_attribution,
report_date,report_date,
cfg.tool_source, cfg.tool_version, cfg.data_source,
report_attribution))
cfg.cursor.execute(cache_repos_by_week)
cfg.db.commit()
cache_repos_by_month = ("""INSERT INTO dm_repo_monthly
SELECT a.repo_id AS repo_id,
a.cmt_%s_email AS email,
a.cmt_%s_affiliation AS affiliation,
date_part('month', TO_TIMESTAMP(a.cmt_%s_date, 'YYYY-MM-DD')) AS month,
date_part('year', TO_TIMESTAMP(a.cmt_%s_date, 'YYYY-MM-DD')) AS year,
SUM(a.cmt_added) AS added,
SUM(a.cmt_removed) AS removed,
SUM(a.cmt_whitespace) AS whitespace,
COUNT(DISTINCT a.cmt_filename) AS files,
COUNT(DISTINCT a.cmt_commit_hash) AS patches,
info.a AS tool_source, info.b AS tool_version, info.c AS data_source
FROM (VALUES(%s,%s,%s)) info(a,b,c),
commits a
JOIN repo r ON r.repo_id = a.repo_id
JOIN repo_groups p ON p.repo_group_id = r.repo_group_id
LEFT JOIN exclude e ON
(a.cmt_author_email = e.email
AND (e.projects_id = r.repo_group_id
OR e.projects_id = 0))
OR (a.cmt_author_email LIKE CONCAT('%%',e.domain)
AND (e.projects_id = r.repo_group_id
OR e.projects_id = 0))
WHERE e.email IS NULL
AND e.domain IS NULL
AND p.rg_recache = 1
GROUP BY month,
year,
affiliation,
a.cmt_%s_email,
a.repo_id, info.a, info.b, info.c"""
% (report_attribution,report_attribution,
report_date,report_date,
cfg.tool_source, cfg.tool_version, cfg.data_source,
report_attribution))
cfg.cursor.execute(cache_repos_by_month)
cfg.db.commit()
cache_repos_by_year = ("""INSERT INTO dm_repo_annual
SELECT a.repo_id AS repo_id,
a.cmt_%s_email AS email,
a.cmt_%s_affiliation AS affiliation,
date_part('year', TO_TIMESTAMP(a.cmt_%s_date, 'YYYY-MM-DD')) AS year,
SUM(a.cmt_added) AS added,
SUM(a.cmt_removed) AS removed,
SUM(a.cmt_whitespace) AS whitespace,
COUNT(DISTINCT a.cmt_filename) AS files,
COUNT(DISTINCT a.cmt_commit_hash) AS patches,
info.a AS tool_source, info.b AS tool_version, info.c AS data_source
FROM (VALUES(%s,%s,%s)) info(a,b,c),
commits a
JOIN repo r ON r.repo_id = a.repo_id
JOIN repo_groups p ON p.repo_group_id = r.repo_group_id
LEFT JOIN exclude e ON
(a.cmt_author_email = e.email
AND (e.projects_id = r.repo_group_id
OR e.projects_id = 0))
OR (a.cmt_author_email LIKE CONCAT('%%',e.domain)
AND (e.projects_id = r.repo_group_id
OR e.projects_id = 0))
WHERE e.email IS NULL
AND e.domain IS NULL
AND p.rg_recache = 1
GROUP BY year,
affiliation,
a.cmt_%s_email,
a.repo_id, info.a, info.b, info.c"""
% (report_attribution,report_attribution,
report_date,
cfg.tool_source, cfg.tool_version, cfg.data_source,
report_attribution))
cfg.cursor.execute(cache_repos_by_year)
cfg.db.commit()
# Reset cache flags
reset_recache = "UPDATE repo_groups SET rg_recache = 0"
cfg.cursor.execute(reset_recache)
cfg.db.commit()
cfg.log_activity('Info','Caching unknown affiliations and web data for display (complete)')
|
OSSHealth/ghdata
|
workers/facade_worker/facade_worker/facade07rebuildcache.py
|
Python
|
mit
| 30,270
|
[
"Brian"
] |
ccaaeaf285d7112cfb978abf1e68dffbf124acac10c46ac713730b7fc58c7708
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ast
import os
import re
from glob import glob
from itertools import chain
from typing import Iterable, List, Optional, Set
from docs.exts.docs_build.errors import DocBuildError # pylint: disable=no-name-in-module
ROOT_PROJECT_DIR = os.path.abspath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, os.pardir)
)
ROOT_PACKAGE_DIR = os.path.join(ROOT_PROJECT_DIR, "airflow")
DOCS_DIR = os.path.join(ROOT_PROJECT_DIR, "docs")
def find_existing_guide_operator_names(src_dir: str) -> Set[str]:
"""
Find names of existing operators.
:return: names of existing operators.
"""
operator_names = set()
paths = glob(f"{src_dir}/**/*.rst", recursive=True)
for path in paths:
with open(path) as f:
operator_names |= set(re.findall(".. _howto/operator:(.+?):", f.read()))
return operator_names
def extract_ast_class_def_by_name(ast_tree, class_name):
"""
Extracts class definition by name
:param ast_tree: AST tree
:param class_name: name of the class.
:return: class node found
"""
class ClassVisitor(ast.NodeVisitor):
"""Visitor."""
def __init__(self):
self.found_class_node = None
def visit_ClassDef(self, node): # pylint: disable=invalid-name
"""
Visit class definition.
:param node: node.
:return:
"""
if node.name == class_name:
self.found_class_node = node
visitor = ClassVisitor()
visitor.visit(ast_tree)
return visitor.found_class_node
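# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original checks): the function above
# walks a parsed module and returns the matching ast.ClassDef, or None. The
# helper below shows the contract on a made-up source string.
def _demo_extract_class_def():
    source = "class MyOperator:\n    '''Docstring.'''\n"
    node = extract_ast_class_def_by_name(ast.parse(source), "MyOperator")
    # node.lineno == 1 and ast.get_docstring(node) == 'Docstring.'
    return node
# ---------------------------------------------------------------------------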
def check_guide_links_in_operator_descriptions() -> List[DocBuildError]:
"""Check if there are links to guides in operator's descriptions."""
# TODO: We should also check the guides in the provider documentations.
# For now, we are only checking the core documentation.
# This is easiest to do after the content has been fully migrated.
build_errors = []
def generate_build_error(path, line_no, operator_name):
return DocBuildError(
package_name=None,
file_path=path,
line_no=line_no,
message=(
f"Link to the guide is missing in operator's description: {operator_name}.\n"
f"Please add link to the guide to the description in the following form:\n"
f"\n"
f".. seealso::\n"
f" For more information on how to use this operator, take a look at the guide:\n"
f" :ref:`apache-airflow:howto/operator:{operator_name}`\n"
),
)
# Extract operators for which there are existing .rst guides
operator_names = find_existing_guide_operator_names(f"{DOCS_DIR}/howto/operator")
# Extract all potential python modules that can contain operators
python_module_paths = chain(
glob(f"{ROOT_PACKAGE_DIR}/operators/*.py"),
glob(f"{ROOT_PACKAGE_DIR}/sensors/*.py"),
glob(f"{ROOT_PACKAGE_DIR}/providers/**/operators/*.py", recursive=True),
glob(f"{ROOT_PACKAGE_DIR}/providers/**/sensors/*.py", recursive=True),
glob(f"{ROOT_PACKAGE_DIR}/providers/**/transfers/*.py", recursive=True),
)
for py_module_path in python_module_paths:
with open(py_module_path) as f:
py_content = f.read()
if "This module is deprecated" in py_content:
continue
for existing_operator in operator_names:
if f"class {existing_operator}" not in py_content:
continue
# This is a potential file with necessary class definition.
# To make sure it's a real Python class definition, we build AST tree
ast_tree = ast.parse(py_content)
class_def = extract_ast_class_def_by_name(ast_tree, existing_operator)
if class_def is None:
continue
docstring = ast.get_docstring(class_def) or ""
if "This class is deprecated." in docstring:
continue
if (f":ref:`apache-airflow:howto/operator:{existing_operator}`" in docstring
or f":ref:`howto/operator:{existing_operator}`" in docstring):
continue
build_errors.append(generate_build_error(py_module_path, class_def.lineno, existing_operator))
return build_errors
def assert_file_not_contains(file_path: str, pattern: str, message: str) -> Optional[DocBuildError]:
"""
Asserts that the file does not contain the pattern. Returns a DocBuildError if it does.
:param file_path: file
:param pattern: pattern
:param message: message to return
"""
with open(file_path, "rb", 0) as doc_file:
pattern_compiled = re.compile(pattern)
for num, line in enumerate(doc_file, 1):
line_decode = line.decode()
if re.search(pattern_compiled, line_decode):
return DocBuildError(file_path=file_path, line_no=num, message=message)
return None
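# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original checks): the function above
# returns a DocBuildError when the pattern is found and None otherwise. The
# throwaway helper below exercises it on a temporary file; tempfile and the
# sample message are used only for this illustration.
def _demo_assert_file_not_contains():
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".rst", delete=False) as tmp:
        tmp.write(".. code:: python\n")
        path = tmp.name
    error = assert_file_not_contains(
        file_path=path, pattern=r"^.. code::", message="use code-block instead"
    )
    # error is a DocBuildError pointing at line 1; a clean file yields None.
    return error
# ---------------------------------------------------------------------------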
def filter_file_list_by_pattern(file_paths: Iterable[str], pattern: str) -> List[str]:
"""
Filters the file list to those whose content matches the pattern
:param file_paths: file paths to check
:param pattern: pattern to match
:return: list of files matching the pattern
"""
output_paths = []
pattern_compiled = re.compile(pattern)
for file_path in file_paths:
with open(file_path, "rb", 0) as text_file:
text_file_content = text_file.read().decode()
if re.findall(pattern_compiled, text_file_content):
output_paths.append(file_path)
return output_paths
def find_modules(deprecated_only: bool = False) -> Set[str]:
"""
Finds all modules.
:param deprecated_only: whether only deprecated modules should be found.
:return: set of all modules found
"""
file_paths = glob(f"{ROOT_PACKAGE_DIR}/**/*.py", recursive=True)
# Exclude __init__.py
file_paths = [f for f in file_paths if not f.endswith("__init__.py")]
if deprecated_only:
file_paths = filter_file_list_by_pattern(file_paths, r"This module is deprecated.")
# Make path relative
file_paths = [os.path.relpath(f, ROOT_PROJECT_DIR) for f in file_paths]
# Convert filename to module
modules_names = {file_path.rpartition(".")[0].replace("/", ".") for file_path in file_paths}
return modules_names
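# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original checks): find_modules() above
# converts project-relative file paths into dotted module names by dropping
# the extension and replacing path separators. The one-liner below shows that
# conversion in isolation; the sample path is illustrative.
def _demo_path_to_module(path="airflow/providers/google/operators/bigquery.py"):
    # -> 'airflow.providers.google.operators.bigquery'
    return path.rpartition(".")[0].replace("/", ".")
# ---------------------------------------------------------------------------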
def check_exampleinclude_for_example_dags() -> List[DocBuildError]:
"""Checks all exampleincludes for example dags."""
all_docs_files = glob(f"{DOCS_DIR}/**/*rst", recursive=True)
build_errors = []
for doc_file in all_docs_files:
build_error = assert_file_not_contains(
file_path=doc_file,
pattern=r"literalinclude::.+example_dags",
message=(
"literalinclude directive is prohibited for example DAGs. \n"
"You should use the exampleinclude directive to include example DAGs."
),
)
if build_error:
build_errors.append(build_error)
return build_errors
def check_enforce_code_block() -> List[DocBuildError]:
"""Checks all code:: blocks."""
all_docs_files = glob(f"{DOCS_DIR}/**/*rst", recursive=True)
build_errors = []
for doc_file in all_docs_files:
build_error = assert_file_not_contains(
file_path=doc_file,
pattern=r"^.. code::",
message=(
"We recommend using the code-block directive instead of the code directive. "
"The code-block directive is more feature-full."
),
)
if build_error:
build_errors.append(build_error)
return build_errors
|
mrkm4ntr/incubator-airflow
|
docs/exts/docs_build/lint_checks.py
|
Python
|
apache-2.0
| 8,575
|
[
"VisIt"
] |
8b0919461d57d0a9d24f9ac39ae7e6a25a7d37a8fb5f1d643435c78fba4d9a5b
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Generate DFT grids and weights, based on the code provided by Gerald Knizia <>
Reference for Lebedev-Laikov grid:
V. I. Lebedev, and D. N. Laikov "A quadrature formula for the sphere of the
131st algebraic order of accuracy", Doklady Mathematics, 59, 477-481 (1999)
'''
import ctypes
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf import gto
from pyscf.dft import radi
from pyscf import __config__
libdft = lib.load_library('libdft')
BLKSIZE = 128 # needs to match lib/gto/grid_ao_drv.c
# ~= (L+1)**2/3
LEBEDEV_ORDER = {
0: 1,
3: 6,
5: 14,
7: 26,
9: 38,
11: 50,
13: 74,
15: 86,
17: 110,
19: 146,
21: 170,
23: 194,
25: 230,
27: 266,
29: 302,
31: 350,
35: 434,
41: 590,
47: 770,
53: 974,
59: 1202,
65: 1454,
71: 1730,
77: 2030,
83: 2354,
89: 2702,
95: 3074,
101: 3470,
107: 3890,
113: 4334,
119: 4802,
125: 5294,
131: 5810
}
LEBEDEV_NGRID = numpy.asarray((
1 , 6 , 14 , 26 , 38 , 50 , 74 , 86 , 110 , 146 ,
170 , 194 , 230 , 266 , 302 , 350 , 434 , 590 , 770 , 974 ,
1202, 1454, 1730, 2030, 2354, 2702, 3074, 3470, 3890, 4334,
4802, 5294, 5810))
# SG0
# S. Chien and P. Gill, J. Comput. Chem. 27 (2006) 730-739.
def sg1_prune(nuc, rads, n_ang, radii=radi.SG1RADII):
'''SG1, CPL, 209, 506
Args:
nuc : int
Nuclear charge.
rads : 1D array
Grid coordinates on the radial axis.
n_ang : int
Max number of grids over angular part.
Kwargs:
radii : 1D array
radii (in Bohr) for atoms in periodic table
Returns:
A list of the same length as rads. The list element is the number of
grids over angular part for each radial grid.
'''
# In SG1 the ang grids for the five regions
# 6 38 86 194 86
leb_ngrid = numpy.array([6, 38, 86, 194, 86])
alphas = numpy.array((
(0.25 , 0.5, 1.0, 4.5),
(0.1667, 0.5, 0.9, 3.5),
(0.1 , 0.4, 0.8, 2.5)))
r_atom = radii[nuc] + 1e-200
if nuc <= 2: # H, He
place = ((rads/r_atom).reshape(-1,1) > alphas[0]).sum(axis=1)
elif nuc <= 10: # Li - Ne
place = ((rads/r_atom).reshape(-1,1) > alphas[1]).sum(axis=1)
else:
place = ((rads/r_atom).reshape(-1,1) > alphas[2]).sum(axis=1)
return leb_ngrid[place]
def nwchem_prune(nuc, rads, n_ang, radii=radi.BRAGG_RADII):
'''NWChem
Args:
nuc : int
Nuclear charge.
rads : 1D array
Grid coordinates on the radial axis.
n_ang : int
Max number of grids over angular part.
Kwargs:
radii : 1D array
radii (in Bohr) for atoms in periodic table
Returns:
A list of the same length as rads. The list element is the number of
grids over angular part for each radial grid.
'''
alphas = numpy.array((
(0.25 , 0.5, 1.0, 4.5),
(0.1667, 0.5, 0.9, 3.5),
(0.1 , 0.4, 0.8, 2.5)))
leb_ngrid = LEBEDEV_NGRID[4:] # [38, 50, 74, 86, ...]
if n_ang < 50:
return numpy.repeat(n_ang, len(rads))
elif n_ang == 50:
leb_l = numpy.array([1, 2, 2, 2, 1])
else:
idx = numpy.where(leb_ngrid==n_ang)[0][0]
leb_l = numpy.array([1, 3, idx-1, idx, idx-1])
r_atom = radii[nuc] + 1e-200
if nuc <= 2: # H, He
place = ((rads/r_atom).reshape(-1,1) > alphas[0]).sum(axis=1)
elif nuc <= 10: # Li - Ne
place = ((rads/r_atom).reshape(-1,1) > alphas[1]).sum(axis=1)
else:
place = ((rads/r_atom).reshape(-1,1) > alphas[2]).sum(axis=1)
angs = leb_l[place]
angs = leb_ngrid[angs]
return angs
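# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of PySCF): nwchem_prune() above maps each
# radial point of an atom to a Lebedev grid size, keeping smaller grids near
# the nucleus and the full n_ang grid in the valence region. The numbers are
# illustrative only.
def _demo_nwchem_prune():
    rads = numpy.linspace(0.05, 5.0, 8)   # sample radial points (Bohr)
    angs = nwchem_prune(8, rads, 302)     # oxygen, 302-point target grid
    # angs has the same length as rads and holds Lebedev sizes such as
    # 50 or 86 close to the nucleus and 302 in the valence region.
    return angs
# ---------------------------------------------------------------------------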
# Prune scheme JCP 102, 346
def treutler_prune(nuc, rads, n_ang, radii=None):
'''Treutler-Ahlrichs
Args:
nuc : int
Nuclear charge.
rads : 1D array
Grid coordinates on the radial axis.
n_ang : int
Max number of grids over angular part.
Returns:
A list of the same length as rads. The list element is the number of
grids over angular part for each radial grid.
'''
nr = len(rads)
leb_ngrid = numpy.empty(nr, dtype=int)
leb_ngrid[:nr//3] = 14 # l=5
leb_ngrid[nr//3:nr//2] = 50 # l=11
leb_ngrid[nr//2:] = n_ang
return leb_ngrid
###########################################################
# Becke partitioning
# Stratmann, Scuseria, Frisch. CPL, 257, 213 (1996), eq.11
def stratmann(g):
'''Stratmann, Scuseria, Frisch. CPL, 257, 213 (1996)'''
a = .64 # for eq. 14
g = numpy.asarray(g)
ma = g/a
ma2 = ma * ma
g1 = numpy.asarray((1/16.)*(ma*(35 + ma2*(-35 + ma2*(21 - 5 *ma2)))))
g1[g<=-a] = -1
g1[g>= a] = 1
return g1
def original_becke(g):
'''Becke, JCP, 88, 2547 (1988)'''
# This function is also implemented (and optimized) in the C code VXCgen_grid;
# the Python version below is kept as the fallback used by gen_grid_partition()
# when the C path cannot be taken (e.g. with a custom radii_adjust function).
g = (3 - g**2) * g * .5
g = (3 - g**2) * g * .5
g = (3 - g**2) * g * .5
return g
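# ---------------------------------------------------------------------------
# Hedged illustration (not part of PySCF): applying p(g) = 0.5*(3 - g**2)*g
# three times, as above, turns the raw "which atom is closer" coordinate
# g in [-1, 1] into a smooth switching function, which is what keeps the
# atomic weights in gen_grid_partition() well behaved. Self-contained check:
def _demo_becke_smoothing(g=0.5, iterations=3):
    for _ in range(iterations):
        g = 0.5 * (3.0 - g * g) * g
    return g   # about 0.975 for g=0.5; -1.0, 0.0 and 1.0 are fixed points
# ---------------------------------------------------------------------------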
def gen_atomic_grids(mol, atom_grid={}, radi_method=radi.gauss_chebyshev,
level=3, prune=nwchem_prune, **kwargs):
'''Generate number of radial grids and angular grids for the given molecule.
Returns:
A dict, with the atom symbol for the dict key. For each atom type,
the dict value has two items: one is the meshgrid coordinates wrt the
atom center; the second is the volume of that grid.
'''
if isinstance(atom_grid, (list, tuple)):
atom_grid = dict([(mol.atom_symbol(ia), atom_grid)
for ia in range(mol.natm)])
atom_grids_tab = {}
for ia in range(mol.natm):
symb = mol.atom_symbol(ia)
if symb not in atom_grids_tab:
chg = gto.charge(symb)
if symb in atom_grid:
n_rad, n_ang = atom_grid[symb]
if n_ang not in LEBEDEV_NGRID:
if n_ang in LEBEDEV_ORDER:
logger.warn(mol, 'n_ang %d for atom %d %s is not '
'the supported Lebedev angular grids. '
'Set n_ang to %d', n_ang, ia, symb,
LEBEDEV_ORDER[n_ang])
n_ang = LEBEDEV_ORDER[n_ang]
else:
raise ValueError('Unsupported angular grids %d' % n_ang)
else:
n_rad = _default_rad(chg, level)
n_ang = _default_ang(chg, level)
rad, dr = radi_method(n_rad, chg, ia, **kwargs)
rad_weight = 4*numpy.pi * rad**2 * dr
if callable(prune):
angs = prune(chg, rad, n_ang)
else:
angs = [n_ang] * n_rad
logger.debug(mol, 'atom %s rad-grids = %d, ang-grids = %s',
symb, n_rad, angs)
angs = numpy.array(angs)
coords = []
vol = []
for n in sorted(set(angs)):
grid = numpy.empty((n,4))
libdft.MakeAngularGrid(grid.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(n))
idx = numpy.where(angs==n)[0]
for i0, i1 in prange(0, len(idx), 12): # 12 radi-grids as a group
coords.append(numpy.einsum('i,jk->jik',rad[idx[i0:i1]],
grid[:,:3]).reshape(-1,3))
vol.append(numpy.einsum('i,j->ji', rad_weight[idx[i0:i1]],
grid[:,3]).ravel())
atom_grids_tab[symb] = (numpy.vstack(coords), numpy.hstack(vol))
return atom_grids_tab
def gen_partition(mol, atom_grids_tab,
radii_adjust=None, atomic_radii=radi.BRAGG_RADII,
becke_scheme=original_becke):
'''Generate the mesh grid coordinates and weights for DFT numerical integration.
We can change radii_adjust, becke_scheme functions to generate different meshgrid.
Returns:
grid_coord and grid_weight arrays. grid_coord array has shape (N,3);
weight 1D array has N elements.
'''
if callable(radii_adjust) and atomic_radii is not None:
f_radii_adjust = radii_adjust(mol, atomic_radii)
else:
f_radii_adjust = None
atm_coords = numpy.asarray(mol.atom_coords() , order='C')
atm_dist = gto.inter_distance(mol)
if (becke_scheme is original_becke and
(radii_adjust is radi.treutler_atomic_radii_adjust or
radii_adjust is radi.becke_atomic_radii_adjust or
f_radii_adjust is None)):
if f_radii_adjust is None:
p_radii_table = lib.c_null_ptr()
else:
f_radii_table = numpy.asarray([f_radii_adjust(i, j, 0)
for i in range(mol.natm)
for j in range(mol.natm)])
p_radii_table = f_radii_table.ctypes.data_as(ctypes.c_void_p)
def gen_grid_partition(coords):
coords = numpy.asarray(coords, order='F')
ngrids = coords.shape[0]
pbecke = numpy.empty((mol.natm,ngrids))
libdft.VXCgen_grid(pbecke.ctypes.data_as(ctypes.c_void_p),
coords.ctypes.data_as(ctypes.c_void_p),
atm_coords.ctypes.data_as(ctypes.c_void_p),
p_radii_table,
ctypes.c_int(mol.natm), ctypes.c_int(ngrids))
return pbecke
else:
def gen_grid_partition(coords):
ngrids = coords.shape[0]
grid_dist = numpy.empty((mol.natm,ngrids))
for ia in range(mol.natm):
dc = coords - atm_coords[ia]
grid_dist[ia] = numpy.sqrt(numpy.einsum('ij,ij->i',dc,dc))
pbecke = numpy.ones((mol.natm,ngrids))
for i in range(mol.natm):
for j in range(i):
g = 1/atm_dist[i,j] * (grid_dist[i]-grid_dist[j])
if f_radii_adjust is not None:
g = f_radii_adjust(i, j, g)
g = becke_scheme(g)
pbecke[i] *= .5 * (1-g)
pbecke[j] *= .5 * (1+g)
return pbecke
coords_all = []
weights_all = []
for ia in range(mol.natm):
coords, vol = atom_grids_tab[mol.atom_symbol(ia)]
coords = coords + atm_coords[ia]
pbecke = gen_grid_partition(coords)
weights = vol * pbecke[ia] * (1./pbecke.sum(axis=0))
coords_all.append(coords)
weights_all.append(weights)
return numpy.vstack(coords_all), numpy.hstack(weights_all)
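# ---------------------------------------------------------------------------
# Hedged illustration (not part of PySCF): gen_partition() above credits each
# grid point to atoms with fuzzy weights, scaling the raw quadrature volume by
# pbecke[ia] / sum_a pbecke[a], so the weights over all atoms add back up to
# the plain volume. Tiny numeric sketch with made-up partition values:
def _demo_becke_weight_normalisation():
    vol = numpy.array([0.2, 0.2])             # quadrature volumes of two points
    pbecke = numpy.array([[0.9, 0.3],         # atom 0's partition values
                          [0.2, 0.5]])        # atom 1's partition values
    w0 = vol * pbecke[0] / pbecke.sum(axis=0)     # weight credited to atom 0
    w1 = vol * pbecke[1] / pbecke.sum(axis=0)     # weight credited to atom 1
    return w0 + w1                             # equals vol again: [0.2, 0.2]
# ---------------------------------------------------------------------------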
def make_mask(mol, coords, relativity=0, shls_slice=None, verbose=None):
'''Mask to indicate whether a shell is zero on grid
Args:
mol : an instance of :class:`Mole`
coords : 2D array, shape (N,3)
The coordinates of grids.
Kwargs:
relativity : bool
No effects.
shls_slice : 2-element list
(shl_start, shl_end).
If given, only part of AOs (shl_start <= shell_id < shl_end) are
evaluated. By default, all shells defined in mol will be evaluated.
verbose : int or object of :class:`Logger`
No effects.
Returns:
2D mask array of shape (N,nbas), where N is the number of grids, nbas
is the number of shells.
'''
coords = numpy.asarray(coords, order='F')
ngrids = len(coords)
if shls_slice is None:
shls_slice = (0, mol.nbas)
nbas = shls_slice[1] - shls_slice[0]
non0tab = numpy.empty(((ngrids+BLKSIZE-1)//BLKSIZE, nbas),
dtype=numpy.uint8)
libdft.VXCnr_ao_screen(non0tab.ctypes.data_as(ctypes.c_void_p),
coords.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(ngrids),
mol._atm.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(mol.natm),
mol._bas[shls_slice[0]:].ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(nbas),
mol._env.ctypes.data_as(ctypes.c_void_p))
return non0tab
class Grids(lib.StreamObject):
'''DFT mesh grids
Attributes for Grids:
level : int
To control the number of radial and angular grids. Large number
leads to large mesh grids. The default level 3 corresponds to
(50,302) for H, He;
(75,302) for second row;
(80~105,434) for rest.
Grids settings at other levels can be found in
pyscf.dft.gen_grid.RAD_GRIDS and pyscf.dft.gen_grid.ANG_ORDER
atomic_radii : 1D array
| radi.BRAGG_RADII (default)
| radi.COVALENT_RADII
| None : to switch off atomic radii adjustment
radii_adjust : function(mol, atomic_radii) => (function(atom_id, atom_id, g) => array_like_g)
Function to adjust atomic radii, can be one of
| radi.treutler_atomic_radii_adjust
| radi.becke_atomic_radii_adjust
| None : to switch off atomic radii adjustment
radi_method : function(n) => (rad_grids, rad_weights)
scheme for radial grids, can be one of
| radi.treutler (default)
| radi.delley
| radi.mura_knowles
| radi.gauss_chebyshev
becke_scheme : function(v) => array_like_v
weight partition function, can be one of
| gen_grid.original_becke (default)
| gen_grid.stratmann
prune : function(nuc, rad_grids, n_ang) => list_n_ang_for_each_rad_grid
scheme to reduce number of grids, can be one of
| gen_grid.nwchem_prune (default)
| gen_grid.sg1_prune
| gen_grid.treutler_prune
| None : to switch off grid pruning
symmetry : bool
whether to symmetrize mesh grids (TODO)
atom_grid : dict
Set (radial, angular) grids for particular atoms.
Eg, grids.atom_grid = {'H': (20,110)} will generate 20 radial
grids and 110 angular grids for H atom.
Examples:
>>> mol = gto.M(atom='H 0 0 0; H 0 0 1.1')
>>> grids = dft.gen_grid.Grids(mol)
>>> grids.level = 4
>>> grids.build()
'''
def __init__(self, mol):
import sys
self.mol = mol
self.stdout = mol.stdout
self.verbose = mol.verbose
self.symmetry = mol.symmetry
self.atom_grid = {}
self.non0tab = None
cur_mod = sys.modules[__name__]
def _load_conf(mod, name, default):
var = getattr(__config__, name, None)
if var is None:
return default
elif callable(var):
return var
elif mod is None:
return getattr(cur_mod, var)
else:
return getattr(mod, var)
self.atomic_radii = _load_conf(radi, 'dft_gen_grid_Grids_atomic_radii',
radi.BRAGG_RADII)
#self.atomic_radii = radi.COVALENT_RADII
self.radii_adjust = _load_conf(radi, 'dft_gen_grid_Grids_radii_adjust',
radi.treutler_atomic_radii_adjust)
#self.radii_adjust = radi.becke_atomic_radii_adjust
#self.radii_adjust = None # to switch off atomic radii adjustment
self.radi_method = _load_conf(radi, 'dft_gen_grid_Grids_radi_method',
radi.treutler)
#self.radi_method = radi.gauss_chebyshev
#self.radi_method = radi.mura_knowles
#self.radi_method = radi.delley
self.becke_scheme = _load_conf(None, 'dft_gen_grid_Grids_becke_scheme',
original_becke)
#self.becke_scheme = stratmann
self.prune = _load_conf(None, 'dft_gen_grid_Grids_prune', nwchem_prune)
self.level = getattr(__config__, 'dft_gen_grid_Grids_level', 3)
##################################################
# don't modify the following attributes, they are not input options
self.coords = None
self.weights = None
self._keys = set(self.__dict__.keys())
@property
def size(self):
return getattr(self.weights, 'size', 0)
def __setattr__(self, key, val):
if key in ('atom_grid', 'atomic_radii', 'radii_adjust', 'radi_method',
'becke_scheme', 'prune', 'level'):
self.reset()
super(Grids, self).__setattr__(key, val)
def dump_flags(self, verbose=None):
logger.info(self, 'radial grids: %s', self.radi_method.__doc__)
logger.info(self, 'becke partition: %s', self.becke_scheme.__doc__)
logger.info(self, 'pruning grids: %s', self.prune)
logger.info(self, 'grids dens level: %d', self.level)
logger.info(self, 'symmetrized grids: %s', self.symmetry)
if self.radii_adjust is not None:
logger.info(self, 'atomic radii adjust function: %s',
self.radii_adjust)
logger.debug2(self, 'atomic_radii : %s', self.atomic_radii)
if self.atom_grid:
logger.info(self, 'User specified grid scheme %s', str(self.atom_grid))
return self
def build(self, mol=None, with_non0tab=False, **kwargs):
if mol is None: mol = self.mol
if self.verbose >= logger.WARN:
self.check_sanity()
atom_grids_tab = self.gen_atomic_grids(mol, self.atom_grid,
self.radi_method,
self.level, self.prune, **kwargs)
self.coords, self.weights = \
self.gen_partition(mol, atom_grids_tab,
self.radii_adjust, self.atomic_radii,
self.becke_scheme)
if with_non0tab:
self.non0tab = self.make_mask(mol, self.coords)
else:
self.non0tab = None
logger.info(self, 'tot grids = %d', len(self.weights))
return self
def kernel(self, mol=None, with_non0tab=False):
self.dump_flags()
return self.build(mol, with_non0tab)
def reset(self, mol=None):
'''Reset mol and clean up relevant attributes for scanner mode'''
if mol is not None:
self.mol = mol
self.coords = None
self.weights = None
self.non0tab = None
return self
@lib.with_doc(gen_atomic_grids.__doc__)
def gen_atomic_grids(self, mol, atom_grid=None, radi_method=None,
level=None, prune=None, **kwargs):
''' See gen_grid.gen_atomic_grids function'''
if atom_grid is None: atom_grid = self.atom_grid
if radi_method is None: radi_method = self.radi_method
if level is None: level = self.level
if prune is None: prune = self.prune
return gen_atomic_grids(mol, atom_grid, radi_method, level, prune, **kwargs)
@lib.with_doc(gen_partition.__doc__)
def gen_partition(self, mol, atom_grids_tab,
radii_adjust=None, atomic_radii=radi.BRAGG_RADII,
becke_scheme=original_becke):
''' See gen_grid.gen_partition function'''
return gen_partition(mol, atom_grids_tab, radii_adjust, atomic_radii,
becke_scheme)
@lib.with_doc(make_mask.__doc__)
def make_mask(self, mol=None, coords=None, relativity=0, shls_slice=None,
verbose=None):
if mol is None: mol = self.mol
if coords is None: coords = self.coords
return make_mask(mol, coords, relativity, shls_slice, verbose)
def _default_rad(nuc, level=3):
'''Number of radial grids '''
tab = numpy.array( (2 , 10, 18, 36, 54, 86, 118))
period = (nuc > tab).sum()
return RAD_GRIDS[level,period]
# Period 1 2 3 4 5 6 7 # level
RAD_GRIDS = numpy.array((( 10, 15, 20, 30, 35, 40, 50), # 0
( 30, 40, 50, 60, 65, 70, 75), # 1
( 40, 60, 65, 75, 80, 85, 90), # 2
( 50, 75, 80, 90, 95,100,105), # 3
( 60, 90, 95,105,110,115,120), # 4
( 70,105,110,120,125,130,135), # 5
( 80,120,125,135,140,145,150), # 6
( 90,135,140,150,155,160,165), # 7
(100,150,155,165,170,175,180), # 8
(200,200,200,200,200,200,200),)) # 9
def _default_ang(nuc, level=3):
'''Order of angular grids. See LEBEDEV_ORDER for the mapping of
the order and the number of angular grids'''
tab = numpy.array( (2 , 10, 18, 36, 54, 86, 118))
period = (nuc > tab).sum()
return LEBEDEV_ORDER[ANG_ORDER[level,period]]
# Period 1 2 3 4 5 6 7 # level
ANG_ORDER = numpy.array(((11, 15, 17, 17, 17, 17, 17 ), # 0
(17, 23, 23, 23, 23, 23, 23 ), # 1
(23, 29, 29, 29, 29, 29, 29 ), # 2
(29, 29, 35, 35, 35, 35, 35 ), # 3
(35, 41, 41, 41, 41, 41, 41 ), # 4
(41, 47, 47, 47, 47, 47, 47 ), # 5
(47, 53, 53, 53, 53, 53, 53 ), # 6
(53, 59, 59, 59, 59, 59, 59 ), # 7
(59, 59, 59, 59, 59, 59, 59 ), # 8
(65, 65, 65, 65, 65, 65, 65 ),)) # 9
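# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of PySCF): _default_rad() and _default_ang()
# pick a row of the tables above from the grid level and a column from the
# atom's period. For oxygen (Z=8, second period) at the default level 3 this
# gives 75 radial points and Lebedev order 29, i.e. 302 angular points via
# LEBEDEV_ORDER, matching the "(75,302) for second row" note in the Grids
# docstring. The helper below is illustrative only.
def _demo_default_grid_sizes(nuc=8, level=3):
    return _default_rad(nuc, level), _default_ang(nuc, level)   # -> (75, 302)
# ---------------------------------------------------------------------------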
def prange(start, end, step):
for i in range(start, end, step):
yield i, min(i+step, end)
if __name__ == '__main__':
from pyscf import gto
h2o = gto.Mole()
h2o.verbose = 0
h2o.output = None#"out_h2o"
h2o.atom = [
['O' , (0. , 0. , 0.)],
['H' , (0. , -0.757 , 0.587)],
['H' , (0. , 0.757 , 0.587)] ]
h2o.build()
import time
t0 = time.perf_counter()
g = Grids(h2o)
g.build()
print(g.coords.shape)
print(time.perf_counter() - t0)
|
gkc1000/pyscf
|
pyscf/dft/gen_grid.py
|
Python
|
apache-2.0
| 23,081
|
[
"NWChem",
"PySCF"
] |
b98505e05799e7b2d48caee526b6d2c49b1dcc10587fe5cbe337bed018d4b3f8
|
# -*- coding: utf-8 -*-
from sympy.matrices import Matrix
from sympy.core import Add, diff, Symbol
from sympy.simplify import simplify
from sympy.tensor.arraypy import Arraypy, TensorArray, matrix2arraypy, \
matrix2tensor, list2arraypy, list2tensor
from sympy.tensor.tensor_methods import is_symmetric
from sympy.tensor.helper_functions import check_vector_of_arguments, \
check_metric_tensor, check_the_vector_field, replace_index_to_k, \
check_the_christoffel_symbols_2
"""Module riemannian_geometry contains functions for work with tensor fields:
- the calculation of the scalar product;
- the Christoffel symbols of the first and second kind;
- the covariant derivative of the curvature tensor;
- the Ricci tensor;
- scalar and sectional curvature;
- the covariant derivative the tensor field;
- the covariant divergence of a tensor field;
- the Riemann curvature tensor and sectional curvature for left-invariant metric;
- the product of Kulkarni-Nomizu;
- the Gaussian curvature;
- the second quadratic form.
To implement the functions used modules: matrices and tensor
(with classes arraypy and tensor). All functions take arguments,
the types of which may be such as list, matrix, or array Arraypy tensor.
Some functions have optional parameter indicating the type of the function result.
Starting index of arguments with type Arraypy or TensorArray is not necessarily
and by default equal to 0. The function determines the range of the index
in array to return the object with the same range of index.
Functions are work with multidimensional arrays Arraypy and tensors,
classes and methods are contained in the module Arraypy.
"""
def scal_prod(X, Y, g):
"""Returns scalar product of vectors g(X,Y).
g(X,Y) = sum_{i,j}(g[i,j]*X[i]*Y[j])
Examples:
=========
>>> from sympy.tensor.riemannian_geometry import scal_prod
>>> from sympy import symbols, cos
>>> from sympy.tensor.arraypy import Arraypy, TensorArray
>>> x1, x2 = symbols('x1, x2')
X and Y are vectors or vector fields. Each can be a list, a
one-dimensional arraypy or a TensorArray with valence of indices (+1):
>>> X = [1, 2]
>>> Y = [3, 4]
g is the metric tensor; it must be a symmetric matrix, an arraypy array or a
covariant tensor with valence of indices (-1, -1):
>>> A = Arraypy((2, 2))
>>> g = TensorArray(A,(-1, -1))
>>> g[0,0] = cos(x2)**2
>>> g[0,1] = 0
>>> g[1,0] = 0
>>> g[1,1] = 1
The scalar product:
>>> sc = scal_prod(X, Y, g)
>>> print(sc)
3*cos(x2)**2 + 8
"""
# Handling of an input argument - the metric tensor g
check_metric_tensor(g)
if isinstance(g, (Arraypy, TensorArray)):
g = g.to_matrix()
if not g.is_symmetric():
raise ValueError('The metric tensor must be symmetric.')
# Handling of an input argument - the vector or vector field X
check_the_vector_field(X)
if isinstance(X, (TensorArray, Arraypy)):
X = X.to_list()
# Handling of an input argument - the vector or vector field Y
check_the_vector_field(Y)
if isinstance(Y, (TensorArray, Arraypy)):
Y = Y.to_list()
if not len(X) == len(Y):
raise ValueError('The vectors must be of identical length')
elif len(X) != g.rows:
raise ValueError(
'The length of the vector fields must match the dimension of the metric tensor')
# Calculation
indices = range(len(X))
scal = sum([g[i, j] * X[i] * Y[j] for i in indices
for j in indices])
# Output
return scal
def christoffel_1(g, var, type_output='t'):
"""Return the tensor type (-1,-1,-1) - array of Christoffel symbols for the given metric.
This returns the Christoffel symbol of first kind that represents the
Levi-Civita connection for the given metric.
christoffel_1[i,j,k] =
=(diff(g[j,k],x[i])+diff(g[i,k],x[j])-diff(g[i,j],x[k]))/2.
Examples:
=========
>>> from sympy.tensor.riemannian_geometry import christoffel_1
>>> from sympy.tensor.arraypy import Arraypy, TensorArray
>>> from sympy import symbols, cos
>>> x1, x2 = symbols('x1, x2')
var is a list of symbolic arguments. It may be a list, a one-dimensional
Arraypy or a one-dimensional TensorArray with index valence (+1):
>>> var = [x1, x2]
>>> A = Arraypy((2, 2))
>>> g = TensorArray(A,(-1, -1))
>>> g[0,0] = cos(x2)**2
>>> g[0,1] = 0
>>> g[1,0] = 0
>>> g[1,1] = 1
type_output is an optional parameter indicating the type of the result;
it accepts a character or string value:
- 't' means that the result will be a TensorArray;
- 'a' means that the result will be an Arraypy;
- the default is 't', so the result will be a TensorArray.
The Christoffel symbols of the first kind:
>>> ch_1 = christoffel_1(g, var, 't')
>>> print(ch_1)
0 sin(x2)*cos(x2)
-sin(x2)*cos(x2) 0
-sin(x2)*cos(x2) 0
0 0
>>> ch_1.type_pq
(0, 3)
"""
# Handling of input vector of arguments - var
check_vector_of_arguments(var)
if isinstance(var, (TensorArray, Arraypy)):
var = var.to_list()
# Definition of number of variables
n = len(var)
# Handling of an input argument - the metric tensor g
check_metric_tensor(g)
if isinstance(g, (Arraypy, TensorArray)):
if not (g.start_index[0] == g.start_index[1]):
raise ValueError(
'The starting indices of metric tensor must be identical')
idx_start = g.start_index[0]
elif isinstance(g, Matrix):
if not g.is_symmetric():
raise ValueError('The metric tensor must be symmetric.')
idx_start = 0
# Determine the range of the indices
[n1, n2] = g.shape
if not n == n1:
raise ValueError(
'The rank of the metric tensor does not coincide with the number of variables.')
indices = range(idx_start, idx_start + n)
# Create the output array with the new indices
Ch = Arraypy([3, n, idx_start])
# Calculation
for i in indices:
for j in indices:
for k in indices:
Ch[i, j, k] = (diff(g[j, k], var[i - idx_start]) +
diff(g[i, k], var[j - idx_start]) -
diff(g[i, j], var[k - idx_start])) / 2
# Handling of an output array
if type_output == str('t') or type_output == Symbol('t'):
christoffel_1 = Ch.to_tensor((-1, -1, -1))
elif type_output == str('a') or type_output == Symbol('a'):
christoffel_1 = Ch
else:
raise ValueError(
"The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.")
# Output
return christoffel_1
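# Illustrative note (not part of the original module): the Christoffel
# symbols of the second kind below can be obtained from those of the first
# kind by raising the last index with the inverse metric, i.e. (sketch):
#     Ch2[i, j, k] = sum(g_inv[k, l] * Ch1[i, j, l] for l in indices)
# which is exactly the commented alternative inside christoffel_2().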
def christoffel_2(g, var, type_output='t'):
"""Return the tensor type (-1, -1, 1) - array of Christoffel symbols for the given metric.
This returns the Christoffel symbol of second kind that represents the
Levi-Civita connection for the given metric.
christoffel_2[i,j,k] =
=Sum_{l}(g^{-1}[k,l]/2*(diff(g[j, l],x[i])+diff(g[i,l],x[j])-diff(g[i,j],x[l]))/2
Examples:
=========
>>> from sympy.tensor.riemannian_geometry import christoffel_2
>>> from sympy.tensor.arraypy import Arraypy, TensorArray
>>> from sympy import symbols, cos
>>> x1, x2 = symbols('x1, x2')
var is a list of symbolic arguments. It may be a list, a one-dimensional
Arraypy or a one-dimensional TensorArray with index valence (+1):
>>> var = [x1, x2]
g is the metric tensor; it must be a symmetric Matrix, an Arraypy or a
TensorArray with index valence (-1, -1):
>>> A = Arraypy((2, 2))
>>> g = TensorArray(A,(-1, -1))
>>> g[0,0] = cos(x2)**2
>>> g[0,1] = 0
>>> g[1,0] = 0
>>> g[1,1] = 1
type_output is an optional parameter indicating the type of the result;
it accepts a character or string value:
- 't' means that the result will be a TensorArray;
- 'a' means that the result will be an Arraypy;
- the default is 't', so the result will be a TensorArray.
The Christoffel symbols of the second kind:
>>> ch_2 = christoffel_2(g, var, 'a')
>>> print(ch_2)
0 sin(x2)*cos(x2)
-sin(x2)/cos(x2) 0
-sin(x2)/cos(x2) 0
0 0
"""
# Handling of input vector of arguments - var
check_vector_of_arguments(var)
if isinstance(var, (TensorArray, Arraypy)):
var = var.to_list()
# Definition of number of variables
n = len(var)
# Handling of an input argument - the metric tensor g
check_metric_tensor(g)
if isinstance(g, (Arraypy, TensorArray)):
if not (g.start_index[0] == g.start_index[1]):
raise ValueError(
'The starting indices of metric tensor must be identical')
idx_start = g.start_index[0]
g_inv = (g.to_matrix()).inv()
elif isinstance(g, Matrix):
if not g.is_symmetric():
raise ValueError('The metric tensor must be symmetric.')
idx_start = 0
g_inv = g.inv()
# Determine the range of the indices
[n1, n2] = g.shape
if not n == n1:
raise ValueError(
'The rank of the metric tensor does not coincide with the number of variables.')
indices = range(idx_start, idx_start + n)
# Create the output array with the new indices
Ch = Arraypy([3, n, idx_start])
# Calculation
for i in indices:
for j in indices:
for k in indices:
Ch[i, j, k] = sum([g_inv[k - idx_start, l - idx_start] *
(diff(g[j, l], var[i - idx_start]) +
diff(g[i, l], var[j - idx_start]) -
diff(g[i, j], var[l - idx_start])) / 2 for l in indices])
# Alternative calculation via the Christoffel symbols of the first kind:
# christ_1 = christoffel_1(g, var)
# for i in indices:
#     for j in indices:
#         for k in indices:
#             Ch[i, j, k] = Add(*[g_inv[k, l] * christ_1[i, j, l]
#                                 for l in indices])
# Handling of an output array
if type_output == str('t') or type_output == Symbol('t'):
christoffel_2 = Ch.to_tensor((-1, -1, 1))
elif type_output == str('a') or type_output == Symbol('a'):
christoffel_2 = Ch
else:
raise ValueError(
"The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.")
# Output
return christoffel_2
def covar_der(X, g, var, type_output='t'):
"""Return the covariant derivative the vector field.
nabla X[i,j] = diff(X[j],x[i])+Sum_{k}(Gamma2[k,i,j]*X[k])
Examples:
=========
>>> from sympy.tensor.riemannian_geometry import covar_der
>>> from sympy.tensor.arraypy import Arraypy, TensorArray
>>> from sympy import symbols, cos
>>> x1, x2 = symbols('x1, x2')
var is a list of symbolic arguments. It may be a list, a one-dimensional
Arraypy or a one-dimensional TensorArray with index valence (+1):
>>> var = [x1, x2]
g is the metric tensor; it must be a symmetric Matrix, an Arraypy or a
TensorArray with index valence (-1, -1):
>>> A = Arraypy((2, 2))
>>> g = TensorArray(A,(-1, -1))
>>> g[0,0] = cos(x2)**2
>>> g[0,1] = 0
>>> g[1,0] = 0
>>> g[1,1] = 1
X is a vector field; it can be a list, a one-dimensional Arraypy or a
one-dimensional TensorArray with index valence (+1):
>>> X = [x1 * x2**3, x1 - cos(x2)]
type_output is an optional parameter indicating the type of the result;
it accepts a character or string value:
- 't' means that the result will be a TensorArray;
- 'a' means that the result will be an Arraypy;
- the default is 't', so the result will be a TensorArray.
The covariant derivative:
>>> c_v = covar_der(X, g, var, 't')
>>> print(c_v)
x2**3 - (x1 - cos(x2))*sin(x2)/cos(x2) x1*x2**3*sin(x2)*cos(x2) + 1
-x1*x2**3*sin(x2)/cos(x2) + 3*x1*x2**2 sin(x2)
>>> c_v.type_pq
(1, 1)
"""
# Handling of input vector of arguments - var
check_vector_of_arguments(var)
if isinstance(var, (TensorArray, Arraypy)):
var = var.to_list()
# Definition of number of variables
n = len(var)
# Handling of an input argument - the metric tensor g
check_metric_tensor(g)
if isinstance(g, (Arraypy, TensorArray)):
if not (g.start_index[0] == g.start_index[1]):
raise ValueError(
'The starting indices of metric tensor must be identical')
idx_g = g.start_index[0]
elif isinstance(g, Matrix):
if not g.is_symmetric():
raise ValueError('The metric tensor must be symmetric.')
idx_g = 0
# Handling of an input argument - the vector field X
check_the_vector_field(X)
if isinstance(X, (Arraypy, TensorArray)):
idx_X = X.start_index[0]
elif isinstance(X, list):
idx_X = 0
# Determine the range of the indices
[n1, n2] = g.shape
if not n == n1:
raise ValueError(
'The rank of the metric tensor does not coincide with the number of variables.')
if (idx_g != idx_X):
raise ValueError(
'The start indices of the metric tensor and the vector field must be equal')
else:
idx_start = idx_g
indices = range(idx_start, idx_start + n)
# Create the output array with the new indices
cov = Arraypy([2, n, idx_start])
ch_2 = christoffel_2(g, var)
# Calculation
for i in indices:
for j in indices:
cov[i, j] = diff(X[j], var[i - idx_start]) + \
Add(*[ch_2[k, i, j] * X[k] for k in indices])
# Handling of an output array
if type_output == str('t') or type_output == Symbol('t'):
cov_der = cov.to_tensor((-1, 1))
elif type_output == str('a') or type_output == Symbol('a'):
cov_der = cov
else:
raise ValueError(
"The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.")
# Output
return cov_der
def covar_der_xy(X, Y, g, var, type_output='t'):
"""Return the covariant derivative the vector field along another field.
Examples:
=========
>>> from sympy.tensor.riemannian_geometry import covar_der_xy
>>> from sympy.tensor.arraypy import Arraypy, TensorArray
>>> from sympy import symbols, cos
>>> x1, x2 = symbols('x1, x2')
var is a list of symbolic arguments. It may be a list, a one-dimensional
Arraypy or a one-dimensional TensorArray with index valence (+1):
>>> var = [x1, x2]
g is the metric tensor; it must be a symmetric Matrix, an Arraypy or a
TensorArray with index valence (-1, -1):
>>> A = Arraypy((2, 2))
>>> g = TensorArray(A,(-1, -1))
>>> g[0,0] = cos(x2)**2
>>> g[0,1] = 0
>>> g[1,0] = 0
>>> g[1,1] = 1
X and Y are vector fields; they may be lists, one-dimensional Arraypy
objects or one-dimensional TensorArray objects with index valence (+1):
>>> X = [x1 * x2**3, x1 - cos(x2)]
>>> Y = [1, 2]
type_output is an optional parameter indicating the type of the result;
it accepts a character or string value:
- 't' means that the result will be a TensorArray;
- 'a' means that the result will be an Arraypy;
- the default is 't', so the result will be a TensorArray.
The covariant derivative along another vector field:
>>> c_v_XY = covar_der_xy(X, Y, g, var, 't')
>>> print(c_v_XY)
-2*x1*x2**3*sin(x2)/cos(x2) + 6*x1*x2**2 + x2**3 - (x1 - cos(x2))*sin(x2)/cos(x2) \
x1*x2**3*sin(x2)*cos(x2) + 2*sin(x2) + 1
"""
# Handling of input vector of arguments - var
check_vector_of_arguments(var)
if isinstance(var, (TensorArray, Arraypy)):
var = var.to_list()
# Definition of number of variables
n = len(var)
# Handling of an input argument - the metric tensor g
check_metric_tensor(g)
if isinstance(g, (Arraypy, TensorArray)):
if not (g.start_index[0] == g.start_index[1]):
raise ValueError(
'The starting indices of metric tensor must be identical')
idx_g = g.start_index[0]
elif isinstance(g, Matrix):
if not g.is_symmetric():
raise ValueError('The metric tensor must be symmetric.')
idx_g = 0
# Handling of an input argument - the vector field X
check_the_vector_field(X)
if isinstance(X, (Arraypy, TensorArray)):
idx_X = X.start_index[0]
elif isinstance(X, list):
idx_X = 0
# Handling of an input argument - the vector field Y
check_the_vector_field(Y)
if isinstance(Y, (Arraypy, TensorArray)):
idx_Y = Y.start_index[0]
elif isinstance(Y, list):
idx_Y = 0
[n1, n2] = g.shape
if not len(X) == len(Y):
raise ValueError('The vectors must be of identical length')
elif not idx_X == idx_Y:
raise ValueError('The start indices of the vector fields must be equal')
elif not(idx_g == idx_X):
raise ValueError(
'The start indices of the metric tensor and the vector field must be equal')
else:
idx_start = idx_g
if len(X) != n1:
raise ValueError(
'The length of the vector fields must match the dimension of the metric tensor')
# Determine the range of the indices
if not n == n1:
raise ValueError(
'The rank of the metric tensor does not coincide with the number of variables.')
indices = range(idx_start, idx_start + n)
# Create the output array with the new indices
nabla_XY = Arraypy([1, n, idx_start])
nabla_X = covar_der(X, g, var)
# Calculation
for j in indices:
nabla_XY[j] = sum([nabla_X[i, j] * Y[i] for i in indices])
# Handling of an output array
if type_output == str('t') or type_output == Symbol('t'):
cov_der_XY = nabla_XY.to_tensor((1))
elif type_output == str('a') or type_output == Symbol('a'):
cov_der_XY = nabla_XY
else:
raise ValueError(
"The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.")
# Output
return cov_der_XY
def riemann(g, var, type_output='t'):
"""Return the Riemann curvature tensor of type (-1, -1, -1, 1)
for the given metric tensor.
Riemann[i,j,k,l] = diff(Gamma_2[j,k,l],x[i]) - diff(Gamma_2[i,k,l],x[j]) +
+ Sum_{p}( Gamma_2[i,p,l]*Gamma_2[j,k,p] - Gamma_2[j,p,l]*Gamma_2[i,k,p] )
Examples:
=========
>>> from sympy.tensor.riemannian_geometry import riemann
>>> from sympy.tensor.arraypy import Arraypy, TensorArray
>>> from sympy import symbols, cos
>>> x1, x2 = symbols('x1, x2')
var is a list of symbolic arguments. It may be a list, a one-dimensional
Arraypy or a one-dimensional TensorArray with index valence (+1):
>>> var = [x1, x2]
g is the metric tensor; it must be a symmetric Matrix, an Arraypy or a
TensorArray with index valence (-1, -1):
>>> A = Arraypy((2, 2))
>>> g = TensorArray(A,(-1, -1))
>>> g[0,0] = cos(x2)**2
>>> g[0,1] = 0
>>> g[1,0] = 0
>>> g[1,1] = 1
type_output is an optional parameter indicating the type of the result;
it accepts a character or string value:
- 't' means that the result will be a TensorArray;
- 'a' means that the result will be an Arraypy;
- the default is 't', so the result will be a TensorArray.
The curvature tensor:
>>> r = riemann(g, var, 'a')
>>> print(r)
0 0
0 0
0 -cos(x2)**2
1 0
0 cos(x2)**2
-1 0
0 0
0 0
"""
# Handling of input vector of arguments - var
check_vector_of_arguments(var)
if isinstance(var, (TensorArray, Arraypy)):
var = var.to_list()
# Definition of number of variables
n = len(var)
# Handling of an input argument - the metric tensor g
check_metric_tensor(g)
if isinstance(g, (Arraypy, TensorArray)):
if not (g.start_index[0] == g.start_index[1]):
raise ValueError(
'The starting indices of metric tensor must be identical')
idx_start = g.start_index[0]
elif isinstance(g, Matrix):
if not g.is_symmetric():
raise ValueError('The metric tensor must be symmetric.')
idx_start = 0
# Determine the range of the indices
[n1, n2] = g.shape
if not n == n1:
raise ValueError(
'The rank of the metric tensor does not coincide with the number of variables.')
indices = range(idx_start, idx_start + n)
# Create the output array with the new indices
R = Arraypy([4, n, idx_start])
ch_2 = christoffel_2(g, var)
# Calculation
for i in indices:
for j in indices:
for k in indices:
for l in indices:
R[i, j, k, l] = (diff(ch_2[j, k, l], var[i - idx_start]) -
diff(ch_2[i, k, l], var[j - idx_start]) +
sum([ch_2[i, p, l] * ch_2[j, k, p] -
ch_2[j, p, l] * ch_2[i, k, p] for p in indices]))
# Handling of an output array
if type_output == str('t') or type_output == Symbol('t'):
riemann = R.to_tensor((-1, -1, -1, 1))
elif type_output == str('a') or type_output == Symbol('a'):
riemann = R
else:
raise ValueError(
"The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.")
# Output
return riemann
def ricci(riemann, var, type_output='t'):
"""Return the tensor Ricci of type (-1, -1), is symmetric tensor
for given Riemann curvature tensor.
Ricci[j,k] = Sum_{i}(Riemann[i,j,k,i])
Examples:
=========
>>> from sympy.tensor.riemannian_geometry import ricci, riemann
>>> from sympy.tensor.arraypy import Arraypy, TensorArray
>>> from sympy import symbols, cos
>>> x1, x2 = symbols('x1, x2')
var is a list of symbolic arguments. It may be a list, a one-dimensional
Arraypy or a one-dimensional TensorArray with index valence (+1):
>>> var = [x1, x2]
g is the metric tensor; it must be a symmetric Matrix, an Arraypy or a
TensorArray with index valence (-1, -1):
>>> A = Arraypy((2,2))
>>> g = TensorArray(A,(-1,-1))
>>> g[0,0] = cos(x2)**2
>>> g[0,1] = 0
>>> g[1,0] = 0
>>> g[1,1] = 1
riemann is the Riemann curvature tensor; it must be an Arraypy or a
TensorArray with index valence (-1, -1, -1, 1):
>>> cur = riemann(g, var, 't')
type_output is an optional parameter indicating the type of the result;
it accepts a character or string value:
- 't' means that the result will be a TensorArray;
- 'a' means that the result will be an Arraypy;
- the default is 't', so the result will be a TensorArray.
The Ricci tensor:
>>> r = ricci(cur, var, 't')
>>> print(r)
cos(x2)**2 0
0 1
>>> r.type_pq
(0, 2)
"""
# Handling of input vector of arguments - var
check_vector_of_arguments(var)
if isinstance(var, (TensorArray, Arraypy)):
var = var.to_list()
# Definition of number of variables
n = len(var)
# Handling of an input argument - the Riemann curvature tensor riemann
if not isinstance(riemann, (Matrix, Arraypy, TensorArray)):
raise TypeError(
'The type of Riemann curvature tensor must be Matrix, Arraypy or TensorArray')
else:
if isinstance(riemann, (Arraypy, TensorArray)):
if isinstance(riemann, TensorArray):
if not riemann.type_pq == (1, 3):
raise ValueError(
'The valence of Riemann curvature tensor must be (-1, -1, -1, 1)')
if not (
riemann.start_index.count(
riemann.start_index[0]) == 4):
raise ValueError(
'The starting indices of Riemann curvature tensor must be identical')
idx_start = riemann.start_index[0]
else:
idx_start = 0
# Determine the range of the indices
[n1, n2, n3, n4] = riemann.shape
if not n == n1:
raise ValueError(
'The rank of the Riemann curvature tensor does not coincide with the number of variables.')
indices = range(idx_start, idx_start + n)
# Create the output array with the new indices
Ri = Arraypy([2, n, idx_start])
# Calculation
for j in indices:
for k in indices:
Ri[j, k] = sum([riemann[i, j, k, i] for i in indices])
# Handling of an output array
if type_output == str('t') or type_output == Symbol('t'):
ricci = Ri.to_tensor((-1, -1))
elif type_output == str('a') or type_output == Symbol('a'):
ricci = Ri
else:
raise ValueError(
"The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.")
# Output
return ricci
def scal_curv(g, ricci, var):
"""The scalar curvature (or the Ricci scalar) is the simplest curvature
invariant of a Riemannian manifold.
S=Ricci[j,k]*g_inv[j,k]
Examples:
=========
>>> from sympy.tensor.riemannian_geometry import scal_curv, ricci, riemann
>>> from sympy.tensor.arraypy import Arraypy, TensorArray
>>> from sympy import symbols, cos
>>> x1, x2 = symbols('x1, x2')
var is a list of symbolic arguments. It may be a list, a one-dimensional
Arraypy or a one-dimensional TensorArray with index valence (+1):
>>> var = [x1, x2]
g is the metric tensor; it must be a symmetric Matrix, an Arraypy or a
TensorArray with index valence (-1, -1):
>>> A = Arraypy((2,2))
>>> g = TensorArray(A,(-1,-1))
>>> g[0,0] = cos(x2)**2
>>> g[0,1] = 0
>>> g[1,0] = 0
>>> g[1,1] = 1
riemann is the Riemann curvature tensor; it must be an Arraypy or a
TensorArray with index valence (-1, -1, -1, 1):
>>> cur = riemann(g, var, 't')
ricci is the Ricci tensor; it must be a Matrix, an Arraypy or a
TensorArray with index valence (-1, -1):
>>> r = ricci(cur, var, 't')
The scalar curvature:
>>> sc_c = scal_curv(g, r, var)
>>> print(sc_c)
2
"""
# Handling of input vector of arguments - var
check_vector_of_arguments(var)
if isinstance(var, (TensorArray, Arraypy)):
var = var.to_list()
# Definition of number of variables
n = len(var)
# Handling of an input argument - the metric tensor g
check_metric_tensor(g)
if isinstance(g, (Arraypy, TensorArray)):
g = g.to_matrix()
if not g.is_symmetric():
raise ValueError('The metric tensor must be symmetric.')
# The definition of inverse matrix of the metric tensor
g_inv = g.inv()
# Handling of an input argument - the Ricci tensor ricci
if not isinstance(ricci, (Matrix, Arraypy, TensorArray)):
raise TypeError(
'The type of tensor Ricci must be Matrix, TensorArray or Arraypy')
else:
if isinstance(ricci, (Arraypy, TensorArray)):
if isinstance(ricci, TensorArray):
if not ricci.type_pq == (0, 2):
raise ValueError(
'The valence of tensor Ricci must be (-1,-1)')
ricci = ricci.to_matrix()
if not ricci.is_symmetric():
raise ValueError('The Ricci tensor must be symmetric.')
if not (g.shape == ricci.shape):
raise ValueError(
'The rank of the metric tensor does not coincide with the rank of tensor Ricci.')
# Determine the range of the indices
[n1, n2] = g.shape
if not n == n1:
raise ValueError(
'The rank of the metric tensor does not coincide with the number of variables.')
# Calculation
indices = range(n)
scal_curv = sum([g_inv[i, j] * ricci[i, j] for i in indices
for j in indices])
# Output
return scal_curv
def k_sigma(X, Y, R, g, var):
"""Return Sectional curvature of thу Riemannian space
in the direction за two-dimensional area formed by
vectors X, Y for the given metric tensor.
Examples:
=========
>>> from sympy.tensor.riemannian_geometry import k_sigma, riemann
>>> from sympy.tensor.arraypy import Arraypy, TensorArray
>>> from sympy import symbols, cos
>>> x1, x2 = symbols('x1, x2')
var is a list of symbolic arguments. It may be a list, a one-dimensional
Arraypy or a one-dimensional TensorArray with index valence (+1):
>>> var = [x1, x2]
X and Y are vectors or vector fields. Each can be a list, a one-dimensional
Arraypy or a TensorArray with index valence (+1):
>>> X = [1, 2]
>>> Y = [3, 4]
g is the metric tensor; it must be a symmetric Matrix, an Arraypy or a
TensorArray with index valence (-1, -1):
>>> A = Arraypy((2, 2))
>>> g = TensorArray(A,(-1, -1))
>>> g[0,0] = cos(x2)**2
>>> g[0,1] = 0
>>> g[1,0] = 0
>>> g[1,1] = 1
R is the Riemann curvature tensor; it must be an Arraypy or a
TensorArray with index valence (-1, -1, -1, 1):
>>> R = riemann(g, var)
The sectional curvature:
>>> k_sig = k_sigma(X, Y, R, g, var)
>>> print(k_sig)
1
"""
# Handling of input vector of arguments - var
check_vector_of_arguments(var)
if isinstance(var, (TensorArray, Arraypy)):
var = var.to_list()
# Definition of number of variables
n = len(var)
# Handling of an input argument - the metric tensor g
check_metric_tensor(g)
if isinstance(g, (Arraypy, TensorArray)):
g = g.to_matrix()
if not g.is_symmetric():
raise ValueError('The metric tensor must be symmetric.')
# Handling of an input argument - the vector or vector field X
check_the_vector_field(X)
if isinstance(X, (TensorArray, Arraypy)):
X = X.to_list()
# Handling of an input argument - the vector or vector field Y
check_the_vector_field(Y)
if isinstance(Y, (TensorArray, Arraypy)):
Y = Y.to_list()
if not len(X) == len(Y):
raise ValueError('The vectors must be of identical length')
elif len(X) != g.rows:
raise ValueError(
'The length of the vector fields must match the dimension of the metric tensor')
# Handling of an input argument - the Riemann curvature tensor R
if not isinstance(R, (Matrix, Arraypy, TensorArray)):
raise TypeError(
'The type of Riemann curvature tensor must be Matrix, Arraypy or TensorArray')
else:
if isinstance(R, (Arraypy, TensorArray)):
if isinstance(R, TensorArray):
if not R.type_pq == (1, 3):
raise ValueError(
'The valence of the Riemann curvature tensor must be (-1, -1, -1, 1)')
if not (R.start_index[0] == R.start_index[1]):
raise ValueError(
'The starting indices of the Riemann curvature tensor must be identical')
idx_R = R.start_index[0]
# Determine the range of the indices
[n1, n2] = g.shape
if not n == n1:
raise ValueError(
'The rank of the metric tensor does not coincide with the number of variables.')
[n1, n2, n3, n4] = R.shape
if not n == n1:
raise ValueError(
'The rank of the Riemann curvature tensor does not coincide with the number of variables.')
indices = range(len(X))
# Calculation
Sc_pr = scal_prod(X, X, g) * scal_prod(Y, Y, g) - scal_prod(X, Y, g)**2
if (Sc_pr == 0):
raise ValueError('The two-dimensional area is a degenerate!')
numerator = sum([g[r, s] * R[i + idx_R, j + idx_R, k + idx_R, r + idx_R] *
X[i] * Y[j] * Y[k] * X[s]
for i in indices
for j in indices
for k in indices
for r in indices
for s in indices])
k_sigma = simplify(numerator / Sc_pr)
# Output
return k_sigma
def nabla(T, ch_2, var):
"""Return the covariant derivative the tensor field.
Examples:
=========
>>> from sympy.tensor.riemannian_geometry import nabla
>>> from sympy.tensor.arraypy import Arraypy
>>> from sympy import symbols, cos, sin
>>> x1, x2 = symbols('x1, x2')
var is a list of symbolic arguments. It may be a list, a one-dimensional
Arraypy or a one-dimensional TensorArray with index valence (+1):
>>> var = [x1, x2]
T is the tensor field; it must be a TensorArray:
>>> T = Arraypy([2, 2, 0]).to_tensor((1, -1))
>>> T[0,0] = x2
>>> T[0,1] = -x2
>>> T[1,0] = -x1
>>> T[1,1] = x1
ch_2 holds the Christoffel symbols of the second kind; it must be an
Arraypy or a TensorArray with index valence (-1, -1, 1):
>>> ch_2 = Arraypy([3, 2, 0]).to_tensor((-1, -1, 1))
>>> ch_2[0,0,0] = 0
>>> ch_2[0,0,1] = sin(x2)*cos(x2)
>>> ch_2[0,1,1] = 0
>>> ch_2[1,1,1] = 0
>>> ch_2[1,0,1] = 0
>>> ch_2[1,1,0] = 0
>>> ch_2[1,0,0] = -sin(x2)*cos(x2)
>>> ch_2[0,1,0] = -sin(x2)*cos(x2)
The covariant derivative of tensor field:
>>> nabla_t = nabla(T, ch_2, var)
>>> print(nabla_t)
-x1*sin(x2)*cos(x2) + x2*sin(x2)*cos(x2) 0
x1*sin(x2)*cos(x2) + x2*sin(x2)*cos(x2) x2*sin(x2)*cos(x2) - 1
-x1*sin(x2)*cos(x2) - x2*sin(x2)*cos(x2) -x1*sin(x2)*cos(x2) - 1
-x1*sin(x2)*cos(x2) + x2*sin(x2)*cos(x2) 0
"""
# Handling of an input argument - var
check_vector_of_arguments(var)
if isinstance(var, (TensorArray, Arraypy)):
var = var.to_list()
# Handling of an input argument - the Christoffel symbols of the second kind
check_the_christoffel_symbols_2(ch_2)
idx_ch = ch_2.start_index[0]
# Handling of an input argument - the tensor field T
if not isinstance(T, TensorArray):
raise TypeError(
'The type of tensor field must be TensorArray')
idx_start_T = T.start_index[0]
if (idx_start_T != idx_ch):
raise ValueError(
'The start indices of the tensor field and the Christoffel symbols '
'of the second kind must be equal')
# Determine the range of the indices
# The number of upper indices
p = T.type_pq[0]
# The dimension of the input array
n = T.shape[0]
# The rank of the input array
rank_T = len(T.shape)
# The index character (valences) of the input tensor
idx_char_T = T.ind_char
idx_char_nabla_T = list(idx_char_T) + [-1]
# upper_idx_numbers it is a list with the positions on which are the upper
# indices
upper_idx_numbers = [
k for k in range(len(idx_char_T)) if idx_char_T[k] == 1]
# low_idx_numbers it is a list with the positions on which are the lower
# indices
low_idx_numbers = [
k for k in range(len(idx_char_T)) if idx_char_T[k] == -1]
# Creating the output array in accordance with the start index
nabla_T = Arraypy([rank_T + 1, n, idx_start_T]).to_tensor(idx_char_nabla_T)
index_nabla_T = nabla_T.index_list
# Calculation
for index in index_nabla_T:
index_T = list(index)
del index_T[n]
index_T = tuple(index_T)
s = index[rank_T]
dt = diff(T[index_T], var[index[s]])
k = idx_start_T
nabla_T_up = 0
nabla_T_lo = 0
while k < n + idx_start_T:
for i in upper_idx_numbers:
index_T_ik = replace_index_to_k(index_T, i, k)
nabla_T_up += T[index_T_ik] * ch_2[index_T[i], s, k]
for j in low_idx_numbers:
index_T_jk = replace_index_to_k(index_T, j, k)
nabla_T_lo += T[index_T_jk] * ch_2[index_T[j], s, k]
k = k + 1
nabla_T[index] = dt + nabla_T_up - nabla_T_lo
# Output
return nabla_T
def nabla_x(T, ch_2, X, var):
"""Return the covariant derivative the tensor field along another vector field.
Examples:
=========
>>> from sympy.tensor.riemannian_geometry import nabla_x
>>> from sympy.tensor.arraypy import Arraypy
>>> from sympy import symbols, cos, sin
>>> x1, x2 = symbols('x1, x2')
var is a list of symbolic arguments. It may be a list, a one-dimensional
Arraypy or a one-dimensional TensorArray with index valence (+1):
>>> var = [x1, x2]
T is the tensor field; it must be a TensorArray:
>>> T = Arraypy([2, 2, 0]).to_tensor((1, -1))
>>> T[0,0] = x2
>>> T[0,1] = -x2
>>> T[1,0] = -x1
>>> T[1,1] = x1
ch_2 holds the Christoffel symbols of the second kind; it must be an
Arraypy or a TensorArray with index valence (-1, -1, 1):
>>> ch_2 = Arraypy([3, 2, 0]).to_tensor((-1, -1, 1))
>>> ch_2[0,0,0] = 0
>>> ch_2[0,0,1] = sin(x2)*cos(x2)
>>> ch_2[0,1,1] = 0
>>> ch_2[1,1,1] = 0
>>> ch_2[1,0,1] = 0
>>> ch_2[1,1,0] = 0
>>> ch_2[1,0,0] = -sin(x2)*cos(x2)
>>> ch_2[0,1,0] = -sin(x2)*cos(x2)
X is a vector field; it can be a list, a one-dimensional Arraypy or a
one-dimensional TensorArray with index valence (+1):
>>> X = [x1 * x2**3, x1 - cos(x2)]
The covariant derivative of tensor field along another vector field:
>>> nabla_xt = nabla_x(T, ch_2, X, var)
>>> print(nabla_xt)
x1*x2**3*(-x1*sin(x2)*cos(x2) + x2*sin(x2)*cos(x2)) x1*x2**3*(x1*sin(x2)*cos(x2) + \
x2*sin(x2)*cos(x2)) + (x1 - cos(x2))*(x2*sin(x2)*cos(x2) - 1)
x1*x2**3*(-x1*sin(x2)*cos(x2) - x2*sin(x2)*cos(x2)) + \
(x1 - cos(x2))*(-x1*sin(x2)*cos(x2) - 1) x1*x2**3*(-x1*sin(x2)*cos(x2) + x2*sin(x2)*cos(x2))
"""
# Handling of an input argument - var
check_vector_of_arguments(var)
if isinstance(var, (TensorArray, Arraypy)):
var = var.to_list()
# Handling of an input argument - the Christoffel symbols of the second kind
check_the_christoffel_symbols_2(ch_2)
idx_ch = ch_2.start_index[0]
# Handling of an input argument - the vector field X
check_the_vector_field(X)
if isinstance(X, (Arraypy, TensorArray)):
idx_X = X.start_index[0]
elif isinstance(X, list):
idx_X = 0
# Handling of an input argument - the tensor field T
if not isinstance(T, TensorArray):
raise TypeError(
'The type of tensor field must be TensorArray')
idx_start_T = T.start_index[0]
if (idx_start_T != idx_ch != idx_X):
raise ValueError(
'The start indices of the tensor field, the Christoffel symbols '
'of the second kind and the vector field must be equal')
# Determine the range of the indices
# The number of upper indices
p = T.type_pq[0]
# The dimension of the input array
n = T.shape[0]
# The rank of the input array
rank_T = len(T.shape)
# The index character (valences) of the input tensor
idx_char_T = T.ind_char
# Creating the output array in accordance with the start index
nabla_TX = Arraypy([rank_T, n, idx_start_T]).to_tensor(idx_char_T)
index_nabla_TX = nabla_TX.index_list
nabla_T = nabla(T, ch_2, var)
# Calculation
for index in index_nabla_TX:
k = idx_start_T
while k < n + idx_start_T:
idx_nabla_T = tuple(list(index) + [k])
nabla_TX[index] += nabla_T[idx_nabla_T] * X[k]
k = k + 1
# Output
return nabla_TX
def delta(T, g, ch_2, var):
"""Return the covariant divergence of a tensor field T.
Examples:
=========
>>> from sympy.tensor.riemannian_geometry import delta
>>> from sympy.tensor.arraypy import Arraypy
>>> from sympy import symbols, cos, sin
>>> x1, x2 = symbols('x1, x2')
var is a list of symbolic arguments. It may be a list, a one-dimensional
Arraypy or a one-dimensional TensorArray with index valence (+1):
>>> var = [x1, x2]
T is the tensor field; it must be a TensorArray:
>>> T = Arraypy([2, 2, 0]).to_tensor((1, -1))
>>> T[0,0] = x2
>>> T[0,1] = -x2
>>> T[1,0] = -x1
>>> T[1,1] = x1
ch_2 holds the Christoffel symbols of the second kind; it must be an
Arraypy or a TensorArray with index valence (-1, -1, 1):
>>> ch_2 = Arraypy([3, 2, 0]).to_tensor((-1, -1, 1))
>>> ch_2[0,0,0] = 0
>>> ch_2[0,0,1] = sin(x2)*cos(x2)
>>> ch_2[0,1,1] = 0
>>> ch_2[1,1,1] = 0
>>> ch_2[1,0,1] = 0
>>> ch_2[1,1,0] = 0
>>> ch_2[1,0,0] = -sin(x2)*cos(x2)
>>> ch_2[0,1,0] = -sin(x2)*cos(x2)
g is the metric tensor; it must be a symmetric Matrix, an Arraypy or a
TensorArray with index valence (-1, -1):
>>> g = Arraypy((2, 2)).to_tensor((-1, -1))
>>> g[0,0] = cos(x2)**2
>>> g[0,1] = 0
>>> g[1,0] = 0
>>> g[1,1] = 1
The covariant divergence of a tensor field:
>>> delta_T = delta(T, g, ch_2, var)
>>> print(delta_T)
x1*sin(x2)*cos(x2) + 1 0
"""
# Handling of an input argument - var
check_vector_of_arguments(var)
if isinstance(var, (TensorArray, Arraypy)):
var = var.to_list()
# Handling of an input argument - the metric tensor g
check_metric_tensor(g)
if isinstance(g, (Arraypy, TensorArray)):
g = g.to_matrix()
if not g.is_symmetric():
raise ValueError('The metric tensor must be symmetric.')
# Handling of an input argument - the Christoffel symbols of the second kind
check_the_christoffel_symbols_2(ch_2)
idx_ch = ch_2.start_index[0]
# Handling of an input argument - the tensor field T
if not isinstance(T, TensorArray):
raise TypeError(
'The type of the tensor field must be TensorArray')
idx_start_T = T.start_index[0]
# The definition of inverse matrix of the metric tensor
g_inv = g.inv()
# Determine the range of the indices
# The dimension of the input array
n = T.shape[0]
# The rank of the input array
rank_T = len(T.shape)
index_T = T.index_list
idx_char_delta_T = [(-1) for i in range(rank_T - 1)]
nabla_T = nabla(T, ch_2, var)
# Creating the output array in accordance with the start index
delta_T = Arraypy([rank_T - 1, n, idx_start_T]).to_tensor(idx_char_delta_T)
# Calculation
for index in index_T:
k = idx_start_T
while k < n + idx_start_T:
for j in range(n):
idx_nabla_T = tuple(list(index) + [k])
idx_delta_T = list(index)
del idx_delta_T[0]
idx_delta_T = tuple(idx_delta_T)
delta_T[idx_delta_T] = (-1) * \
nabla_T[idx_nabla_T] * g_inv[k, j]
k = k + 1
# Output
return delta_T
def riemann_li(C, g, var, type_output='t'):
"""Return the Riemann curvature tensor of type (-1, -1, -1, 1)
for the given left-invariant metric tensor.
Examples:
=========
>>> from sympy.tensor.riemannian_geometry import riemann_li
>>> from sympy.tensor.arraypy import Arraypy
>>> from sympy import symbols, cos, sin
>>> x1, x2 = symbols('x1, x2')
var is a list of symbolic arguments. It may be a list, a one-dimensional
Arraypy or a one-dimensional TensorArray with index valence (+1):
>>> var = [x1, x2]
C is the structure-constant tensor; it must be a TensorArray with index
valence (1, -1, -1):
>>> C = Arraypy([3, 2, 0]).to_tensor((1, -1, -1))
>>> C[0,0,0] = 0
>>> C[0,0,1] = sin(x2)*cos(x2)
>>> C[0,1,1] = 0
>>> C[1,1,1] = 0
>>> C[1,0,1] = 0
>>> C[1,1,0] = 0
>>> C[1,0,0] = -sin(x2)*cos(x2)
>>> C[0,1,0] = -sin(x2)*cos(x2)
g is the left-invariant metric tensor; it must be a symmetric Matrix,
an Arraypy or a TensorArray with index valence (-1, -1):
>>> g = Arraypy((2, 2)).to_tensor((-1, -1))
>>> g[0,0] = cos(x2)**2
>>> g[0,1] = 0
>>> g[1,0] = 0
>>> g[1,1] = 1
type_output is an optional parameter indicating the type of the result;
it accepts a character or string value:
- 't' means that the result will be a TensorArray;
- 'a' means that the result will be an Arraypy;
- the default is 't', so the result will be a TensorArray.
The curvature tensor:
>>> r_li = riemann_li(C, g, var, 'a')
>>> print(r_li)
-0.25*sin(x2)**2*cos(x2)**2 0
0 0
0 0
0 0
0 0
0 0
0 0
0 0
"""
# Handling of input vector arguments var
check_vector_of_arguments(var)
if isinstance(var, (TensorArray, Arraypy)):
var = var.to_list()
# Definition of number of variables
n = len(var)
# Handling of an input argument - the metric tensor g
check_metric_tensor(g)
if isinstance(g, (Arraypy, TensorArray)):
if not (g.start_index[0] == g.start_index[1]):
raise ValueError(
'The starting indices of metric tensor must be identical')
idx_g = g.start_index[0]
g_inv = (g.to_matrix()).inv()
elif isinstance(g, Matrix):
if not g.is_symmetric():
raise ValueError('The metric tensor must be symmetric.')
idx_g = 0
g_inv = g.inv()
# Handling of an input argument - the structure constant C
if not isinstance(C, TensorArray):
raise TypeError(
'The type of the structure constant C must be TensorArray')
else:
if isinstance(C, TensorArray):
if not C.type_pq == (1, 2):
raise ValueError(
'The valence (ind_char) of C must be (1, -1, -1)')
idx_c = C.start_index[0]
# Determine the range of the indices
[n1, n2] = g.shape
if not n == n1:
raise ValueError(
'The rank of the metric tensor does not coincide with the number of variables.')
if (idx_g != idx_c):
raise ValueError(
'The start indices of the metric tensor and the structure '
'constant must be equal')
else:
idx_start = idx_g
indices = range(idx_start, idx_start + n)
gamma = Arraypy([3, n, idx_start])
for p in indices:
for i in indices:
for j in indices:
for s in indices:
for k in indices:
gamma[p, i, j] = 0.5 * (C[p, i, j] +
g[j, s] * C[s, k, i] * g_inv[k, p] +
g[i, s] * C[s, k, j] * g_inv[k, p])
# Creating the output array in accordance with the start index
R = Arraypy([4, n, idx_start])
# Calculation
for s in indices:
for i in indices:
for j in indices:
for k in indices:
for p in indices:
R[i, j, k, s] = (gamma[s, i, p] * gamma[p, j, k] -
gamma[s, j, p] * gamma[p, i, k] -
gamma[s, p, k] * gamma[p, i, j])
# Handling of an output array
if type_output == str('t') or type_output == Symbol('t'):
riemann = R.to_tensor((1, -1, -1, -1))
elif type_output == str('a') or type_output == Symbol('a'):
riemann = R
else:
raise ValueError(
"The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.")
# Output
return riemann
def k_sigma_li(R, g, var):
"""Return Sectional curvature in the direction of coordinate areas.
Examples:
=========
>>> from sympy.tensor.riemannian_geometry import k_sigma_li, riemann_li
>>> from sympy.tensor.arraypy import Arraypy, TensorArray
>>> from sympy import symbols, cos, sin
>>> x1, x2 = symbols('x1, x2')
var is a list of symbolic arguments. It may be a list, a one-dimensional
Arraypy or a one-dimensional TensorArray with index valence (+1):
>>> var = [x1, x2]
g is the metric tensor; it must be a symmetric Matrix, an Arraypy or a
TensorArray with index valence (-1, -1):
>>> g = Arraypy((2, 2)).to_tensor((-1, -1))
>>> g[0,0] = cos(x2)**2
>>> g[0,1] = 0
>>> g[1,0] = 0
>>> g[1,1] = 1
C is the structure-constant tensor; it must be a TensorArray with index
valence (1, -1, -1):
>>> C = Arraypy([3, 2, 0]).to_tensor((1, -1, -1))
>>> C[0,0,0] = 0
>>> C[0,0,1] = sin(x2)
>>> C[0,1,1] = cos(x2)
>>> C[1,1,1] = cos(x2)
>>> C[1,0,1] = cos(x2)
>>> C[1,1,0] = 0
>>> C[1,0,0] = -sin(x2)
>>> C[0,1,0] = -sin(x2)
R is the Riemann curvature tensor; it must be an Arraypy or a
TensorArray with index valence (-1, -1, -1, 1):
>>> R = riemann_li(C, g, var, 't')
The sectional curvature:
>>> k_sig_li = k_sigma_li(R, g, var)
>>> print(k_sig_li)
-0.75*sin(x2)/cos(x2)
"""
# Handling of input vector arguments var
check_vector_of_arguments(var)
if isinstance(var, (TensorArray, Arraypy)):
var = var.to_list()
# Definition of number of variables
n = len(var)
# Handling of an input argument - the metric tensor g
if isinstance(g, (Arraypy, TensorArray)):
if not (g.start_index[0] == g.start_index[1]):
raise ValueError(
'The starting indices of metric tensor must be identical')
idx_start = g.start_index[0]
elif isinstance(g, Matrix):
if not g.is_symmetric():
raise ValueError('The metric tensor must be symmetric.')
idx_start = 0
# Handling of an input argument - the Riemann curvature tensor R
if not isinstance(R, (Matrix, Arraypy, TensorArray)):
raise TypeError(
'The type of Riemann curvature tensor must be Matrix, Arraypy or TensorArray')
else:
if isinstance(R, (Arraypy, TensorArray)):
if isinstance(R, TensorArray):
if not R.type_pq == (1, 3):
raise ValueError(
'The valence or ind_char of Riemann curvature tensor must be (-1,-1,-1,+1)')
if not (R.start_index[0] == R.start_index[1]):
raise ValueError(
'The starting indices of the Riemann curvature tensor must be identical')
idx_R = R.start_index[0]
# Determine the range of the indices
[n1, n2] = g.shape
if not n == n1:
raise ValueError(
'The rank of the metric tensor does not coincide with the number of variables.')
[n1, n2, n3, n4] = R.shape
if not n == n1:
raise ValueError(
'The rank of the Riemann curvature tensor does not coincide with the number of variables.')
indices = range(n)
for i in indices:
for j in indices:
if j == i:
continue
else:
if (g[i, i] * g[j, j] - g[i, j]**2) == 0:
raise ValueError('Division by zero!')
else:
k_sig_li = sum([(g[k, i] * R[i, j, j, k]) / (g[i, i] * g[j, j] - g[i, j]**2)
for k in indices])
# Output
return k_sig_li
def kulkarni_nomizu(h, k, var, type_output='t'):
"""Return the product of Kulkarni-Nomizu of type (-1, -1, -1, -1)
for the given two symmetric tensor.
Examples:
=========
>>> from sympy.tensor.riemannian_geometry import kulkarni_nomizu
>>> from sympy.tensor.arraypy import Arraypy
>>> from sympy import symbols, cos
>>> x1, x2 = symbols('x1, x2')
var is a list of symbolic arguments. It may be a list, a one-dimensional
Arraypy or a one-dimensional TensorArray with index valence (+1):
>>> var = [x1, x2]
h and k are tensors; each must be a symmetric Arraypy or TensorArray
with index valence (-1, -1):
>>> h = Arraypy((2, 2)).to_tensor((-1, -1))
>>> h[0,0] = x1
>>> h[0,1] = 0
>>> h[1,0] = 0
>>> h[1,1] = x2
>>> k = Arraypy((2, 2)).to_tensor((-1, -1))
>>> k[0,0] = x2
>>> k[0,1] = 0
>>> k[1,0] = 0
>>> k[1,1] = x1
type_output is an optional parameter indicating the type of the result;
it accepts a character or string value:
- 't' means that the result will be a TensorArray;
- 'a' means that the result will be an Arraypy;
- the default is 't', so the result will be a TensorArray.
The Kulkarni-Nomizu product:
>>> k_n = kulkarni_nomizu(h, k, var, 'a')
>>> print(k_n)
0 0
0 0
0 x1**2 + x2**2
-x1**2 - x2**2 0
0 -x1**2 - x2**2
x1**2 + x2**2 0
0 0
0 0
"""
# Handling of input vector arguments var
check_vector_of_arguments(var)
if isinstance(var, (TensorArray, Arraypy)):
var = var.to_list()
# Handling of input symmetric tensor h
if not isinstance(h, TensorArray):
raise TypeError(
'The type of input tensor must be a TensorArray')
if isinstance(h, TensorArray):
if not h.type_pq == (0, 2):
raise ValueError(
'The valence or ind_char of tensor must be (-1,-1)')
if not (h.to_matrix()).is_symmetric():
raise ValueError('The tensor must be symmetric.')
# Handling of input symmetric tensor k
if not isinstance(k, TensorArray):
raise TypeError(
'The type of input tensor must be a TensorArray')
if isinstance(k, TensorArray):
if not k.type_pq == (0, 2):
raise ValueError(
'The valence or ind_char of tensor must be (-1,-1)')
if not (k.to_matrix()).is_symmetric():
raise ValueError('The tensor must be symmetric.')
if (h.start_index[0] != k.start_index[0]):
raise ValueError(
'The start index of the tensors must be equal')
else:
idx_start = h.start_index[0]
# Definition of number of variables
n = len(var)
kul_nom = Arraypy([4, n, idx_start])
indices = range(idx_start, idx_start + n)
# Calculation
for i in indices:
for j in indices:
for t in indices:
for l in indices:
kul_nom[i, j, t, l] = ((h[i, t] * k[j, l] - h[i, l] * k[j, t]) -
(h[j, t] * k[i, l] - h[j, l] * k[i, t]))
# Handling of an output array
if type_output == str('t') or type_output == Symbol('t'):
K = kul_nom.to_tensor((-1, -1, -1, -1))
elif type_output == str('a') or type_output == Symbol('a'):
K = kul_nom
else:
raise ValueError(
"The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.")
# Output
return K
def second_surf(surf, var, type_output='t'):
"""Return the second quadratic form.
Examples:
=========
>>> from sympy import symbols
>>> from sympy.tensor.riemannian_geometry import second_surf
>>> x1, x2 = symbols('x1, x2')
var is a list of symbolic arguments. It may be a list, a one-dimensional
Arraypy or a one-dimensional TensorArray with index valence (+1):
>>> var = [x1, x2]
surf is a list of functions; it must consist of one or three functions.
type_output is an optional parameter indicating the type of the result;
it accepts a character or string value:
- 't' means that the result will be a TensorArray;
- 'a' means that the result will be an Arraypy;
- 'm' means that the result will be a Matrix;
- the default is 't', so the result will be a TensorArray.
The second quadratic form:
>>> surf3 = [x1+x2, 2*x1**2-3*x2, (1+x2)*x1+x2-4]
>>> print(second_surf(surf3, var, 't'))
(-x1 + x2)/(3*x1) -(4*x1 + 3)/((x1 + 1)*(x2 + 1))
-(4*x1 + 3)/((x1 + 1)*(x2 + 1)) 0
>>> surf1 = [x1 + 4*x2**2]
>>> print(second_surf(surf1, var, 't'))
0 0
0 8
"""
# The definition symbols i, j, k
i = Symbol('i')
j = Symbol('j')
k = Symbol('k')
b = Arraypy((2, 2))
# Calculation
if (len(surf) == 1):
b[0, 0] = diff(surf[0], var[0], 2)
b[0, 1] = b[1, 0] = diff((diff(surf[0], var[0])), var[1])
b[1, 1] = diff(surf[0], var[1], 2)
elif (len(surf) == 3):
# The first partial derivatives
r_u = diff(surf[0], var[0]) * i + diff(surf[1], var[0]) * j +\
diff(surf[2], var[0]) * k
r_v = diff(surf[0], var[1]) * i + diff(surf[1], var[1]) * j +\
diff(surf[2], var[1]) * k
# The vector product
vect_prod = (r_u.coeff(j) * r_v.coeff(k) - r_v.coeff(j) * r_u.coeff(k)) * i - \
(r_u.coeff(k) * r_v.coeff(i) - r_v.coeff(k) * r_u.coeff(i)) * j + \
(r_u.coeff(i) * r_v.coeff(j) - r_v.coeff(i) * r_u.coeff(j)) * k
# Componentwise products of r_u and r_v, used to normalize the vector product
len_r_uv = r_u.coeff(i) * r_v.coeff(i) * i + r_u.coeff(j) * r_v.coeff(j) * j + \
r_u.coeff(k) * r_v.coeff(k) * k
if (len_r_uv == 0):
raise ValueError('The two-dimensional area is a degenerate!')
# The components of the normal vector
n = (simplify(vect_prod.coeff(i) / len_r_uv.coeff(i)) * i +
simplify(vect_prod.coeff(j) / len_r_uv.coeff(j)) * j +
simplify(vect_prod.coeff(k) / len_r_uv.coeff(k)) * k)
# The second partial derivatives
r_uu = diff(r_u.coeff(i), var[0]) * i + diff(r_u.coeff(j), var[0]) * j + \
diff(r_u.coeff(k), var[0]) * k
r_uv = diff(r_u.coeff(i), var[1]) * i + diff(r_u.coeff(j), var[1]) * j + \
diff(r_u.coeff(k), var[1]) * k
r_vv = diff(r_v.coeff(i), var[1]) * i + diff(r_v.coeff(j), var[1]) * j + \
diff(r_v.coeff(k), var[1]) * k
b[0, 0] = r_uu.coeff(i) * n.coeff(i) + r_uu.coeff(j) * n.coeff(j) + \
r_uu.coeff(k) * n.coeff(k)
b[0, 1] = b[1, 0] = r_uv.coeff(i) * n.coeff(i) + r_uv.coeff(j) * n.coeff(j) + \
r_uv.coeff(k) * n.coeff(k)
b[1, 1] = r_vv.coeff(i) * n.coeff(i) + r_vv.coeff(j) * n.coeff(j) + \
r_vv.coeff(k) * n.coeff(k)
else:
raise ValueError(
"The argument surf must be consist one function or three functions")
# Handling of an output array
if type_output == str('t') or type_output == Symbol('t'):
b = b.to_tensor((-1, -1))
elif type_output == str('a') or type_output == Symbol('a'):
b = b
elif type_output == str('m') or type_output == Symbol('m'):
b = b.to_matrix()
else:
raise ValueError(
"The type_output parameter must be 'a' (Arraypy), 'm' (Matrix) "
"or 't' (TensorArray).")
# Output
return b
def k_surf(surf, var):
"""Return the Gaussian curvature.
Examples:
=========
>>> from sympy import symbols
>>> from sympy.tensor.riemannian_geometry import k_surf
>>> x1, x2 = symbols('x1, x2')
var is a list of symbolic arguments. It may be a list, a one-dimensional
Arraypy or a one-dimensional TensorArray with index valence (+1):
>>> var = [x1, x2]
surf is a list of functions; it must consist of one or three functions.
The Gaussian curvature:
>>> surf3 = [x1+x2, 2*x1**2-3*x2, (1+x2)*x1+x2-4]
>>> print(k_surf(surf3, var))
-(4*x1 + 3)**2/((x1 + 1)**2*(x2 + 1)**2*(((x1 + 1)**2 + 10)*(16*x1**2 + (x2 + 1)**2 + 1) - (-12*x1 + (x1 + 1)*(x2 + 1) + 1)**2))
>>> surf1 = [x1 + 4*x2**2]
>>> print(k_surf(surf1, var))
0
"""
# Calculation
if (len(surf) == 1):
# K = (f_xx*f_yy - f_xy**2)/(1 + f_x**2 + f_y**2)**2 for a graph z = f(x1, x2)
K = (diff(surf[0], var[0], 2) * diff(surf[0], var[1], 2) -
(diff(diff(surf[0], var[0]), var[1]))**2) / \
(1 + diff(surf[0], var[0])**2 + diff(surf[0], var[1])**2)**2
elif (len(surf) == 3):
g = Arraypy((2, 2))
g[0, 0] = diff(surf[0], var[0])**2 + \
diff(surf[1], var[0])**2 + diff(surf[2], var[0])**2
g[0, 1] = g[1, 0] = diff(surf[0], var[0]) * diff(surf[0], var[1]) + diff(
surf[1], var[0]) * diff(surf[1], var[1]) + diff(surf[2], var[0]) * diff(surf[2], var[1])
g[1, 1] = diff(surf[0], var[1])**2 + \
diff(surf[1], var[1])**2 + diff(surf[2], var[1])**2
b = second_surf(surf, var, 't')
K = simplify(
(b[0, 0] * b[1, 1] - b[0, 1]**2) / (g[0, 0] * g[1, 1] - g[0, 1]**2))
else:
raise ValueError(
"The argument surf must be consist one function or three functions")
# Output
return K
|
AunShiLord/sympy
|
sympy/tensor/riemannian_geometry.py
|
Python
|
bsd-3-clause
| 62,324
|
[
"Gaussian"
] |
6bb54ece550277899e01b38e2abfd6379d637fb25089c44b2fa911958c629958
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView, RedirectView
from django.views import defaults as default_views
urlpatterns = [
# url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^$', RedirectView.as_view(url='resources/loans/'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, include(admin.site.urls)),
# User management
url(r'^users/', include("flowers_ressources_management.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
url(r'^resources/', include("flowers_ressources_management.resources.urls"))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development; just visit
# these URLs in a browser to see how the error pages look.
urlpatterns += [
url(r'^400/$', default_views.bad_request),
url(r'^403/$', default_views.permission_denied),
url(r'^404/$', default_views.page_not_found),
url(r'^500/$', default_views.server_error),
]
|
show0k/ressources_management_django
|
config/urls.py
|
Python
|
gpl-3.0
| 1,471
|
[
"VisIt"
] |
f80344a47fe3d5bd3130f039008a29e010090faa6948d17abf586d670ffd7c7a
|
"""
Collection of DIRAC useful operating system related modules
by default on Error they return None
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import six
import os
import distutils.spawn # pylint: disable=no-name-in-module,import-error
import DIRAC
from DIRAC.Core.Utilities.Subprocess import shellCall, systemCall
from DIRAC.Core.Utilities import List
__RCSID__ = "$Id$"
DEBUG = 0
def uniquePath(path=None):
"""
Utility to squeeze the string containing a PATH-like value to
leave only unique elements preserving the original order
"""
if not isinstance(path, six.string_types):
return None
try:
elements = List.uniqueElements(List.fromChar(path, ":"))
return ':'.join(elements)
except Exception:
return None
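# Usage sketch (illustrative, not part of the original module): duplicate
# PATH entries are collapsed while the original order is preserved:
#     uniquePath('/usr/bin:/usr/local/bin:/usr/bin')
#     # -> '/usr/bin:/usr/local/bin'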
def getDiskSpace(path='.', exclude=None):
""" Get the free disk space in the partition containing the path.
The disk space is reported in MBytes. Returned 0 in case of any
error, e.g. path does not exist
"""
if not os.path.exists(path):
return -1
comm = 'df -P -m %s ' % path
if exclude:
comm += '-x %s ' % exclude
comm += '| tail -1'
resultDF = shellCall(10, comm)
if resultDF['OK'] and not resultDF['Value'][0]:
output = resultDF['Value'][1]
if output.find(' /afs') >= 0: # AFS disk space
comm = 'fs lq | tail -1'
resultAFS = shellCall(10, comm)
if resultAFS['OK'] and not resultAFS['Value'][0]:
output = resultAFS['Value'][1]
fields = output.split()
quota = int(fields[1])
used = int(fields[2])
space = (quota - used) / 1024
return int(space)
else:
return -1
else:
fields = output.split()
try:
value = int(fields[3])
except Exception as error:
print("Exception during disk space evaluation:", str(error))
value = -1
return value
else:
return -1
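# Usage sketch (illustrative): free space in MB of the partition holding
# the current directory; exclude is forwarded to `df -x`, which skips a
# filesystem type (the value below is hypothetical):
#     freeMB = getDiskSpace('.')
#     freeNoTmpfs = getDiskSpace('/scratch', exclude='tmpfs')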
def getDirectorySize(path):
""" Get the total size of the given directory in MB
"""
comm = "du -s -m %s" % path
result = shellCall(10, comm)
if not result['OK'] or result['Value'][0] != 0:
return 0
else:
output = result['Value'][1]
print(output)
size = int(output.split()[0])
return size
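# Usage sketch (illustrative): total size of a directory tree in MB via
# `du -s -m`; 0 is returned if the shell call fails:
#     sizeMB = getDirectorySize('/tmp')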
def sourceEnv(timeout, cmdTuple, inputEnv=None):
""" Function to source configuration files in a platform dependent way and get
back the environment
"""
# The appropriate extension is added to the first element of the tuple
# (the command) below; the appended command prints the resulting
# environment dictionary to stderr (Python 2 print syntax, run by the
# python picked up from the sourced environment)
envAsDict = '&& python -c "import os,sys ; print >> sys.stderr, os.environ"'
# 1.- Choose the right version of the configuration file
if DIRAC.getPlatformTuple()[0] == 'Windows':
cmdTuple[0] += '.bat'
else:
cmdTuple[0] += '.sh'
# 2.- Check that it exists
if not os.path.exists(cmdTuple[0]):
result = DIRAC.S_ERROR('Missing script: %s' % cmdTuple[0])
result['stdout'] = ''
result['stderr'] = 'Missing script: %s' % cmdTuple[0]
return result
# Source it in a platform dependent way:
# On windows the execution makes the environment to be inherit
# On Linux or Darwin use bash and source the file.
if DIRAC.getPlatformTuple()[0] == 'Windows':
# this needs to be tested
cmd = ' '.join(cmdTuple) + envAsDict
ret = shellCall(timeout, [cmd], env=inputEnv)
else:
cmdTuple.insert(0, 'source')
cmd = ' '.join(cmdTuple) + envAsDict
ret = systemCall(timeout, ['/bin/bash', '-c', cmd], env=inputEnv)
# 3.- Now get back the result
stdout = ''
stderr = ''
result = DIRAC.S_OK()
if ret['OK']:
# The Command has not timeout, retrieve stdout and stderr
stdout = ret['Value'][1]
stderr = ret['Value'][2]
if ret['Value'][0] == 0:
# execution was OK
try:
result['outputEnv'] = eval(stderr.split('\n')[-2] + '\n')
stderr = '\n'.join(stderr.split('\n')[:-2])
except Exception:
stdout = cmd + '\n' + stdout
result = DIRAC.S_ERROR('Could not parse Environment dictionary from stderr')
else:
# execution error
stdout = cmd + '\n' + stdout
result = DIRAC.S_ERROR('Execution returns %s' % ret['Value'][0])
else:
# Timeout
stdout = cmd
stderr = ret['Message']
result = DIRAC.S_ERROR(stderr)
# 4.- Put stdout and stderr in result structure
result['stdout'] = stdout
result['stderr'] = stderr
return result
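# Usage sketch (hypothetical script name): source "setup.sh" (or
# "setup.bat" on Windows; sourceEnv appends the extension) and read back
# the environment it defines:
#     res = sourceEnv(60, ['setup'])
#     if res['OK']:
#         newEnv = res['outputEnv']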
def which(executable):
return distutils.spawn.find_executable(executable) # pylint: disable=no-member
|
yujikato/DIRAC
|
src/DIRAC/Core/Utilities/Os.py
|
Python
|
gpl-3.0
| 4,515
|
[
"DIRAC"
] |
0fd4c2404a206d2dbb7e15d19f246936b2e8dea37d7f32d21ec5d42ed9c81207
|
from __future__ import print_function, division
from sympy import (degree_list, Poly, igcd, divisors, sign, symbols, S, Integer, Wild, Symbol, factorint,
Add, Mul, solve, ceiling, floor, sqrt, sympify, Subs, ilcm, Matrix, factor_list, perfect_power,
isprime, nextprime, integer_nthroot, Expr, Pow)
from sympy.core.function import _mexpand
from sympy.simplify.simplify import rad_rationalize
from sympy.ntheory.modular import solve_congruence
from sympy.utilities import default_sort_key, numbered_symbols
from sympy.core.numbers import igcdex
from sympy.ntheory.residue_ntheory import sqrt_mod
from sympy.core.compatibility import xrange
from sympy.core.relational import Eq
from sympy.solvers.solvers import check_assumptions
__all__ = ['diophantine', 'diop_solve', 'classify_diop', 'diop_linear', 'base_solution_linear',
'diop_quadratic', 'diop_DN', 'cornacchia', 'diop_bf_DN', 'transformation_to_DN', 'find_DN',
'diop_ternary_quadratic', 'square_factor', 'descent', 'diop_general_pythagorean',
'diop_general_sum_of_squares', 'partition', 'sum_of_three_squares', 'sum_of_four_squares']
def diophantine(eq, param=symbols("t", integer=True)):
"""
Simplify the solution procedure of diophantine equation ``eq`` by
converting it into a product of terms which should equal zero.
For example, when solving, `x^2 - y^2 = 0` this is treated as
`(x + y)(x - y) = 0` and `x+y = 0` and `x-y = 0` are solved independently
and combined. Each term is solved by calling ``diop_solve()``.
Output of ``diophantine()`` is a set of tuples. Each tuple represents a
solution of the input equation. In a tuple, solution for each variable is
listed according to the alphabetic order of input variables. i.e. if we have
an equation with two variables `a` and `b`, first element of the tuple will
give the solution for `a` and the second element will give the solution for
`b`.
Usage
=====
``diophantine(eq, t)``: Solve the diophantine equation ``eq``.
``t`` is the parameter to be used by ``diop_solve()``.
Details
=======
``eq`` should be an expression which is assumed to be zero.
``t`` is the parameter to be used in the solution.
Examples
========
>>> from sympy.solvers.diophantine import diophantine
>>> from sympy.abc import x, y, z
>>> diophantine(x**2 - y**2)
set([(-t, -t), (t, -t)])
#>>> diophantine(x*(2*x + 3*y - z))
#set([(0, n1, n2), (3*t - z, -2*t + z, z)])
#>>> diophantine(x**2 + 3*x*y + 4*x)
#set([(0, n1), (3*t - 4, -t)])
See Also
========
diop_solve()
"""
if isinstance(eq, Eq):
eq = eq.lhs - eq.rhs
eq = Poly(eq).as_expr()
if not eq.is_polynomial() or eq.is_number:
raise TypeError("Equation input format not supported")
var = list(eq.expand(force=True).free_symbols)
var.sort(key=default_sort_key)
terms = factor_list(eq)[1]
sols = set([])
for term in terms:
base = term[0]
var_t, jnk, eq_type = classify_diop(base)
solution = diop_solve(base, param)
if eq_type in ["linear", "homogeneous_ternary_quadratic", "general_pythagorean"]:
if merge_solution(var, var_t, solution) != ():
sols.add(merge_solution(var, var_t, solution))
elif eq_type in ["binary_quadratic", "general_sum_of_squares", "univariate"]:
for sol in solution:
if merge_solution(var, var_t, sol) != ():
sols.add(merge_solution(var, var_t, sol))
return sols
def merge_solution(var, var_t, solution):
"""
This is used to construct the full solution from the solutions of sub
equations.
For example when solving the equation `(x - y)(x^2 + y^2 - z^2) = 0`,
solutions for each of the equations `x-y = 0` and `x^2 + y^2 - z^2` are
found independently. Solutions for `x - y = 0` are `(x, y) = (t, t)`. But
we should introduce a value for z when we output the solution for the
original equation. This function converts `(t, t)` into `(t, t, n_{1})`
where `n_{1}` is an integer parameter.
"""
l = []
if None in solution:
return ()
solution = iter(solution)
params = numbered_symbols("n", integer=True, start=1)
for v in var:
if v in var_t:
l.append(next(solution))
else:
l.append(next(params))
for val, symb in zip(l, var):
if check_assumptions(val, **symb.assumptions0) is False:
return tuple()
return tuple(l)
def diop_solve(eq, param=symbols("t", integer=True)):
"""
Solves the diophantine equation ``eq``.
Similar to ``diophantine()`` but doesn't try to factor ``eq`` as the latter
does. Uses ``classify_diop()`` to determine the type of the equation and
calls the appropriate solver function.
Usage
=====
``diop_solve(eq, t)``: Solve diophantine equation, ``eq`` using ``t``
as a parameter if needed.
Details
=======
``eq`` should be an expression which is assumed to be zero.
``t`` is a parameter to be used in the solution.
Examples
========
>>> from sympy.solvers.diophantine import diop_solve
>>> from sympy.abc import x, y, z, w
>>> diop_solve(2*x + 3*y - 5)
(3*t - 5, -2*t + 5)
>>> diop_solve(4*x + 3*y -4*z + 5)
(3*t + 4*z - 5, -4*t - 4*z + 5, z)
>>> diop_solve(x + 3*y - 4*z + w -6)
(t, -t - 3*y + 4*z + 6, y, z)
>>> diop_solve(x**2 + y**2 - 5)
set([(-2, -1), (-2, 1), (2, -1), (2, 1)])
See Also
========
diophantine()
"""
var, coeff, eq_type = classify_diop(eq)
if eq_type == "linear":
return _diop_linear(var, coeff, param)
elif eq_type == "binary_quadratic":
return _diop_quadratic(var, coeff, param)
elif eq_type == "homogeneous_ternary_quadratic":
x_0, y_0, z_0 = _diop_ternary_quadratic(var, coeff)
return _parametrize_ternary_quadratic((x_0, y_0, z_0), var, coeff)
elif eq_type == "general_pythagorean":
return _diop_general_pythagorean(var, coeff, param)
elif eq_type == "univariate":
l = solve(eq)
s = set([])
for soln in l:
if isinstance(soln, Integer):
s.add((soln,))
return s
elif eq_type == "general_sum_of_squares":
return _diop_general_sum_of_squares(var, coeff)
def classify_diop(eq):
"""
Helper routine used by diop_solve() to find the type of ``eq``.
Returns a tuple containing the type of the diophantine equation along with
the variables (free symbols) and their coefficients. Variables are returned
as a list and coefficients are returned as a dict with the key being the
respective term; the constant term is keyed to Integer(1). Type is an
element in the set {"linear", "binary_quadratic", "general_pythagorean",
"homogeneous_ternary_quadratic", "univariate", "general_sum_of_squares"}
Usage
=====
``classify_diop(eq)``: Return variables, coefficients and type of the
``eq``.
Details
=======
``eq`` should be an expression which is assumed to be zero.
Examples
========
>>> from sympy.solvers.diophantine import classify_diop
>>> from sympy.abc import x, y, z, w, t
>>> classify_diop(4*x + 6*y - 4)
([x, y], {1: -4, x: 4, y: 6}, 'linear')
>>> classify_diop(x + 3*y -4*z + 5)
([x, y, z], {1: 5, x: 1, y: 3, z: -4}, 'linear')
>>> classify_diop(x**2 + y**2 - x*y + x + 5)
([x, y], {1: 5, x: 1, x**2: 1, y: 0, y**2: 1, x*y: -1}, 'binary_quadratic')
"""
eq = eq.expand(force=True)
var = list(eq.free_symbols)
var.sort(key=default_sort_key)
coeff = {}
diop_type = None
coeff = dict([reversed(t.as_independent(*var)) for t in eq.args])
for v in coeff:
if not isinstance(coeff[v], Integer):
raise TypeError("Coefficients should be Integers")
if len(var) == 1:
diop_type = "univariate"
elif Poly(eq).total_degree() == 1:
diop_type = "linear"
elif Poly(eq).total_degree() == 2 and len(var) == 2:
diop_type = "binary_quadratic"
x, y = var[:2]
if isinstance(eq, Mul):
coeff = {x**2: 0, x*y: eq.args[0], y**2: 0, x: 0, y: 0, Integer(1): 0}
else:
for term in [x**2, y**2, x*y, x, y, Integer(1)]:
if term not in coeff.keys():
coeff[term] = Integer(0)
elif Poly(eq).total_degree() == 2 and len(var) == 3 and Integer(1) not in coeff.keys():
for v in var:
if v in coeff.keys():
diop_type = "inhomogeneous_ternary_quadratic"
break
else:
diop_type = "homogeneous_ternary_quadratic"
x, y, z = var[:3]
for term in [x**2, y**2, z**2, x*y, y*z, x*z]:
if term not in coeff.keys():
coeff[term] = Integer(0)
elif Poly(eq).degree() == 2 and len(var) >= 3:
for v in var:
if v in coeff.keys():
diop_type = "inhomogeneous_general_quadratic"
break
else:
if Integer(1) in coeff.keys():
constant_term = True
else:
constant_term = False
non_square_degree_2_terms = False
for v in var:
for u in var:
if u != v and u*v in coeff.keys():
non_square_degree_2_terms = True
break
if non_square_degree_2_terms:
break
if constant_term and non_square_degree_2_terms:
diop_type = "inhomogeneous_general_quadratic"
elif constant_term and not non_square_degree_2_terms:
for v in var:
if coeff[v**2] != 1:
break
else:
diop_type = "general_sum_of_squares"
elif not constant_term and non_square_degree_2_terms:
diop_type = "homogeneous_general_quadratic"
else:
coeff_sign_sum = 0
for v in var:
if not isinstance(sqrt(abs(Integer(coeff[v**2]))), Integer):
break
coeff_sign_sum = coeff_sign_sum + sign(coeff[v**2])
else:
if abs(coeff_sign_sum) == len(var) - 2 and not constant_term:
diop_type = "general_pythagorean"
elif Poly(eq).total_degree() == 3 and len(var) == 2:
x, y = var[:2]
diop_type = "cubic_thue"
for term in [x**3, x**2*y, x*y**2, y**3, Integer(1)]:
if term not in coeff.keys():
coeff[term] = Integer(0)
if diop_type is not None:
return var, coeff, diop_type
else:
raise NotImplementedError("Still not implemented")
def diop_linear(eq, param=symbols("t", integer=True)):
"""
Solves linear diophantine equations.
A linear diophantine equation is an equation of the form `a_{1}x_{1} +
a_{2}x_{2} + .. + a_{n}x_{n} = 0` where `a_{1}, a_{2}, ..a_{n}` are
integer constants and `x_{1}, x_{2}, ..x_{n}` are integer variables.
Usage
=====
``diop_linear(eq)``: Returns a tuple containing solutions to the
diophantine equation ``eq``. Values in the tuple are arranged in the same
order as the sorted variables.
Details
=======
``eq`` is a linear diophantine equation which is assumed to be zero.
``param`` is the parameter to be used in the solution.
Examples
========
>>> from sympy.solvers.diophantine import diop_linear
>>> from sympy.abc import x, y, z, t
>>> from sympy import Integer
>>> diop_linear(2*x - 3*y - 5) #solves equation 2*x - 3*y -5 = 0
(-3*t - 5, -2*t - 5)
Here x = -3*t - 5 and y = -2*t - 5
>>> diop_linear(2*x - 3*y - 4*z -3)
(-3*t - 4*z - 3, -2*t - 4*z - 3, z)
See Also
========
diop_quadratic(), diop_ternary_quadratic(), diop_general_pythagorean(),
diop_general_sum_of_squares()
"""
var, coeff, diop_type = classify_diop(eq)
if diop_type == "linear":
return _diop_linear(var, coeff, param)
def _diop_linear(var, coeff, param):
x, y = var[:2]
a = coeff[x]
b = coeff[y]
if len(var) == len(coeff):
c = 0
else:
c = -coeff[Integer(1)]
if len(var) == 2:
sol_x, sol_y = base_solution_linear(c, a, b, param)
return (sol_x, sol_y)
elif len(var) > 2:
X = []
Y = []
for v in var[2:]:
sol_x, sol_y = base_solution_linear(-coeff[v], a, b)
X.append(sol_x*v)
Y.append(sol_y*v)
sol_x, sol_y = base_solution_linear(c, a, b, param)
X.append(sol_x)
Y.append(sol_y)
l = []
if None not in X and None not in Y:
l.append(Add(*X))
l.append(Add(*Y))
for v in var[2:]:
l.append(v)
else:
for v in var:
l.append(None)
return tuple(l)
def base_solution_linear(c, a, b, t=None):
"""
Return the base solution for a linear diophantine equation with two
variables.
Used by ``diop_linear()`` to find the base solution of a linear
Diophantine equation. If ``t`` is given then the parametrized solution is
returned.
Usage
=====
``base_solution_linear(c, a, b, t)``: ``a``, ``b``, ``c`` are coefficients
in `ax + by = c` and ``t`` is the parameter to be used in the solution.
Examples
========
>>> from sympy.solvers.diophantine import base_solution_linear
>>> from sympy.abc import t
>>> base_solution_linear(5, 2, 3) # equation 2*x + 3*y = 5
(-5, 5)
>>> base_solution_linear(0, 5, 7) # equation 5*x + 7*y = 0
(0, 0)
>>> base_solution_linear(5, 2, 3, t) # equation 2*x + 3*y = 5
(3*t - 5, -2*t + 5)
>>> base_solution_linear(0, 5, 7, t) # equation 5*x + 7*y = 0
(7*t, -5*t)
"""
d = igcd(a, igcd(b, c))
a = a // d
b = b // d
c = c // d
if c == 0:
if t is not None:
return (b*t , -a*t)
else:
return (S.Zero, S.Zero)
else:
x0, y0, d = extended_euclid(int(abs(a)), int(abs(b)))
x0 = x0 * sign(a)
y0 = y0 * sign(b)
if divisible(c, d):
if t is not None:
return (c*x0 + b*t, c*y0 - a*t)
else:
return (Integer(c*x0), Integer(c*y0))
else:
return (None, None)
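# Illustrative check, not part of the original module: a minimal sketch
# verifying that base_solution_linear(c, a, b) returns a pair satisfying
# a*x + b*y = c, using the numbers from the docstring above.
def _base_solution_linear_sketch():
    x0, y0 = base_solution_linear(5, 2, 3)   # 2*x + 3*y = 5
    assert 2*x0 + 3*y0 == 5
    x0, y0 = base_solution_linear(0, 5, 7)   # 5*x + 7*y = 0
    assert 5*x0 + 7*y0 == 0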
def extended_euclid(a, b):
"""
For given ``a``, ``b`` returns a tuple containing integers `x`, `y` and `d`
such that `ax + by = d`. Here `d = gcd(a, b)`.
Usage
=====
``extended_euclid(a, b)``: returns `x`, `y` and `\gcd(a, b)`.
Details
=======
``a`` Any instance of Integer.
``b`` Any instance of Integer.
Examples
========
>>> from sympy.solvers.diophantine import extended_euclid
>>> extended_euclid(4, 6)
(-1, 1, 2)
>>> extended_euclid(3, 5)
(2, -1, 1)
"""
if b == 0:
return (1, 0, a)
x0, y0, d = extended_euclid(b, a%b)
x, y = y0, x0 - (a//b) * y0
return x, y, d
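# Illustrative check, not part of the original module: extended_euclid()
# returns Bezout coefficients, so a*x + b*y should equal gcd(a, b).
def _extended_euclid_sketch():
    x, y, d = extended_euclid(4, 6)
    assert 4*x + 6*y == d == 2
    x, y, d = extended_euclid(3, 5)
    assert 3*x + 5*y == d == 1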
def divisible(a, b):
"""
Returns `True` if ``a`` is divisible by ``b`` and `False` otherwise.
"""
return igcd(int(a), int(b)) == abs(int(b))
def diop_quadratic(eq, param=symbols("t", integer=True)):
"""
Solves quadratic diophantine equations.
i.e. equations of the form `Ax^2 + Bxy + Cy^2 + Dx + Ey + F = 0`. Returns a
set containing the tuples `(x, y)` which contains the solutions. If there
are no solutions then `(None, None)` is returned.
Usage
=====
``diop_quadratic(eq, param)``: ``eq`` is a quadratic binary diophantine
equation. ``param`` is used to indicate the parameter to be used in the
solution.
Details
=======
``eq`` should be an expression which is assumed to be zero.
``param`` is a parameter to be used in the solution.
Examples
========
>>> from sympy.abc import x, y, t
>>> from sympy.solvers.diophantine import diop_quadratic
>>> diop_quadratic(x**2 + y**2 + 2*x + 2*y + 2, t)
set([(-1, -1)])
References
==========
.. [1] Methods to solve Ax^2 + Bxy + Cy^2 + Dx + Ey + F = 0,[online],
Available: http://www.alpertron.com.ar/METHODS.HTM
.. [2] Solving the equation ax^2+ bxy + cy^2 + dx + ey + f= 0, [online],
Available: http://www.jpr2718.org/ax2p.pdf
See Also
========
diop_linear(), diop_ternary_quadratic(), diop_general_sum_of_squares(),
diop_general_pythagorean()
"""
var, coeff, diop_type = classify_diop(eq)
if diop_type == "binary_quadratic":
return _diop_quadratic(var, coeff, param)
def _diop_quadratic(var, coeff, t):
x, y = var[:2]
for term in [x**2, y**2, x*y, x, y, Integer(1)]:
if term not in coeff.keys():
coeff[term] = Integer(0)
A = coeff[x**2]
B = coeff[x*y]
C = coeff[y**2]
D = coeff[x]
E = coeff[y]
F = coeff[Integer(1)]
d = igcd(A, igcd(B, igcd(C, igcd(D, igcd(E, F)))))
A = A // d
B = B // d
C = C // d
D = D // d
E = E // d
F = F // d
# (1) Linear case: A = B = C = 0 ==> considered under linear diophantine equations
# (2) Simple-Hyperbolic case: A = C = 0, B != 0
# In this case the equation can be converted to (Bx + E)(By + D) = DE - BF
# We consider two cases; DE - BF = 0 and DE - BF != 0
# More details, http://www.alpertron.com.ar/METHODS.HTM#SHyperb
l = set([])
if A == 0 and C == 0 and B != 0:
if D*E - B*F == 0:
if divisible(int(E), int(B)):
l.add((-E/B, t))
if divisible(int(D), int(B)):
l.add((t, -D/B))
else:
div = divisors(D*E - B*F)
div = div + [-term for term in div]
for d in div:
if divisible(int(d - E), int(B)):
x0 = (d - E) // B
if divisible(int(D*E - B*F), int(d)):
if divisible(int((D*E - B*F)// d - D), int(B)):
y0 = ((D*E - B*F) // d - D) // B
l.add((x0, y0))
# (3) Parabolic case: B**2 - 4*A*C = 0
# There are two subcases to be considered in this case.
# sqrt(c)D - sqrt(a)E = 0 and sqrt(c)D - sqrt(a)E != 0
# More Details, http://www.alpertron.com.ar/METHODS.HTM#Parabol
elif B**2 - 4*A*C == 0:
if A == 0:
s = _diop_quadratic([y, x], coeff, t)
for soln in s:
l.add((soln[1], soln[0]))
else:
g = igcd(A, C)
g = abs(g) * sign(A)
a = A // g
b = B // g
c = C // g
e = sign(B/A)
if e*sqrt(c)*D - sqrt(a)*E == 0:
z = symbols("z", real=True)
roots = solve(sqrt(a)*g*z**2 + D*z + sqrt(a)*F)
for root in roots:
if isinstance(root, Integer):
l.add((diop_solve(sqrt(a)*x + e*sqrt(c)*y - root)[0], diop_solve(sqrt(a)*x + e*sqrt(c)*y - root)[1]))
elif isinstance(e*sqrt(c)*D - sqrt(a)*E, Integer):
solve_x = lambda u: e*sqrt(c)*g*(sqrt(a)*E - e*sqrt(c)*D)*t**2 - (E + 2*e*sqrt(c)*g*u)*t\
- (e*sqrt(c)*g*u**2 + E*u + e*sqrt(c)*F) // (e*sqrt(c)*D - sqrt(a)*E)
solve_y = lambda u: sqrt(a)*g*(e*sqrt(c)*D - sqrt(a)*E)*t**2 + (D + 2*sqrt(a)*g*u)*t \
+ (sqrt(a)*g*u**2 + D*u + sqrt(a)*F) // (e*sqrt(c)*D - sqrt(a)*E)
for z0 in xrange(0, abs(e*sqrt(c)*D - sqrt(a)*E)):
if divisible(sqrt(a)*g*z0**2 + D*z0 + sqrt(a)*F, e*sqrt(c)*D - sqrt(a)*E):
l.add((solve_x(z0), solve_y(z0)))
# (4) Method used when B**2 - 4*A*C is a square, described on p. 6 of the paper below
# by John P. Robertson.
# http://www.jpr2718.org/ax2p.pdf
elif isinstance(sqrt(B**2 - 4*A*C), Integer):
if A != 0:
r = sqrt(B**2 - 4*A*C)
u, v = symbols("u, v", integer=True)
eq = _mexpand(4*A*r*u*v + 4*A*D*(B*v + r*u + r*v - B*u) + 2*A*4*A*E*(u - v) + 4*A*r*4*A*F)
sol = diop_solve(eq, t)
sol = list(sol)
for solution in sol:
s0 = solution[0]
t0 = solution[1]
x_0 = S(B*t0 + r*s0 + r*t0 - B*s0)/(4*A*r)
y_0 = S(s0 - t0)/(2*r)
if isinstance(s0, Symbol) or isinstance(t0, Symbol):
if check_param(x_0, y_0, 4*A*r, t) != (None, None):
l.add((check_param(x_0, y_0, 4*A*r, t)[0], check_param(x_0, y_0, 4*A*r, t)[1]))
elif divisible(B*t0 + r*s0 + r*t0 - B*s0, 4*A*r):
if divisible(s0 - t0, 2*r):
if is_solution_quad(var, coeff, x_0, y_0):
l.add((x_0, y_0))
else:
_var = var
_var[0], _var[1] = _var[1], _var[0] # Interchange x and y
s = _diop_quadratic(_var, coeff, t)
while len(s) > 0:
sol = s.pop()
l.add((sol[1], sol[0]))
# (5) B**2 - 4*A*C > 0 and B**2 - 4*A*C not a square or B**2 - 4*A*C < 0
else:
P, Q = _transformation_to_DN(var, coeff)
D, N = _find_DN(var, coeff)
solns_pell = diop_DN(D, N)
if D < 0:
for solution in solns_pell:
for X_i in [-solution[0], solution[0]]:
for Y_i in [-solution[1], solution[1]]:
x_i, y_i = (P*Matrix([X_i, Y_i]) + Q)[0], (P*Matrix([X_i, Y_i]) + Q)[1]
if isinstance(x_i, Integer) and isinstance(y_i, Integer):
l.add((x_i, y_i))
else:
# In this case equation can be transformed into a Pell equation
#n = symbols("n", integer=True)
a = diop_DN(D, 1)
T = a[0][0]
U = a[0][1]
if (isinstance(P[0], Integer) and isinstance(P[1], Integer) and isinstance(P[2], Integer)
and isinstance(P[3], Integer) and isinstance(Q[0], Integer) and isinstance(Q[1], Integer)):
for sol in solns_pell:
r = sol[0]
s = sol[1]
x_n = S((r + s*sqrt(D))*(T + U*sqrt(D))**t + (r - s*sqrt(D))*(T - U*sqrt(D))**t)/2
y_n = S((r + s*sqrt(D))*(T + U*sqrt(D))**t - (r - s*sqrt(D))*(T - U*sqrt(D))**t)/(2*sqrt(D))
x_n = _mexpand(x_n)
y_n = _mexpand(y_n)
x_n, y_n = (P*Matrix([x_n, y_n]) + Q)[0], (P*Matrix([x_n, y_n]) + Q)[1]
l.add((x_n, y_n))
else:
L = ilcm(S(P[0]).q, ilcm(S(P[1]).q, ilcm(S(P[2]).q, ilcm(S(P[3]).q, ilcm(S(Q[0]).q, S(Q[1]).q)))))
k = 0
done = False
T_k = T
U_k = U
while not done:
k = k + 1
if (T_k - 1) % L == 0 and U_k % L == 0:
done = True
T_k, U_k = T_k*T + D*U_k*U, T_k*U + U_k*T
for soln in solns_pell:
X = soln[0]
Y = soln[1]
done = False
for i in xrange(k):
X_1 = X*T + D*U*Y
Y_1 = X*U + Y*T
x = (P*Matrix([X_1, Y_1]) + Q)[0]
y = (P*Matrix([X_1, Y_1]) + Q)[1]
if is_solution_quad(var, coeff, x, y):
done = True
x_n = S( (X_1 + sqrt(D)*Y_1)*(T + sqrt(D)*U)**(t*L) + (X_1 - sqrt(D)*Y_1)*(T - sqrt(D)*U)**(t*L) )/ 2
y_n = S( (X_1 + sqrt(D)*Y_1)*(T + sqrt(D)*U)**(t*L) - (X_1 - sqrt(D)*Y_1)*(T - sqrt(D)*U)**(t*L) )/ (2*sqrt(D))
x_n = _mexpand(x_n)
y_n = _mexpand(y_n)
x_n, y_n = (P*Matrix([x_n, y_n]) + Q)[0], (P*Matrix([x_n, y_n]) + Q)[1]
l.add((x_n, y_n))
if done:
break
return l
def is_solution_quad(var, coeff, u, v):
"""
Check whether `(u, v)` is solution to the quadratic binary diophantine
equation with the variable list ``var`` and coefficient dictionary
``coeff``.
Not intended for use by normal users.
"""
x, y = var[:2]
eq = x**2*coeff[x**2] + x*y*coeff[x*y] + y**2*coeff[y**2] + x*coeff[x] + y*coeff[y] + coeff[Integer(1)]
return _mexpand(Subs(eq, (x, y), (u, v)).doit()) == 0
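# Illustrative sketch, not part of the original module: building the full
# coefficient dictionary expected by is_solution_quad() for x**2 + y**2 - 5
# and checking the known solution (2, 1).
def _is_solution_quad_sketch():
    x, y = symbols("x, y", integer=True)
    coeff = {x**2: Integer(1), x*y: Integer(0), y**2: Integer(1),
             x: Integer(0), y: Integer(0), Integer(1): Integer(-5)}
    assert is_solution_quad([x, y], coeff, 2, 1)       # 4 + 1 - 5 == 0
    assert not is_solution_quad([x, y], coeff, 2, 2)   # 4 + 4 - 5 != 0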
def diop_DN(D, N, t=symbols("t", integer=True)):
"""
Solves the equation `x^2 - Dy^2 = N`.
Mainly concerned with the case `D > 0, D` is not a perfect square, which is
the same as the generalized Pell equation. To solve the generalized Pell
equation this function uses the LMM algorithm. Refer to [1]_ for more details on
the algorithm.
Returns one solution for each class of the solutions. Other solutions of
the class can be constructed according to the values of ``D`` and ``N``.
Returns a list containing the solution tuples `(x, y)`.
Usage
=====
``diop_DN(D, N, t)``: D and N are integers as in `x^2 - Dy^2 = N` and
``t`` is the parameter to be used in the solutions.
Details
=======
``D`` and ``N`` correspond to D and N in the equation.
``t`` is the parameter to be used in the solutions.
Examples
========
>>> from sympy.solvers.diophantine import diop_DN
>>> diop_DN(13, -4) # Solves equation x**2 - 13*y**2 = -4
[(3, 1), (393, 109), (36, 10)]
The output can be interpreted as follows: There are three fundamental
solutions to the equation `x^2 - 13y^2 = -4` given by (3, 1), (393, 109)
and (36, 10). Each tuple is in the form (x, y), i.e. the solution (3, 1) means
that `x = 3` and `y = 1`.
>>> diop_DN(986, 1) # Solves equation x**2 - 986*y**2 = 1
[(49299, 1570)]
See Also
========
find_DN(), diop_bf_DN()
References
==========
.. [1] Solving the generalized Pell equation x**2 - D*y**2 = N, John P.
Robertson, July 31, 2004, Pages 16 - 17. [online], Available:
http://www.jpr2718.org/pell.pdf
"""
if D < 0:
if N == 0:
return [(S.Zero, S.Zero)]
elif N < 0:
return []
elif N > 0:
d = divisors(square_factor(N))
sol = []
for divisor in d:
sols = cornacchia(1, -D, N // divisor**2)
if sols:
for x, y in sols:
sol.append((divisor*x, divisor*y))
return sol
elif D == 0:
if N < 0 or not isinstance(sqrt(N), Integer):
return []
if N == 0:
return [(S.Zero, t)]
if isinstance(sqrt(N), Integer):
return [(sqrt(N), t)]
else: # D > 0
if isinstance(sqrt(D), Integer):
r = sqrt(D)
if N == 0:
return [(r*t, t)]
else:
sol = []
for y in xrange(floor(sign(N)*(N - 1)/(2*r)) + 1):
if isinstance(sqrt(D*y**2 + N), Integer):
sol.append((sqrt(D*y**2 + N), y))
return sol
else:
if N == 0:
return [(S.Zero, S.Zero)]
elif abs(N) == 1:
pqa = PQa(0, 1, D)
a_0 = floor(sqrt(D))
l = 0
G = []
B = []
for i in pqa:
a = i[2]
G.append(i[5])
B.append(i[4])
if l != 0 and a == 2*a_0:
break
l = l + 1
if l % 2 == 1:
if N == -1:
x = G[l-1]
y = B[l-1]
else:
count = l
while count < 2*l - 1:
i = next(pqa)
G.append(i[5])
B.append(i[4])
count = count + 1
x = G[count]
y = B[count]
else:
if N == 1:
x = G[l-1]
y = B[l-1]
else:
return []
return [(x, y)]
else:
fs = []
sol = []
div = divisors(N)
for d in div:
if divisible(N, d**2):
fs.append(d)
for f in fs:
m = N // f**2
zs = sqrt_mod(D, abs(m), True)
zs = [i for i in zs if i <= abs(m) // 2 ]
if abs(m) != 2:
zs = zs + [-i for i in zs]
if S.Zero in zs:
zs.remove(S.Zero) # Remove duplicate zero
for z in zs:
pqa = PQa(z, abs(m), D)
l = 0
G = []
B = []
for i in pqa:
a = i[2]
G.append(i[5])
B.append(i[4])
if l != 0 and abs(i[1]) == 1:
r = G[l-1]
s = B[l-1]
if r**2 - D*s**2 == m:
sol.append((f*r, f*s))
elif diop_DN(D, -1) != []:
a = diop_DN(D, -1)
sol.append((f*(r*a[0][0] + a[0][1]*s*D), f*(r*a[0][1] + s*a[0][0])))
break
l = l + 1
if l == length(z, abs(m), D):
break
return sol
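# Illustrative check, not part of the original module: every tuple returned by
# diop_DN(D, N) should satisfy x**2 - D*y**2 == N; here the docstring example
# x**2 - 13*y**2 = -4 is re-verified.
def _diop_DN_sketch():
    for x, y in diop_DN(13, -4):
        assert x**2 - 13*y**2 == -4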
def cornacchia(a, b, m):
"""
Solves `ax^2 + by^2 = m` where `\gcd(a, b) = 1 = gcd(a, m)` and `a, b > 0`.
Uses the algorithm due to Cornacchia. The method only finds primitive
solutions, i.e. ones with `\gcd(x, y) = 1`. So this method can't be used to
find the solutions of `x^2 + y^2 = 20` since the only solution to the former is
`(x, y) = (4, 2)` and it is not primitive. When `a = b = 1`, only the
solutions with `x \geq y` are found. For more details, see the References.
Examples
========
>>> from sympy.solvers.diophantine import cornacchia
>>> cornacchia(2, 3, 35) # equation 2x**2 + 3y**2 = 35
set([(2, 3), (4, 1)])
>>> cornacchia(1, 1, 25) # equation x**2 + y**2 = 25
set([(4, 3)])
References
===========
.. [1] A. Nitaj, "L'algorithme de Cornacchia"
.. [2] Solving the diophantine equation ax**2 + by**2 = m by Cornacchia's
method, [online], Available:
http://www.numbertheory.org/php/cornacchia.html
"""
sols = set([])
a1 = igcdex(a, m)[0]
v = sqrt_mod(-b*a1, m, True)
if v is None:
return None
if not isinstance(v, list):
v = [v]
for t in v:
if t < m // 2:
continue
u, r = t, m
while True:
u, r = r, u % r
if a*r**2 < m:
break
m1 = m - a*r**2
if m1 % b == 0:
m1 = m1 // b
if isinstance(sqrt(m1), Integer):
s = sqrt(m1)
sols.add((int(r), int(s)))
return sols
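# Illustrative check, not part of the original module: each primitive solution
# returned by cornacchia(a, b, m) should satisfy a*x**2 + b*y**2 == m with
# gcd(x, y) == 1, as in the docstring example 2*x**2 + 3*y**2 = 35.
def _cornacchia_sketch():
    for x, y in cornacchia(2, 3, 35):
        assert 2*x**2 + 3*y**2 == 35
        assert igcd(x, y) == 1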
def PQa(P_0, Q_0, D):
"""
Returns useful information needed to solve the Pell equation.
There are six sequences of integers defined related to the continued
fraction representation of `\\frac{P + \sqrt{D}}{Q}`, namely {`P_{i}`},
{`Q_{i}`}, {`a_{i}`}, {`A_{i}`}, {`B_{i}`}, {`G_{i}`}. ``PQa()`` returns
these values as a 6-tuple in the same order as mentioned above. Refer [1]_
for more detailed information.
Usage
=====
``PQa(P_0, Q_0, D)``: ``P_0``, ``Q_0`` and ``D`` are integers corresponding
to `P_{0}`, `Q_{0}` and `D` in the continued fraction
`\\frac{P_{0} + \sqrt{D}}{Q_{0}}`.
Also it is assumed that `P_{0}^2 \equiv D \ (mod \ |Q_{0}|)` and `D` is square free.
Examples
========
>>> from sympy.solvers.diophantine import PQa
>>> pqa = PQa(13, 4, 5) # (13 + sqrt(5))/4
>>> next(pqa) # (P_0, Q_0, a_0, A_0, B_0, G_0)
(13, 4, 3, 3, 1, -1)
>>> next(pqa) # (P_1, Q_1, a_1, A_1, B_1, G_1)
(-1, 1, 1, 4, 1, 3)
References
==========
.. [1] Solving the generalized Pell equation x^2 - Dy^2 = N, John P.
Robertson, July 31, 2004, Pages 4 - 8. http://www.jpr2718.org/pell.pdf
"""
A_i_2 = B_i_1 = 0
A_i_1 = B_i_2 = 1
G_i_2 = -P_0
G_i_1 = Q_0
P_i = P_0
Q_i = Q_0
while True:
a_i = floor((P_i + sqrt(D))/Q_i)
A_i = a_i*A_i_1 + A_i_2
B_i = a_i*B_i_1 + B_i_2
G_i = a_i*G_i_1 + G_i_2
yield P_i, Q_i, a_i, A_i, B_i, G_i
A_i_1, A_i_2 = A_i, A_i_1
B_i_1, B_i_2 = B_i, B_i_1
G_i_1, G_i_2 = G_i, G_i_1
P_i = a_i*Q_i - P_i
Q_i = (D - P_i**2)/Q_i
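# Illustrative sketch, not part of the original module: with P_0 = 0 and
# Q_0 = 1 the a_i entries produced by PQa() are the partial quotients of the
# continued fraction of sqrt(D); for D = 13 the expansion starts [3; 1, 1, 1, 1, 6].
def _PQa_sketch():
    pqa = PQa(0, 1, 13)
    partial_quotients = [next(pqa)[2] for _ in range(6)]
    assert partial_quotients == [3, 1, 1, 1, 1, 6]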
def diop_bf_DN(D, N, t=symbols("t", integer=True)):
"""
Uses brute force to solve the equation, `x^2 - Dy^2 = N`.
Mainly concerned with the generalized Pell equation which is the case when
`D > 0, D` is not a perfect square. For more information on the case refer
[1]_. Let `(t, u)` be the minimal positive solution of the equation
`x^2 - Dy^2 = 1`. Then this method requires
`\sqrt{\\frac{\mid N \mid (t \pm 1)}{2D}}` to be small.
Usage
=====
``diop_bf_DN(D, N, t)``: ``D`` and ``N`` are coefficients in
`x^2 - Dy^2 = N` and ``t`` is the parameter to be used in the solutions.
Details
=======
``D`` and ``N`` correspond to D and N in the equation.
``t`` is the parameter to be used in the solutions.
Examples
========
>>> from sympy.solvers.diophantine import diop_bf_DN
>>> diop_bf_DN(13, -4)
[(3, 1), (-3, 1), (36, 10)]
>>> diop_bf_DN(986, 1)
[(49299, 1570)]
See Also
========
diop_DN()
References
==========
.. [1] Solving the generalized Pell equation x**2 - D*y**2 = N, John P.
Robertson, July 31, 2004, Page 15. http://www.jpr2718.org/pell.pdf
"""
sol = []
a = diop_DN(D, 1)
u = a[0][0]
v = a[0][1]
if abs(N) == 1:
return diop_DN(D, N)
elif N > 1:
L1 = 0
L2 = floor(sqrt(S(N*(u - 1))/(2*D))) + 1
elif N < -1:
L1 = ceiling(sqrt(S(-N)/D))
L2 = floor(sqrt(S(-N*(u + 1))/(2*D))) + 1
else:
if D < 0:
return [(S.Zero, S.Zero)]
elif D == 0:
return [(S.Zero, t)]
else:
if isinstance(sqrt(D), Integer):
return [(sqrt(D)*t, t), (-sqrt(D)*t, t)]
else:
return [(S.Zero, S.Zero)]
for y in xrange(L1, L2):
if isinstance(sqrt(N + D*y**2), Integer):
x = sqrt(N + D*y**2)
sol.append((x, y))
if not equivalent(x, y, -x, y, D, N):
sol.append((-x, y))
return sol
def equivalent(u, v, r, s, D, N):
"""
Returns True if two solutions `(u, v)` and `(r, s)` of `x^2 - Dy^2 = N`
belongs to the same equivalence class and False otherwise.
Two solutions `(u, v)` and `(r, s)` to the above equation fall to the same
equivalence class iff both `(ur - Dvs)` and `(us - vr)` are divisible by
`N`. See reference [1]_. No test is performed to test whether `(u, v)` and
`(r, s)` are actually solutions to the equation. User should take care of
this.
Usage
=====
``equivalent(u, v, r, s, D, N)``: `(u, v)` and `(r, s)` are two solutions
of the equation `x^2 - Dy^2 = N` and all parameters involved are integers.
Examples
========
>>> from sympy.solvers.diophantine import equivalent
>>> equivalent(18, 5, -18, -5, 13, -1)
True
>>> equivalent(3, 1, -18, 393, 109, -4)
False
References
==========
.. [1] Solving the generalized Pell equation x**2 - D*y**2 = N, John P.
Robertson, July 31, 2004, Page 12. http://www.jpr2718.org/pell.pdf
"""
return divisible(u*r - D*v*s, N) and divisible(u*s - v*r, N)
def length(P, Q, D):
"""
Returns the (length of aperiodic part + length of periodic part) of
continued fraction representation of `\\frac{P + \sqrt{D}}{Q}`.
It is important to remember that this does NOT return the length of the
periodic part but the sum of the lengths of the two parts as mentioned
above.
Usage
=====
``length(P, Q, D)``: ``P``, ``Q`` and ``D`` are integers corresponding to
the continued fraction `\\frac{P + \sqrt{D}}{Q}`.
Details
=======
``P``, ``D`` and ``Q`` corresponds to P, D and Q in the continued fraction,
`\\frac{P + \sqrt{D}}{Q}`.
Examples
========
>>> from sympy.solvers.diophantine import length
>>> length(-2 , 4, 5) # (-2 + sqrt(5))/4
3
>>> length(-5, 4, 17) # (-5 + sqrt(17))/4
4
"""
x = P + sqrt(D)
y = Q
x = sympify(x)
v, res = [], []
q = x/y
if q < 0:
v.append(q)
res.append(floor(q))
q = q - floor(q)
num, den = rad_rationalize(1, q)
q = num / den
while 1:
v.append(q)
a = int(q)
res.append(a)
if q == a:
return len(res)
num, den = rad_rationalize(1,(q - a))
q = num / den
if q in v:
return len(res)
def transformation_to_DN(eq):
"""
This function transforms the general quadratic equation
`ax^2 + bxy + cy^2 + dx + ey + f = 0`
into the easier-to-handle form `X^2 - DY^2 = N`.
This is used to solve the general quadratic equation by transforming it to
the latter form. Refer [1]_ for more detailed information on the
transformation. This function returns a tuple (A, B) where A is a 2 X 2
matrix and B is a 2 X 1 matrix such that,
Transpose([x y]) = A * Transpose([X Y]) + B
Usage
=====
``transformation_to_DN(eq)``: where ``eq`` is the quadratic to be
transformed.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.solvers.diophantine import transformation_to_DN
>>> from sympy.solvers.diophantine import classify_diop
>>> A, B = transformation_to_DN(x**2 - 3*x*y - y**2 - 2*y + 1)
>>> A
Matrix([
[1/26, 3/26],
[ 0, 1/13]])
>>> B
Matrix([
[-6/13],
[-4/13]])
A, B returned are such that Transpose((x y)) = A * Transpose((X Y)) + B.
Substituting these values for `x` and `y` and a bit of simplifying work
will give an equation of the form `x^2 - Dy^2 = N`.
>>> from sympy.abc import X, Y
>>> from sympy import Matrix, simplify, Subs
>>> u = (A*Matrix([X, Y]) + B)[0] # Transformation for x
>>> u
X/26 + 3*Y/26 - 6/13
>>> v = (A*Matrix([X, Y]) + B)[1] # Transformation for y
>>> v
Y/13 - 4/13
Next we will substitute these formulas for `x` and `y` and do
``simplify()``.
>>> eq = simplify(Subs(x**2 - 3*x*y - y**2 - 2*y + 1, (x, y), (u, v)).doit())
>>> eq
X**2/676 - Y**2/52 + 17/13
By multiplying the denominator appropriately, we can get a Pell equation
in the standard form.
>>> eq * 676
X**2 - 13*Y**2 + 884
If only the final equation is needed, ``find_DN()`` can be used.
See Also
========
find_DN()
References
==========
.. [1] Solving the equation ax^2 + bxy + cy^2 + dx + ey + f = 0,
John P.Robertson, May 8, 2003, Page 7 - 11.
http://www.jpr2718.org/ax2p.pdf
"""
var, coeff, diop_type = classify_diop(eq)
if diop_type == "binary_quadratic":
return _transformation_to_DN(var, coeff)
def _transformation_to_DN(var, coeff):
x, y = var[:2]
a = coeff[x**2]
b = coeff[x*y]
c = coeff[y**2]
d = coeff[x]
e = coeff[y]
f = coeff[Integer(1)]
g = igcd(a, igcd(b, igcd(c, igcd(d, igcd(e, f)))))
a = a // g
b = b // g
c = c // g
d = d // g
e = e // g
f = f // g
X, Y = symbols("X, Y", integer=True)
if b != Integer(0):
B = (S(2*a)/b).p
C = (S(2*a)/b).q
A = (S(a)/B**2).p
T = (S(a)/B**2).q
# eq_1 = A*B*X**2 + B*(c*T - A*C**2)*Y**2 + d*T*X + (B*e*T - d*T*C)*Y + f*T*B
coeff = {X**2: A*B, X*Y: 0, Y**2: B*(c*T - A*C**2), X: d*T, Y: B*e*T - d*T*C, Integer(1): f*T*B}
A_0, B_0 = _transformation_to_DN([X, Y], coeff)
return Matrix(2, 2, [S(1)/B, -S(C)/B, 0, 1])*A_0, Matrix(2, 2, [S(1)/B, -S(C)/B, 0, 1])*B_0
else:
if d != Integer(0):
B = (S(2*a)/d).p
C = (S(2*a)/d).q
A = (S(a)/B**2).p
T = (S(a)/B**2).q
# eq_2 = A*X**2 + c*T*Y**2 + e*T*Y + f*T - A*C**2
coeff = {X**2: A, X*Y: 0, Y**2: c*T, X: 0, Y: e*T, Integer(1): f*T - A*C**2}
A_0, B_0 = _transformation_to_DN([X, Y], coeff)
return Matrix(2, 2, [S(1)/B, 0, 0, 1])*A_0, Matrix(2, 2, [S(1)/B, 0, 0, 1])*B_0 + Matrix([-S(C)/B, 0])
else:
if e != Integer(0):
B = (S(2*c)/e).p
C = (S(2*c)/e).q
A = (S(c)/B**2).p
T = (S(c)/B**2).q
# eq_3 = a*T*X**2 + A*Y**2 + f*T - A*C**2
coeff = {X**2: a*T, X*Y: 0, Y**2: A, X: 0, Y: 0, Integer(1): f*T - A*C**2}
A_0, B_0 = _transformation_to_DN([X, Y], coeff)
return Matrix(2, 2, [1, 0, 0, S(1)/B])*A_0, Matrix(2, 2, [1, 0, 0, S(1)/B])*B_0 + Matrix([0, -S(C)/B])
else:
# TODO: pre-simplification: Not necessary but may simplify
# the equation.
return Matrix(2, 2, [S(1)/a, 0, 0, 1]), Matrix([0, 0])
def find_DN(eq):
"""
This function returns a tuple, `(D, N)` of the simplified form,
`x^2 - Dy^2 = N`, corresponding to the general quadratic,
`ax^2 + bxy + cy^2 + dx + ey + f = 0`.
Solving the general quadratic is then equivalent to solving the equation
`X^2 - DY^2 = N` and transforming the solutions by using the transformation
matrices returned by ``transformation_to_DN()``.
Usage
=====
``find_DN(eq)``: where ``eq`` is the quadratic to be transformed.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.solvers.diophantine import find_DN
>>> find_DN(x**2 - 3*x*y - y**2 - 2*y + 1)
(13, -884)
Interpretation of the output is that we get `X^2 -13Y^2 = -884` after
transforming `x^2 - 3xy - y^2 - 2y + 1` using the transformation returned
by ``transformation_to_DN()``.
See Also
========
transformation_to_DN()
References
==========
.. [1] Solving the equation ax^2 + bxy + cy^2 + dx + ey + f = 0,
John P.Robertson, May 8, 2003, Page 7 - 11.
http://www.jpr2718.org/ax2p.pdf
"""
var, coeff, diop_type = classify_diop(eq)
if diop_type == "binary_quadratic":
return _find_DN(var, coeff)
def _find_DN(var, coeff):
x, y = var[:2]
X, Y = symbols("X, Y", integer=True)
A , B = _transformation_to_DN(var, coeff)
u = (A*Matrix([X, Y]) + B)[0]
v = (A*Matrix([X, Y]) + B)[1]
eq = x**2*coeff[x**2] + x*y*coeff[x*y] + y**2*coeff[y**2] + x*coeff[x] + y*coeff[y] + coeff[Integer(1)]
simplified = _mexpand(Subs(eq, (x, y), (u, v)).doit())
coeff = dict([reversed(t.as_independent(*[X, Y])) for t in simplified.args])
for term in [X**2, Y**2, Integer(1)]:
if term not in coeff.keys():
coeff[term] = Integer(0)
return -coeff[Y**2]/coeff[X**2], -coeff[Integer(1)]/coeff[X**2]
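# Illustrative check, not part of the original module: find_DN() should agree
# with the worked example in the transformation_to_DN() docstring, reducing
# x**2 - 3*x*y - y**2 - 2*y + 1 to the Pell-like equation X**2 - 13*Y**2 = -884.
def _find_DN_sketch():
    x, y = symbols("x, y", integer=True)
    assert find_DN(x**2 - 3*x*y - y**2 - 2*y + 1) == (13, -884)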
def check_param(x, y, a, t):
"""
Check if there is a number modulo ``a`` such that ``x`` and ``y`` are both
integers. If exist, then find a parametric representation for ``x`` and
``y``.
Here ``x`` and ``y`` are functions of ``t``.
"""
k, m, n = symbols("k, m, n", integer=True)
p = Wild("p", exclude=[k])
q = Wild("q", exclude=[k])
ok = False
for i in xrange(a):
z_x = _mexpand(Subs(x, t, a*k + i).doit()).match(p*k + q)
z_y = _mexpand(Subs(y, t, a*k + i).doit()).match(p*k + q)
if (isinstance(z_x[p], Integer) and isinstance(z_x[q], Integer) and
isinstance(z_y[p], Integer) and isinstance(z_y[q], Integer)):
ok = True
break
if ok:
x_param = x.match(p*t + q)
y_param = y.match(p*t + q)
if x_param[p] == 0 or y_param[p] == 0:
if x_param[p] == 0:
l1, junk = Poly(y).clear_denoms()
else:
l1 = 1
if y_param[p] == 0:
l2, junk = Poly(x).clear_denoms()
else:
l2 = 1
return x*ilcm(l1, l2), y*ilcm(l1, l2)
eq = S(m - x_param[q])/x_param[p] - S(n - y_param[q])/y_param[p]
lcm_denom, junk = Poly(eq).clear_denoms()
eq = eq * lcm_denom
return diop_solve(eq, t)[0], diop_solve(eq, t)[1]
else:
return (None, None)
def diop_ternary_quadratic(eq):
"""
Solves the general quadratic ternary form,
`ax^2 + by^2 + cz^2 + fxy + gyz + hxz = 0`.
Returns a tuple `(x, y, z)` which is a base solution for the above
equation. If there are no solutions, `(None, None, None)` is returned.
Usage
=====
``diop_ternary_quadratic(eq)``: Return a tuple containing a basic solution
to ``eq``.
Details
=======
``eq`` should be a homogeneous expression of degree two in three variables
and it is assumed to be zero.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.solvers.diophantine import diop_ternary_quadratic
>>> diop_ternary_quadratic(x**2 + 3*y**2 - z**2)
(1, 0, 1)
>>> diop_ternary_quadratic(4*x**2 + 5*y**2 - z**2)
(1, 0, 2)
>>> diop_ternary_quadratic(45*x**2 - 7*y**2 - 8*x*y - z**2)
(28, 45, 105)
>>> diop_ternary_quadratic(x**2 - 49*y**2 - z**2 + 13*z*y -8*x*y)
(9, 1, 5)
"""
var, coeff, diop_type = classify_diop(eq)
if diop_type == "homogeneous_ternary_quadratic":
return _diop_ternary_quadratic(var, coeff)
def _diop_ternary_quadratic(_var, coeff):
x, y, z = _var[:3]
var = [x]*3
var[0], var[1], var[2] = _var[0], _var[1], _var[2]
# Equations of the form B*x*y + C*z*x + E*y*z = 0 and At least two of the
# coefficients A, B, C are non-zero.
# There are infinitely many solutions for the equation.
# Ex: (0, 0, t), (0, t, 0), (t, 0, 0)
# The equation can be rewritten as y*(B*x + E*z) = -C*x*z, and we can find rather
# non-obvious solutions. Set y = -C and B*x + E*z = x*z. The latter can be solved by
# using methods for binary quadratic diophantine equations. Let's select the
# solution which minimizes |x| + |z|
if coeff[x**2] == 0 and coeff[y**2] == 0 and coeff[z**2] == 0:
if coeff[x*z] != 0:
sols = diophantine(coeff[x*y]*x + coeff[y*z]*z - x*z)
s = sols.pop()
min_sum = abs(s[0]) + abs(s[1])
for r in sols:
if abs(r[0]) + abs(r[1]) < min_sum:
s = r
min_sum = abs(s[0]) + abs(s[1])
x_0, y_0, z_0 = s[0], -coeff[x*z], s[1]
else:
var[0], var[1] = _var[1], _var[0]
y_0, x_0, z_0 = _diop_ternary_quadratic(var, coeff)
return simplified(x_0, y_0, z_0)
if coeff[x**2] == 0:
# If the coefficient of x is zero change the variables
if coeff[y**2] == 0:
var[0], var[2] = _var[2], _var[0]
z_0, y_0, x_0 = _diop_ternary_quadratic(var, coeff)
else:
var[0], var[1] = _var[1], _var[0]
y_0, x_0, z_0 = _diop_ternary_quadratic(var, coeff)
else:
if coeff[x*y] != 0 or coeff[x*z] != 0:
# Apply the transformation x --> X - (B*y + C*z)/(2*A)
A = coeff[x**2]
B = coeff[x*y]
C = coeff[x*z]
D = coeff[y**2]
E = coeff[y*z]
F = coeff[z**2]
_coeff = dict()
_coeff[x**2] = 4*A**2
_coeff[y**2] = 4*A*D - B**2
_coeff[z**2] = 4*A*F - C**2
_coeff[y*z] = 4*A*E - 2*B*C
_coeff[x*y] = 0
_coeff[x*z] = 0
X_0, y_0, z_0 = _diop_ternary_quadratic(var, _coeff)
if X_0 is None:
return (None, None, None)
l = (S(B*y_0 + C*z_0)/(2*A)).q
x_0, y_0, z_0 = X_0*l - (S(B*y_0 + C*z_0)/(2*A)).p, y_0*l, z_0*l
elif coeff[z*y] != 0:
if coeff[y**2] == 0:
if coeff[z**2] == 0:
# Equations of the form A*x**2 + E*yz = 0.
A = coeff[x**2]
E = coeff[y*z]
b = (S(-E)/A).p
a = (S(-E)/A).q
x_0, y_0, z_0 = b, a, b
else:
# Ax**2 + E*y*z + F*z**2 = 0
var[0], var[2] = _var[2], _var[0]
z_0, y_0, x_0 = _diop_ternary_quadratic(var, coeff)
else:
# A*x**2 + D*y**2 + E*y*z + F*z**2 = 0, C may be zero
var[0], var[1] = _var[1], _var[0]
y_0, x_0, z_0 = _diop_ternary_quadratic(var, coeff)
else:
# Ax**2 + D*y**2 + F*z**2 = 0, C may be zero
x_0, y_0, z_0 = _diop_ternary_quadratic_normal(var, coeff)
return simplified(x_0, y_0, z_0)
def transformation_to_normal(eq):
"""
Returns the transformation Matrix from general ternary quadratic equation
`eq` to normal form.
General form of the ternary quadratic equation is `ax^2 + by^2 cz^2 + dxy +
eyz + fxz`. This function returns a 3X3 transformation Matrix which
transforms the former equation to the form `ax^2 + by^2 + cz^2 = 0`. This
is not used in solving ternary quadratics. Only implemented for the sake
of completeness.
"""
var, coeff, diop_type = classify_diop(eq)
if diop_type == "homogeneous_ternary_quadratic":
return _transformation_to_normal(var, coeff)
def _transformation_to_normal(var, coeff):
_var = [var[0]]*3
_var[1], _var[2] = var[1], var[2]
x, y, z = var[:3]
if coeff[x**2] == 0:
# If the coefficient of x is zero change the variables
if coeff[y**2] == 0:
_var[0], _var[2] = var[2], var[0]
T = _transformation_to_normal(_var, coeff)
T.row_swap(0, 2)
T.col_swap(0, 2)
return T
else:
_var[0], _var[1] = var[1], var[0]
T = _transformation_to_normal(_var, coeff)
T.row_swap(0, 1)
T.col_swap(0, 1)
return T
else:
# Apply the transformation x --> X - (B*Y + C*Z)/(2*A)
if coeff[x*y] != 0 or coeff[x*z] != 0:
A = coeff[x**2]
B = coeff[x*y]
C = coeff[x*z]
D = coeff[y**2]
E = coeff[y*z]
F = coeff[z**2]
_coeff = dict()
_coeff[x**2] = 4*A**2
_coeff[y**2] = 4*A*D - B**2
_coeff[z**2] = 4*A*F - C**2
_coeff[y*z] = 4*A*E - 2*B*C
_coeff[x*y] = 0
_coeff[x*z] = 0
T_0 = _transformation_to_normal(_var, _coeff)
return Matrix(3, 3, [1, S(-B)/(2*A), S(-C)/(2*A), 0, 1, 0, 0, 0, 1]) * T_0
elif coeff[y*z] != 0:
if coeff[y**2] == 0:
if coeff[z**2] == 0:
# Equations of the form A*x**2 + E*yz = 0.
# Apply the transformation y -> Y + Z and z -> Y - Z
return Matrix(3, 3, [1, 0, 0, 0, 1, 1, 0, 1, -1])
else:
# Ax**2 + E*y*z + F*z**2 = 0
_var[0], _var[2] = var[2], var[0]
T = _transformation_to_normal(_var, coeff)
T.row_swap(0, 2)
T.col_swap(0, 2)
return T
else:
# A*x**2 + D*y**2 + E*y*z + F*z**2 = 0, F may be zero
_var[0], _var[1] = var[1], var[0]
T = _transformation_to_normal(_var, coeff)
T.row_swap(0, 1)
T.col_swap(0, 1)
return T
else:
return Matrix(3, 3, [1, 0, 0, 0, 1, 0, 0, 0, 1])
def simplified(x, y, z):
"""
Simplify the solution `(x, y, z)`.
"""
if x is None or y is None or z is None:
return (x, y, z)
g = igcd(x, igcd(y, z))
return x // g, y // g, z // g
def parametrize_ternary_quadratic(eq):
"""
Returns the parametrized general solution for the ternary quadratic
equation ``eq`` which has the form
`ax^2 + by^2 + cz^2 + fxy + gyz + hxz = 0`.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.solvers.diophantine import parametrize_ternary_quadratic
>>> parametrize_ternary_quadratic(x**2 + y**2 - z**2)
(2*p*q, p**2 - q**2, p**2 + q**2)
Here `p` and `q` are two co-prime integers.
>>> parametrize_ternary_quadratic(3*x**2 + 2*y**2 - z**2 - 2*x*y + 5*y*z - 7*y*z)
(2*p**2 - 2*p*q - q**2, 2*p**2 + 2*p*q - q**2, 2*p**2 - 2*p*q + 3*q**2)
>>> parametrize_ternary_quadratic(124*x**2 - 30*y**2 - 7729*z**2)
(-1410*p**2 - 363263*q**2, 2700*p**2 + 30916*p*q - 695610*q**2, -60*p**2 + 5400*p*q + 15458*q**2)
References
==========
.. [1] The algorithmic resolution of Diophantine equations, Nigel P. Smart,
London Mathematical Society Student Texts 41, Cambridge University
Press, Cambridge, 1998.
"""
var, coeff, diop_type = classify_diop(eq)
if diop_type == "homogeneous_ternary_quadratic":
x_0, y_0, z_0 = _diop_ternary_quadratic(var, coeff)
return _parametrize_ternary_quadratic((x_0, y_0, z_0), var, coeff)
def _parametrize_ternary_quadratic(solution, _var, coeff):
x, y, z = _var[:3]
x_0, y_0, z_0 = solution[:3]
v = [x]*3
v[0], v[1], v[2] = _var[0], _var[1], _var[2]
if x_0 is None:
return (None, None, None)
if x_0 == 0:
if y_0 == 0:
v[0], v[2] = v[2], v[0]
z_p, y_p, x_p = _parametrize_ternary_quadratic((z_0, y_0, x_0), v, coeff)
return x_p, y_p, z_p
else:
v[0], v[1] = v[1], v[0]
y_p, x_p, z_p = _parametrize_ternary_quadratic((y_0, x_0, z_0), v, coeff)
return x_p, y_p, z_p
x, y, z = v[:3]
r, p, q = symbols("r, p, q", integer=True)
eq = x**2*coeff[x**2] + y**2*coeff[y**2] + z**2*coeff[z**2] + x*y*coeff[x*y] + y*z*coeff[y*z] + z*x*coeff[z*x]
eq_1 = Subs(eq, (x, y, z), (r*x_0, r*y_0 + p, r*z_0 + q)).doit()
eq_1 = _mexpand(eq_1)
A, B = eq_1.as_independent(r, as_Add=True)
x = A*x_0
y = (A*y_0 - _mexpand(B/r*p))
z = (A*z_0 - _mexpand(B/r*q))
return x, y, z
def diop_ternary_quadratic_normal(eq):
"""
Solves the quadratic ternary diophantine equation,
`ax^2 + by^2 + cz^2 = 0`.
Here the coefficients `a`, `b`, and `c` should be non-zero. Otherwise the
equation will be a quadratic binary or univariate equation. If solvable,
returns a tuple `(x, y, z)` that satisfies the given equation. If the
equation does not have integer solutions, `(None, None, None)` is returned.
Usage
=====
``diop_ternary_quadratic_normal(eq)``: where ``eq`` is an equation of the form
`ax^2 + by^2 + cz^2 = 0`.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.solvers.diophantine import diop_ternary_quadratic_normal
>>> diop_ternary_quadratic_normal(x**2 + 3*y**2 - z**2)
(1, 0, 1)
>>> diop_ternary_quadratic_normal(4*x**2 + 5*y**2 - z**2)
(1, 0, 2)
>>> diop_ternary_quadratic_normal(34*x**2 - 3*y**2 - 301*z**2)
(4, 9, 1)
"""
var, coeff, diop_type = classify_diop(eq)
if diop_type == "homogeneous_ternary_quadratic":
return _diop_ternary_quadratic_normal(var, coeff)
def _diop_ternary_quadratic_normal(var, coeff):
x, y, z = var[:3]
a = coeff[x**2]
b = coeff[y**2]
c = coeff[z**2]
if a*b*c == 0:
raise ValueError("Try factoring out you equation or using diophantine()")
g = igcd(a, igcd(b, c))
a = a // g
b = b // g
c = c // g
a_0 = square_factor(a)
b_0 = square_factor(b)
c_0 = square_factor(c)
a_1 = a // a_0**2
b_1 = b // b_0**2
c_1 = c // c_0**2
a_2, b_2, c_2 = pairwise_prime(a_1, b_1, c_1)
A = -a_2*c_2
B = -b_2*c_2
# If the following two conditions are satisfied then there are no solutions
if A < 0 and B < 0:
return (None, None, None)
if (sqrt_mod(-b_2*c_2, a_2) is None or sqrt_mod(-c_2*a_2, b_2) is None or
sqrt_mod(-a_2*b_2, c_2) is None):
return (None, None, None)
z_0, x_0, y_0 = descent(A, B)
if divisible(z_0, c_2):
z_0 = z_0 // abs(c_2)
else:
x_0 = x_0*(S(z_0)/c_2).q
y_0 = y_0*(S(z_0)/c_2).q
z_0 = (S(z_0)/c_2).p
x_0, y_0, z_0 = simplified(x_0, y_0, z_0)
# Holzer reduction
if sign(a) == sign(b):
x_0, y_0, z_0 = holzer(x_0, y_0, z_0, abs(a_2), abs(b_2), abs(c_2))
elif sign(a) == sign(c):
x_0, z_0, y_0 = holzer(x_0, z_0, y_0, abs(a_2), abs(c_2), abs(b_2))
else:
y_0, z_0, x_0 = holzer(y_0, z_0, x_0, abs(b_2), abs(c_2), abs(a_2))
x_0 = reconstruct(b_1, c_1, x_0)
y_0 = reconstruct(a_1, c_1, y_0)
z_0 = reconstruct(a_1, b_1, z_0)
l = ilcm(a_0, ilcm(b_0, c_0))
x_0 = abs(x_0*l//a_0)
y_0 = abs(y_0*l//b_0)
z_0 = abs(z_0*l//c_0)
return simplified(x_0, y_0, z_0)
def square_factor(a):
"""
Returns an integer `c` s.t. `a = c^2k, \ c,k \in Z`. Here `k` is square
free.
Examples
========
>>> from sympy.solvers.diophantine import square_factor
>>> square_factor(24)
2
>>> square_factor(36)
6
>>> square_factor(1)
1
"""
f = factorint(abs(a))
c = 1
for p, e in f.items():
c = c * p**(e//2)
return c
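# Illustrative check, not part of the original module: square_factor(a) returns
# the largest c with c**2 dividing a, so the cofactor a // c**2 is square free.
def _square_factor_sketch():
    for a in (24, 36, 1):
        c = square_factor(a)
        assert a % c**2 == 0
        assert all(e < 2 for e in factorint(a // c**2).values())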
def pairwise_prime(a, b, c):
"""
Transform `ax^2 + by^2 + cz^2 = 0` into an equivalent equation
`a'x^2 + b'y^2 + c'z^2 = 0` where `a', b', c'` are pairwise relatively
prime.
Returns a tuple containing `a', b', c'`. `\gcd(a, b, c)` should equal `1`
for this to work. The solutions for `ax^2 + by^2 + cz^2 = 0` can be
recovered from the solutions of `a'x^2 + b'y^2 + c'z^2 = 0`.
Examples
========
>>> from sympy.solvers.diophantine import pairwise_prime
>>> pairwise_prime(6, 15, 10)
(5, 2, 3)
See Also
========
make_prime(), reconstruct()
"""
a, b, c = make_prime(a, b, c)
b, c, a = make_prime(b, c, a)
c, a, b = make_prime(c, a, b)
return a, b, c
def make_prime(a, b, c):
"""
Transform the equation `ax^2 + by^2 + cz^2 = 0` to an equivalent equation
`a'x^2 + b'y^2 + c'z^2 = 0` with `\gcd(a', b') = 1`.
Returns a tuple `(a', b', c')` which satisfies above conditions. Note that
in the returned tuple `\gcd(a', c')` and `\gcd(b', c')` can take any value.
Examples
========
>>> from sympy.solvers.diophantine import make_prime
>>> make_prime(4, 2, 7)
(2, 1, 14)
See Also
========
pairwise_prime(), reconstruct()
"""
g = igcd(a, b)
if g != 1:
f = factorint(g)
for p, e in f.items():
a = a // p**e
b = b // p**e
if e % 2 == 1:
c = p*c
return a, b, c
def reconstruct(a, b, z):
"""
Reconstruct the `z` value of an equivalent solution of `ax^2 + by^2 + cz^2`
from the `z` value of a solution of a transformed version of the above
equation.
"""
g = igcd(a, b)
if g != 1:
f = factorint(g)
for p, e in f.items():
if e % 2 == 0:
z = z*p**(e//2)
else:
z = z*p**((e//2)+1)
return z
def ldescent(A, B):
"""
Uses Lagrange's method to find a non-trivial solution to
`w^2 = Ax^2 + By^2`.
Here, `A \\neq 0` and `B \\neq 0` and `A` and `B` are square free. Output a
tuple `(w_0, x_0, y_0)` which is a solution to the above equation.
Examples
========
>>> from sympy.solvers.diophantine import ldescent
>>> ldescent(1, 1) # w^2 = x^2 + y^2
(1, 1, 0)
>>> ldescent(4, -7) # w^2 = 4x^2 - 7y^2
(2, -1, 0)
This means that `x = -1, y = 0` and `w = 2` is a solution to the equation
`w^2 = 4x^2 - 7y^2`
>>> ldescent(5, -1) # w^2 = 5x^2 - y^2
(2, 1, -1)
References
==========
.. [1] The algorithmic resolution of Diophantine equations, Nigel P. Smart,
London Mathematical Society Student Texts 41, Cambridge University
Press, Cambridge, 1998.
.. [2] Efficient Solution of Rational Conics, J. E. Cremona and D. Rusin,
Mathematics of Computation, Volume 00, Number 0.
"""
if abs(A) > abs(B):
w, y, x = ldescent(B, A)
return w, x, y
if A == 1:
return (S.One, S.One, 0)
if B == 1:
return (S.One, 0, S.One)
r = sqrt_mod(A, B)
Q = (r**2 - A) // B
if Q == 0:
B_0 = 1
d = 0
else:
div = divisors(Q)
B_0 = None
for i in div:
if isinstance(sqrt(abs(Q) // i), Integer):
B_0, d = sign(Q)*i, sqrt(abs(Q) // i)
break
if B_0 is not None:
W, X, Y = ldescent(A, B_0)
return simplified((-A*X + r*W), (r*X - W), Y*(B_0*d))
# In this module descent() will always be called with inputs which have solutions.
def descent(A, B):
"""
Lagrange's `descent()` with lattice-reduction to find solutions to
`x^2 = Ay^2 + Bz^2`.
Here `A` and `B` should be square free and pairwise prime. It should always be
called with suitable ``A`` and ``B`` so that the above equation has
solutions.
This is faster than the normal Lagrange descent algorithm because
Gaussian lattice reduction is used.
Examples
========
>>> from sympy.solvers.diophantine import descent
>>> descent(3, 1) # x**2 = 3*y**2 + z**2
(1, 0, 1)
`(x, y, z) = (1, 0, 1)` is a solution to the above equation.
>>> descent(41, -113)
(-16, -3, 1)
References
==========
.. [1] Efficient Solution of Rational Conics, J. E. Cremona and D. Rusin,
Mathematics of Computation, Volume 00, Number 0.
"""
if abs(A) > abs(B):
x, y, z = descent(B, A)
return x, z, y
if B == 1:
return (1, 0, 1)
if A == 1:
return (1, 1, 0)
if B == -1:
return (None, None, None)
if B == -A:
return (0, 1, 1)
if B == A:
x, z, y = descent(-1, A)
return (A*y, z, x)
w = sqrt_mod(A, B)
x_0, z_0 = gaussian_reduce(w, A, B)
t = (x_0**2 - A*z_0**2) // B
t_2 = square_factor(t)
t_1 = t // t_2**2
x_1, z_1, y_1 = descent(A, t_1)
return simplified(x_0*x_1 + A*z_0*z_1, z_0*x_1 + x_0*z_1, t_1*t_2*y_1)
def gaussian_reduce(w, a, b):
"""
Returns a reduced solution `(x, z)` to the congruence
`X^2 - aZ^2 \equiv 0 \ (mod \ b)` so that `x^2 + |a|z^2` is minimal.
Details
=======
Here ``w`` is a solution of the congruence `x^2 \equiv a \ (mod \ b)`
References
==========
.. [1] Gaussian lattice Reduction [online]. Available:
http://home.ie.cuhk.edu.hk/~wkshum/wordpress/?p=404
.. [2] Efficient Solution of Rational Conics, J. E. Cremona and D. Rusin,
Mathematics of Computation, Volume 00, Number 0.
"""
u = (0, 1)
v = (1, 0)
if dot(u, v, w, a, b) < 0:
v = (-v[0], -v[1])
if norm(u, w, a, b) < norm(v, w, a, b):
u, v = v, u
while norm(u, w, a, b) > norm(v, w, a, b):
k = dot(u, v, w, a, b) // dot(v, v, w, a, b)
u, v = v, (u[0]- k*v[0], u[1]- k*v[1])
u, v = v, u
if dot(u, v, w, a, b) < dot(v, v, w, a, b)/2 or norm((u[0]-v[0], u[1]-v[1]), w, a, b) > norm(v, w, a, b):
c = v
else:
c = (u[0] - v[0], u[1] - v[1])
return c[0]*w + b*c[1], c[0]
def dot(u, v, w, a, b):
"""
Returns a special dot product of the vectors `u = (u_{1}, u_{2})` and
`v = (v_{1}, v_{2})` which is defined in order to reduce solution of
the congruence equation `X^2 - aZ^2 \equiv 0 \ (mod \ b)`.
"""
u_1, u_2 = u[:2]
v_1, v_2 = v[:2]
return (w*u_1 + b*u_2)*(w*v_1 + b*v_2) + abs(a)*u_1*v_1
def norm(u, w, a, b):
"""
Returns the norm of the vector `u = (u_{1}, u_{2})` under the dot product
defined by `u \cdot v = (wu_{1} + bu_{2})(wv_{1} + bv_{2}) + |a|u_{1}v_{1}`
where `u = (u_{1}, u_{2})` and `v = (v_{1}, v_{2})`.
"""
u_1, u_2 = u[:2]
return sqrt(dot((u_1, u_2), (u_1, u_2), w, a, b))
def holzer(x_0, y_0, z_0, a, b, c):
"""
Simplify the solution `(x_{0}, y_{0}, z_{0})` of the equation
`ax^2 + by^2 = cz^2` with `a, b, c > 0` and `z_{0}^2 \geq \mid ab \mid` to
a new reduced solution `(x, y, z)` such that `z^2 \leq \mid ab \mid`.
"""
while z_0 > sqrt(a*b):
if c % 2 == 0:
k = c // 2
u_0, v_0 = base_solution_linear(k, y_0, -x_0)
else:
k = 2*c
u_0, v_0 = base_solution_linear(c, y_0, -x_0)
w = -(a*u_0*x_0 + b*v_0*y_0) // (c*z_0)
if c % 2 == 1:
if w % 2 != (a*u_0 + b*v_0) % 2:
w = w + 1
x = (x_0*(a*u_0**2 + b*v_0**2 + c*w**2) - 2*u_0*(a*u_0*x_0 + b*v_0*y_0 + c*w*z_0)) // k
y = (y_0*(a*u_0**2 + b*v_0**2 + c*w**2) - 2*v_0*(a*u_0*x_0 + b*v_0*y_0 + c*w*z_0)) // k
z = (z_0*(a*u_0**2 + b*v_0**2 + c*w**2) - 2*w*(a*u_0*x_0 + b*v_0*y_0 + c*w*z_0)) // k
x_0, y_0, z_0 = x, y, z
return x_0, y_0, z_0
def diop_general_pythagorean(eq, param=symbols("m", integer=True)):
"""
Solves the general pythagorean equation,
`a_{1}^2x_{1}^2 + a_{2}^2x_{2}^2 + . . . + a_{n}^2x_{n}^2 - a_{n + 1}^2x_{n + 1}^2 = 0`.
Returns a tuple which contains a parametrized solution to the equation,
sorted in the same order as the input variables.
Usage
=====
``diop_general_pythagorean(eq, param)``: where ``eq`` is a general
pythagorean equation which is assumed to be zero and ``param`` is the base
parameter used to construct other parameters by subscripting.
Examples
========
>>> from sympy.solvers.diophantine import diop_general_pythagorean
>>> from sympy.abc import a, b, c, d, e
>>> diop_general_pythagorean(a**2 + b**2 + c**2 - d**2)
(m1**2 + m2**2 - m3**2, 2*m1*m3, 2*m2*m3, m1**2 + m2**2 + m3**2)
>>> diop_general_pythagorean(9*a**2 - 4*b**2 + 16*c**2 + 25*d**2 + e**2)
(10*m1**2 + 10*m2**2 + 10*m3**2 - 10*m4**2, 15*m1**2 + 15*m2**2 + 15*m3**2 + 15*m4**2, 15*m1*m4, 12*m2*m4, 60*m3*m4)
"""
var, coeff, diop_type = classify_diop(eq)
if diop_type == "general_pythagorean":
return _diop_general_pythagorean(var, coeff, param)
def _diop_general_pythagorean(var, coeff, t):
if sign(coeff[var[0]**2]) + sign(coeff[var[1]**2]) + sign(coeff[var[2]**2]) < 0:
for key in coeff.keys():
coeff[key] = coeff[key] * -1
n = len(var)
index = 0
for i, v in enumerate(var):
if sign(coeff[v**2]) == -1:
index = i
m = symbols(str(t) + "1:" + str(n), integer=True)
l = []
ith = 0
for m_i in m:
ith = ith + m_i**2
l.append(ith - 2*m[n - 2]**2)
for i in xrange(n - 2):
l.append(2*m[i]*m[n-2])
sol = l[:index] + [ith] + l[index:]
lcm = 1
for i, v in enumerate(var):
if i == index or (index > 0 and i == 0) or (index == 0 and i == 1):
lcm = ilcm(lcm, sqrt(abs(coeff[v**2])))
else:
lcm = ilcm(lcm, sqrt(coeff[v**2]) if sqrt(coeff[v**2]) % 2 else sqrt(coeff[v**2]) // 2)
for i, v in enumerate(var):
sol[i] = (lcm*sol[i]) / sqrt(abs(coeff[v**2]))
return tuple(sol)
def diop_general_sum_of_squares(eq, limit=1):
"""
Solves the equation `x_{1}^2 + x_{2}^2 + . . . + x_{n}^2 - k = 0`.
Returns at most ``limit`` solutions. Currently there is no way to
set ``limit`` using higher-level APIs like ``diophantine()`` or
``diop_solve()``, but that will be fixed soon.
Usage
=====
``general_sum_of_squares(eq, limit)`` : Here ``eq`` is an expression which
is assumed to be zero. Also, ``eq`` should be in the form,
`x_{1}^2 + x_{2}^2 + . . . + x_{n}^2 - k = 0`. At most ``limit`` number of
solutions are returned.
Details
=======
When `n = 3` if `k = 4^a(8m + 7)` for some `a, m \in Z` then there will be
no solutions. Refer [1]_ for more details.
Examples
========
>>> from sympy.solvers.diophantine import diop_general_sum_of_squares
>>> from sympy.abc import a, b, c, d, e, f
>>> diop_general_sum_of_squares(a**2 + b**2 + c**2 + d**2 + e**2 - 2345)
set([(0, 48, 5, 4, 0)])
Reference
=========
.. [1] Representing an Integer as a sum of three squares, [online],
Available:
http://www.proofwiki.org/wiki/Integer_as_Sum_of_Three_Squares
"""
var, coeff, diop_type = classify_diop(eq)
if diop_type == "general_sum_of_squares":
return _diop_general_sum_of_squares(var, coeff, limit)
def _diop_general_sum_of_squares(var, coeff, limit=1):
n = len(var)
k = -int(coeff[Integer(1)])
s = set([])
if k < 0:
return set([])
if n == 3:
s.add(sum_of_three_squares(k))
elif n == 4:
s.add(sum_of_four_squares(k))
else:
m = n // 4
f = partition(k, m, True)
for j in xrange(limit):
soln = []
try:
l = next(f)
except StopIteration:
break
for n_i in l:
a, b, c, d = sum_of_four_squares(n_i)
soln = soln + [a, b, c, d]
soln = soln + [0] * (n % 4)
s.add(tuple(soln))
return s
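# Illustrative check, not part of the original module: each solution returned
# by diop_general_sum_of_squares() should have squares summing to the constant
# term, as in the docstring example with k = 2345.
def _general_sum_of_squares_sketch():
    a, b, c, d, e = symbols("a, b, c, d, e", integer=True)
    eq = a**2 + b**2 + c**2 + d**2 + e**2 - 2345
    for sol in diop_general_sum_of_squares(eq):
        assert sum(i**2 for i in sol) == 2345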
## Functions below this comment can be more suitably grouped under an Additive number theory module
## rather than the Diophantine equation module.
def partition(n, k=None, zeros=False):
"""
Returns a generator that can be used to generate partitions of an integer
`n`.
A partition of `n` is a set of positive integers which add up to `n`. For
example, the partitions of 3 are 3, 1 + 2, and 1 + 1 + 1. A partition is returned
as a tuple. If ``k`` equals None, then all possible partitions are returned
irrespective of their size, otherwise only the partitions of size ``k`` are
returned. If there are no partitions of `n` with size `k` then an empty tuple
is returned. If the ``zeros`` parameter is set to True then a suitable
number of zeros are added at the end of every partition of size less than
``k``.
The ``zeros`` parameter is considered only if ``k`` is not None. When the
partitions are exhausted, the next ``next()`` call raises the ``StopIteration``
exception, so this function should always be used inside a try-except
block.
Details
=======
``partition(n, k)``: Here ``n`` is a positive integer and ``k`` is the size
of the partition which is also positive integer.
Examples
========
>>> from sympy.solvers.diophantine import partition
>>> f = partition(5)
>>> next(f)
(1, 1, 1, 1, 1)
>>> next(f)
(1, 1, 1, 2)
>>> g = partition(5, 3)
>>> next(g)
(3, 1, 1)
>>> next(g)
(2, 2, 1)
Reference
=========
.. [1] Generating Integer Partitions, [online],
Available: http://homepages.ed.ac.uk/jkellehe/partitions.php
"""
if n < 1:
yield tuple()
if k is not None:
if k < 1:
yield tuple()
elif k > n:
if zeros:
for i in xrange(1, n):
for t in partition(n, i):
yield (t,) + (0,) * (k - i)
else:
yield tuple()
else:
a = [1 for i in xrange(k)]
a[0] = n - k + 1
yield tuple(a)
i = 1
while a[0] >= n // k + 1:
j = 0
while j < i and j + 1 < k:
a[j] = a[j] - 1
a[j + 1] = a[j + 1] + 1
yield tuple(a)
j = j + 1
i = i + 1
if zeros:
for m in xrange(1, k):
for a in partition(n, m):
yield tuple(a) + (0,) * (k - m)
else:
a = [0 for i in xrange(n + 1)]
l = 1
y = n - 1
while l != 0:
x = a[l - 1] + 1
l -= 1
while 2*x <= y:
a[l] = x
y -= x
l += 1
m = l + 1
while x <= y:
a[l] = x
a[m] = y
yield tuple(a[:l + 2])
x += 1
y -= 1
a[l] = x + y
y = x + y - 1
yield tuple(a[:l + 1])
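# Illustrative check, not part of the original module: every tuple produced by
# partition(5) sums to 5, and there are p(5) = 7 partitions in total.
def _partition_sketch():
    parts = list(partition(5))
    assert all(sum(p) == 5 for p in parts)
    assert len(parts) == 7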
def prime_as_sum_of_two_squares(p):
"""
Represent a prime `p` which is congruent to 1 mod 4, as a sum of two
squares.
Examples
========
>>> from sympy.solvers.diophantine import prime_as_sum_of_two_squares
>>> prime_as_sum_of_two_squares(5)
(2, 1)
Reference
=========
.. [1] Representing a number as a sum of four squares, [online],
Available: http://www.schorn.ch/howto.html
"""
if p % 8 == 5:
b = 2
else:
b = 3
while pow(b, (p - 1) // 2, p) == 1:
b = nextprime(b)
b = pow(b, (p - 1) // 4, p)
a = p
while b**2 > p:
a, b = b, a % b
return (b, a % b)
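# Sanity check for ``prime_as_sum_of_two_squares`` (a sketch, left as a
# comment so nothing runs at import time; 13 is congruent to 1 mod 4):
#
#     >>> a, b = prime_as_sum_of_two_squares(13)
#     >>> a**2 + b**2
#     13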
def sum_of_three_squares(n):
"""
Returns a 3-tuple `(a, b, c)` such that `a^2 + b^2 + c^2 = n` and
`a, b, c \geq 0`.
Returns (None, None, None) if `n = 4^a(8m + 7)` for some `a, m \in Z`. See
[1]_ for more details.
Usage
=====
``sum_of_three_squares(n)``: Here ``n`` is a non-negative integer.
Examples
========
>>> from sympy.solvers.diophantine import sum_of_three_squares
>>> sum_of_three_squares(44542)
(207, 37, 18)
References
==========
.. [1] Representing a number as a sum of three squares, [online],
Available: http://www.schorn.ch/howto.html
"""
special = {1:(1, 0, 0), 2:(1, 1, 0), 3:(1, 1, 1), 10: (1, 3, 0), 34: (3, 3, 4), 58:(3, 7, 0),
85:(6, 7, 0), 130:(3, 11, 0), 214:(3, 6, 13), 226:(8, 9, 9), 370:(8, 9, 15),
526:(6, 7, 21), 706:(15, 15, 16), 730:(1, 27, 0), 1414:(6, 17, 33), 1906:(13, 21, 36),
2986: (21, 32, 39), 9634: (56, 57, 57)}
v = 0
if n == 0:
return (0, 0, 0)
while n % 4 == 0:
v = v + 1
n = n // 4
if n % 8 == 7:
return (None, None, None)
if n in special.keys():
x, y, z = special[n]
return (2**v*x, 2**v*y, 2**v*z)
l = int(sqrt(n))
if n == l**2:
return (2**v*l, 0, 0)
x = None
if n % 8 == 3:
l = l if l % 2 else l - 1
for i in xrange(l, -1, -2):
if isprime((n - i**2) // 2):
x = i
break
y, z = prime_as_sum_of_two_squares((n - x**2) // 2)
return (2**v*x, 2**v*(y + z), 2**v*abs(y - z))
if n % 8 == 2 or n % 8 == 6:
l = l if l % 2 else l - 1
else:
l = l - 1 if l % 2 else l
for i in xrange(l, -1, -2):
if isprime(n - i**2):
x = i
break
y, z = prime_as_sum_of_two_squares(n - x**2)
return (2**v*x, 2**v*y, 2**v*z)
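# Usage sketch for ``sum_of_three_squares`` (comment only). The invariant
# a**2 + b**2 + c**2 == n holds whenever n is not of the form 4**a*(8*m + 7),
# in which case (None, None, None) is returned:
#
#     >>> a, b, c = sum_of_three_squares(44542)
#     >>> a**2 + b**2 + c**2
#     44542
#     >>> sum_of_three_squares(7)
#     (None, None, None)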
def sum_of_four_squares(n):
"""
Returns a 4-tuple `(a, b, c, d)` such that `a^2 + b^2 + c^2 + d^2 = n`.
Here `a, b, c, d \geq 0`.
Usage
=====
``sum_of_four_squares(n)``: Here ``n`` is a non-negative integer.
Examples
========
>>> from sympy.solvers.diophantine import sum_of_four_squares
>>> sum_of_four_squares(3456)
(8, 48, 32, 8)
>>> sum_of_four_squares(1294585930293)
(0, 1137796, 2161, 1234)
References
==========
.. [1] Representing a number as a sum of four squares, [online],
Available: http://www.schorn.ch/howto.html
"""
if n == 0:
return (0, 0, 0, 0)
v = 0
while n % 4 == 0:
v = v + 1
n = n // 4
if n % 8 == 7:
d = 2
n = n - 4
elif n % 8 == 6 or n % 8 == 2:
d = 1
n = n - 1
else:
d = 0
x, y, z = sum_of_three_squares(n)
return (2**v*d, 2**v*x, 2**v*y, 2**v*z)
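# Lagrange's four-square theorem guarantees a representation for every
# non-negative integer, so the following invariant (a sketch, kept as a
# comment) should hold for any n >= 0:
#
#     >>> a, b, c, d = sum_of_four_squares(3456)
#     >>> a**2 + b**2 + c**2 + d**2
#     3456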
def power_representation(n, p, k, zeros=False):
"""
Returns a generator for finding k-tuples `(n_{1}, n_{2}, . . . n_{k})` such
that `n = n_{1}^p + n_{2}^p + . . . n_{k}^p`.
Here `n` is a non-negative integer. A ``StopIteration`` exception is raised after
all the solutions are generated, so this generator should always be used within a
try-except block.
Usage
=====
``power_representation(n, p, k, zeros)``: Represent number ``n`` as a sum
of ``k``, ``p``th powers. If ``zeros`` is true, then the solutions will
contain zeros.
Examples
========
>>> from sympy.solvers.diophantine import power_representation
>>> f = power_representation(1729, 3, 2) # Represent 1729 as a sum of two cubes
>>> next(f)
(12, 1)
>>> next(f)
(10, 9)
"""
if p < 1 or k < 1 or n < 1:
raise ValueError("Expected: n > 0 and k >= 1 and p >= 1")
if k == 1:
if perfect_power(n):
yield (perfect_power(n)[0],)
else:
yield tuple()
elif p == 1:
for t in partition(n, k, zeros):
yield t
else:
l = []
a = integer_nthroot(n, p)[0]
for t in pow_rep_recursive(a, k, n, [], p):
yield t
if zeros:
for i in xrange(2, k):
for t in pow_rep_recursive(a, i, n, [], p):
yield t + (0,) * (k - i)
def pow_rep_recursive(n_i, k, n_remaining, terms, p):
if k == 0 and n_remaining == 0:
yield tuple(terms)
else:
if n_i >= 1 and k > 0 and n_remaining >= 0:
if n_i**p <= n_remaining:
for t in pow_rep_recursive(n_i, k - 1, n_remaining - n_i**p, terms + [n_i], p):
yield t
for t in pow_rep_recursive(n_i - 1, k, n_remaining, terms, p):
yield t
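# Usage sketch for ``power_representation`` (comment only; the order of the
# yielded tuples is implementation specific, so only the defining property is
# checked here):
#
#     >>> t = next(power_representation(1729, 3, 2))
#     >>> sum(x**3 for x in t)
#     1729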
| Cuuuurzel/KiPyCalc | sympy/solvers/diophantine.py | Python | mit | 79,458 | ["Gaussian"] | a08df6247d78663f8b6c61a727f59aa22009ad9ebff5d3693bf7294df8ee8f03 |
"""
========================================
Create 2D bar graphs in different planes
========================================
Demonstrates making a 3D plot which has 2D bar graphs projected onto
planes y=0, y=1, etc.
"""
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.ticker as ticker
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
#for c, z in zip(['b', 'r', 'y', 'g', 'b'], [40000, 30000, 10000, 5000, 1000]):
for c, z in zip(['b', 'r', 'y', 'g', 'b'], [40,30,20,10,0]):
#xs = np.arange(6)
#ys = np.random.rand(6)
xs = [1,2,3,4,5,6] # pair, neigh, comm, output, modify, other
ys = [6,5,4,3,2,1] # tpair, tneigh, tcomm, toutput, tmodify, tother
# You can provide either a single color or an array; the line that would color
# the first bar of each set cyan is left commented out below.
cs = [c] * len(xs)
#cs[0] = 'c'
#ax.bar(xs, ys, zs=z, zdir='y', color=cs, alpha=0.8)
ax.bar(xs, ys, zs=z, zdir='y') #, color=cs, alpha=0.8)
# left The x coordinates of the left sides of the bars.
# height The height of the bars.
# zs Z coordinate of bars, if one value is specified they will all be placed at the same z.
# zdir Which direction to use as z
#ax.set_xlabel('Lammps section')
ax.set_ylabel('# of steps (*$10^3$)')
ax.set_zlabel('% of total time')
my_xticks = ['Pair','Neigh','Comm','Output','Modify','Other']
my_yticks = ['5','','10','','20','','30','','40']
ax.set_xticklabels(my_xticks, rotation=15)
ax.set_yticklabels(my_yticks)
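# The matplotlib.ticker import above is unused as written; a sketch of how it
# could pin the tick positions to the six bar categories so the labels line up
# (an assumption about the intended use, not part of the original demo,
# therefore left commented out):
#ax.xaxis.set_major_locator(ticker.FixedLocator([1, 2, 3, 4, 5, 6]))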
plt.show()
| jgphpc/linux | python/bars3d_demo.py | Python | gpl-2.0 | 1,620 | ["LAMMPS"] | 9ff9bd67ca074e72b88ad9d9a94cf726cae1377c96cbbed57d142d9a8269ae69 |
# -*- coding: utf-8 -*-
'''
Copyright (c) 2015 by Tobias Houska
This file is part of Statistical Parameter Estimation Tool (SPOTPY).
:author: Tobias Houska
Holds functions to analyse results out of the database.
Note: This part of SPOTPY is in alpha status and not yet ready for production use.
'''
import numpy as np
import spotpy
font = {'family' : 'calibri',
'weight' : 'normal',
'size' : 18}
def load_csv_results(filename, usecols=None):
"""
Get an array of your results in the given file.
:filename: Expects an available filename, without the .csv ending, in your working directory
:type: str
:return: Result array
:rtype: array
"""
if usecols is None:
return np.genfromtxt(filename+'.csv',delimiter=',',names=True,invalid_raise=False)
else:
return np.genfromtxt(filename+'.csv',delimiter=',',names=True,skip_footer=1,invalid_raise=False,usecols=usecols)[1:]
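# Usage sketch (an assumption about the file layout: a results csv written by
# a spotpy csv database, e.g. 'my_run.csv' in the working directory; kept as a
# comment so the module stays side-effect free on import):
#
# results = load_csv_results('my_run')
# print(get_parameternames(results))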
def load_hdf5_results(filename):
"""
Get an array of your results in the given file.
:filename: Expects an available filename, without the .h5 ending,
in your working directory
:type: str
:return: Result array, simulation is an ndarray,
which is different to structured arrays return by the csv/sql/ram databases
:rtype: array
"""
import h5py
with h5py.File(filename+'.h5', 'r') as f:
return f[filename][()]
def load_csv_parameter_results(filename, usecols=None):
"""
Get an array of the parameter columns ("par...") of your results in the given file.
The first data line may have a different objectivefunction and the last
line may be incomplete, which would otherwise result in an error.
:filename: Expects an available filename, without the .csv ending, in your working directory
:type: str
:return: Result array
:rtype: array
"""
ofile=open(filename+'.csv')
line = ofile.readline()
header=line.split(',')
ofile.close()
words=[]
index =[]
for i,word in enumerate(header):
if word.startswith('par'):
words.append(word)
index.append(i)
return np.genfromtxt(filename+'.csv', delimiter=',', names=words,
usecols=index, invalid_raise=False, skip_header=1)
def get_header(results):
return results.dtype.names
def get_like_fields(results):
header = get_header(results)
fields=[word for word in header if word.startswith('like')]
return fields
def get_parameter_fields(results):
header = get_header(results)
fields=[word for word in header if word.startswith('par')]
return fields
def get_simulation_fields(results):
header = get_header(results)
fields=[word for word in header if word.startswith('sim')]
return fields
def get_modelruns(results):
"""
Get a shorter array out of your result array, containing just the
simulations of your model.
:results: Expects a numpy array whose field names begin with "sim"
:type: array
:return: Array containing just the columns beginning with the prefix "sim"
:rtype: array
"""
fields=[word for word in results.dtype.names if word.startswith('sim')]
return results[fields]
def get_parameters(results):
"""
Get a shorter array out of your result array, containing just the
parameters of your model.
:results: Expects a numpy array whose field names begin with "par"
:type: array
:return: Array containing just the columns beginning with the prefix "par"
:rtype: array
"""
fields=[word for word in results.dtype.names if word.startswith('par')]
results = results[fields]
return results
def get_parameternames(results):
"""
Get list of strings with the names of the parameters of your model.
:results: Expects a numpy array whose field names begin with "par"
:type: array
:return: Strings with the names of the analysed parameters
:rtype: list
"""
fields=[word for word in results.dtype.names if word.startswith('par')]
parnames=[]
for field in fields:
parnames.append(field[3:])
return parnames
def get_maxlikeindex(results,verbose=True):
"""
Get the maximum objectivefunction of your result array
:results: Expects a numpy array which should have a field "like" for objectivefunctions
:type: array
:return: Index of the position in the results array with the maximum objectivefunction
value and value of the maximum objectivefunction of your result array
:rtype: int and float
"""
try:
likes=results['like']
except ValueError:
likes=results['like1']
maximum=np.nanmax(likes)
value=str(round(maximum,4))
text=str('Run number ' )
index=np.where(likes==maximum)
text2=str(' has the highest objectivefunction with: ')
textv=text+str(index[0][0])+text2+value
if verbose:
print(textv)
return index, maximum
def get_minlikeindex(results):
"""
Get the minimum objectivefunction of your result array
:results: Expects a numpy array which should have a field "like" for objectivefunctions
:type: array
:return: Index of the position in the results array with the minimum objectivefunction
value and value of the minimum objectivefunction of your result array
:rtype: int and float
"""
try:
likes=results['like']
except ValueError:
likes=results['like1']
minimum=np.nanmin(likes)
value=str(round(minimum,4))
text=str('Run number ' )
index=np.where(likes==minimum)
text2=str(' has the lowest objectivefunction with: ')
textv=text+str(index[0][0])+text2+value
print(textv)
return index[0][0], minimum
def get_percentiles(results,sim_number=''):
"""
Get the 5th, 25th, 50th, 75th and 95th percentiles of your simulations
:results: Expects a numpy array which should have fields starting with "simulation"
:type: array
:sim_number: Optional, number of your simulation, needed when working with multiple lists of simulations
:type: int
:return: Percentiles of simulations (p5, p25, p50, p75, p95)
:rtype: five lists
"""
p5,p25,p50,p75,p95=[],[],[],[],[]
fields=[word for word in results.dtype.names if word.startswith('simulation'+str(sim_number))]
for i in range(len(fields)):
p5.append(np.percentile(list(results[fields[i]]),5))
p25.append(np.percentile(list(results[fields[i]]),25))
p50.append(np.percentile(list(results[fields[i]]),50))
p75.append(np.percentile(list(results[fields[i]]),75))
p95.append(np.percentile(list(results[fields[i]]),95))
return p5,p25,p50,p75,p95
def calc_like(results,evaluation,objectivefunction):
"""
Calculate another objectivefunction of your results
:results: Expects a numpy array which should have fields starting with "simulation"
:type: array
:evaluation: Expects values, which correspond to your simulations
:type: list
:objectivefunction: Takes evaluation and simulation data and returns an objectivefunction value, e.g. spotpy.objectivefunctions.rmse
:type: function
:return: New objectivefunction list
:rtype: list
"""
likes=[]
sim=get_modelruns(results)
for s in sim:
likes.append(objectivefunction(evaluation,list(s)))
return likes
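# Usage sketch for calc_like (comment only; assumes 'results' was loaded with
# load_csv_results and 'evaluation' has the same length as each simulation):
#
# rmse_values = calc_like(results, evaluation, spotpy.objectivefunctions.rmse)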
def compare_different_objectivefunctions(like1,like2):
"""
Performs Welch's t-test (aka unequal variances t-test)
:like1: objectivefunction values
:type: list
:like2: Other objectivefunction values
:type: list
:return: t-statistic and p-value
:rtype: tuple
"""
from scipy import stats
out = stats.ttest_ind(like1, like2, equal_var=False)
print(out)
if out[1]>0.05:
print('like1 is NOT significantly different from like2: p>0.05')
else:
print('like1 is significantly different from like2: p<0.05')
return out
def get_posterior(results,percentage=10, maximize=True):
"""
Get the best XX% of your result array (e.g. percentage=10 returns the best 10% of the model runs)
:results: Expects a numpy array which should have a field "like1".
:type: array
:percentage: Optional, percentage of the best runs that will be kept (default 10).
:type: float
:maximize: If True (default), higher "like1" column values are assumed to be better.
If False, lower "like1" column values are assumed to be better.
:return: Posterior result array
:rtype: array
"""
if maximize:
index = np.where(results['like1']>=np.percentile(results['like1'],100.0-percentage))
else:
# when lower values are better, keep the runs at or below the percentage-th percentile
index = np.where(results['like1']<=np.percentile(results['like1'],percentage))
return results[index]
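# Usage sketch for get_posterior (comment only): keep the best 10% of runs,
# judged by the 'like1' column, before plotting the parameter uncertainty.
#
# posterior = get_posterior(results, percentage=10)
# plot_parameter_uncertainty(posterior, evaluation)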
def plot_parameter_trace(ax, results, parameter):
# This function plots the parameter setting for each run
for i in range(int(max(results['chain']))):
index=np.where(results['chain']==i)
ax.plot(results['par'+parameter['name']][index],'.', markersize=2)
ax.set_ylabel(parameter['name'])
ax.set_ylim(parameter['minbound'], parameter['maxbound'])
def plot_posterior_parameter_histogram(ax, results, parameter):
# This function plots a histogram of the parameter values from the last 100 runs
ax.hist(results['par'+parameter['name']][-100:],
bins =np.linspace(parameter['minbound'],parameter['maxbound'],20))
ax.set_ylabel('Density')
ax.set_xlim(parameter['minbound'], parameter['maxbound'])
def plot_parameter_uncertainty(posterior_results,evaluation, fig_name='Posterior_parameter_uncertainty.png'):
import matplotlib.pyplot as plt
simulation_fields = get_simulation_fields(posterior_results)
fig= plt.figure(figsize=(16,9))
for i in range(len(evaluation)):
if evaluation[i] == -9999:
evaluation[i] = np.nan
ax = plt.subplot(1,1,1)
q5,q95=[],[]
for field in simulation_fields:
q5.append(np.percentile(list(posterior_results[field]),2.5))
q95.append(np.percentile(list(posterior_results[field]),97.5))
ax.plot(q5,color='dimgrey',linestyle='solid')
ax.plot(q95,color='dimgrey',linestyle='solid')
ax.fill_between(np.arange(0,len(q5),1),list(q5),list(q95),facecolor='dimgrey',zorder=0,
linewidth=0,label='parameter uncertainty')
ax.plot(evaluation,'r.',markersize=1, label='Observation data')
bestindex,bestobjf = get_maxlikeindex(posterior_results,verbose=False)
plt.plot(list(posterior_results[simulation_fields][bestindex][0]),'b-',label='Obj='+str(round(bestobjf,2)))
plt.xlabel('Number of Observation Points')
plt.ylabel ('Simulated value')
plt.legend(loc='upper right')
fig.savefig(fig_name,dpi=300)
text='A plot of the parameter uncertainty has been saved as '+fig_name
print(text)
def sort_like(results):
return np.sort(results,axis=0)
def get_best_parameterset(results,maximize=True):
"""
Get the best parameter set of your result array, depending on your first objectivefunction
:results: Expects a numpy array which should have a field "like" or "like1".
:type: array
:maximize: Optional, default=True meaning the highest objectivefunction is taken as best, if False the lowest objectivefunction is taken as best.
:type: boolean
:return: Best parameter set
:rtype: array
"""
try:
likes=results['like']
except ValueError:
likes=results['like1']
if maximize:
best=np.nanmax(likes)
else:
best=np.nanmin(likes)
index=np.where(likes==best)
best_parameter_set = get_parameters(results[index])[0]
parameter_names = get_parameternames(results)
text=''
for i in range(len(parameter_names)):
text+=parameter_names[i]+'='+str(best_parameter_set[i])+', '
print('Best parameter set:\n'+text[:-2])
return get_parameters(results[index])
def get_min_max(spotpy_setup):
"""
Get the minimum and maximum values of your parameters function of the spotpy setup
:spotpy_setup: Class with a parameters function
:type: class
:return: Possible minimal and maximal values of all parameters in the parameters function of the spotpy_setup class
:rtype: Two arrays
"""
parameter_obj = spotpy.parameter.generate(spotpy.parameter.get_parameters_from_setup(spotpy_setup))
randompar = parameter_obj['random']
for i in range(1000):
randompar = np.column_stack((randompar, parameter_obj['random']))
return np.amin(randompar, axis=1), np.amax(randompar, axis=1)
def get_parbounds(spotpy_setup):
"""
Get the minimum and maximum parameter bounds of your parameters function of the spotpy setup
:spotpy_setup: Class with a parameters function
:type: class
:return: Possible minimal and maximal values of all parameters in the parameters function of the spotpy_setup class
:rtype: list
"""
parmin,parmax=get_min_max(spotpy_setup)
bounds=[]
for i in range(len(parmin)):
bounds.append([parmin[i],parmax[i]])
return bounds
def get_sensitivity_of_fast(results,like_index=1,M=4, print_to_console=True):
"""
Get the sensitivity for every parameter of your result array, created with the FAST algorithm
:results: Expects a numpy array which should have a field "like" or "like1".
:type: array
:like_index: Optional, index of the objectivefunction to base the sensitivity on, default=1, i.e. the first objectivefunction is taken
:type: int
:return: Sensitivity indices for every parameter
:rtype: list
"""
import math
likes=results['like'+str(like_index)]
print('Number of model runs:', likes.size)
parnames = get_parameternames(results)
parnumber=len(parnames)
print('Number of parameters:', parnumber)
rest = likes.size % (parnumber)
if rest != 0:
print(""""
Number of samples in model output file must be a multiple of D,
where D is the number of parameters in your parameter file.
We handle this by ignoring the last """, rest, """runs.""")
likes = likes[:-rest ]
N = int(likes.size / parnumber)
# Recreate the vector omega used in the sampling
omega = np.zeros([parnumber])
omega[0] = math.floor((N - 1) / (2 * M))
m = math.floor(omega[0] / (2 * M))
print('m =', m)
if m >= (parnumber - 1):
omega[1:] = np.floor(np.linspace(1, m, parnumber - 1))
else:
omega[1:] = np.arange(parnumber - 1) % m + 1
print('Omega =', omega)
# Calculate and Output the First and Total Order Values
if print_to_console:
print("Parameter First Total")
Si = dict((k, [None] * parnumber) for k in ['S1', 'ST'])
print(Si)
for i in range(parnumber):
l = np.arange(i * N, (i + 1) * N)
print(l)
Si['S1'][i] = _compute_first_order(likes[l], N, M, omega[0])
Si['ST'][i] = _compute_total_order(likes[l], N, omega[0])
print(Si)
if print_to_console:
print("%s %f %f" %
(parnames[i], Si['S1'][i], Si['ST'][i]))
return Si
def plot_fast_sensitivity(results,like_index=1,number_of_sensitiv_pars=10,fig_name='FAST_sensitivity.png'):
"""
Example of how to plot the sensitivity for every parameter of your result array, created with the FAST algorithm
:results: Expects a numpy array which should have a header defined with the keyword "like".
:type: array
:like_index: Default 1, column from which the sensitivity indices will be estimated
:type: list
:number_of_sensitiv_pars: Optional, this number of most sensitive parameters will be shown in the legend
:type: int
:return: Parameter names which are sensitive, Sensitivity indices for every parameter, Parameter names which are not sensitive
:rtype: Three lists
"""
import matplotlib.pyplot as plt
parnames=get_parameternames(results)
fig=plt.figure(figsize=(9,6))
ax = plt.subplot(1,1,1)
Si = get_sensitivity_of_fast(results, like_index=like_index)
names = []
values = []
no_names = []
no_values = []
index=[]
no_index=[]
try:
threshold = np.sort(list(Si.values())[1])[-number_of_sensitiv_pars]
except IndexError:
threshold = 0
first_sens_call=True
first_insens_call=True
try:
Si.values()
except AttributeError:
exit("Our SI is wrong: " +str(Si))
for j in range(len(list(Si.values())[1])):
if list(Si.values())[1][j]>=threshold:
names.append(j)
values.append(list(Si.values())[1][j])
index.append(j)
if first_sens_call:
ax.bar(j, list(Si.values())[1][j], color='blue', label='Sensitive Parameters')
else:
ax.bar(j, list(Si.values())[1][j], color='blue')
first_sens_call=False
else:
#names.append('')
no_values.append(list(Si.values())[1][j])
no_index.append(j)
if first_insens_call:
ax.bar(j,list(Si.values())[1][j],color='orange', label = 'Insensitive parameter')
else:
ax.bar(j,list(Si.values())[1][j],color='orange')
first_insens_call=False
ax.set_ylim([0,1])
ax.set_xlabel('Model Parameters')
ax.set_ylabel('Total Sensitivity Index')
ax.legend()
ax.set_xticks(np.arange(0,len(parnames)))
xtickNames = ax.set_xticklabels(parnames, color='grey')
plt.setp(xtickNames, rotation=90)
for name_id in names:
ax.get_xticklabels()[name_id].set_color("black")
#ax.set_xticklabels(['0']+parnames)
ax.plot(np.arange(-1,len(parnames)+1,1),[threshold]*(len(parnames)+2),'r--')
ax.set_xlim(-0.5,len(parnames)-0.5)
plt.tight_layout()
fig.savefig(fig_name,dpi=150)
def plot_heatmap_griewank(results,algorithms, fig_name='heatmap_griewank.png'):
"""Example Plot as seen in the SPOTPY Documentation"""
import matplotlib.pyplot as plt
from matplotlib import ticker
from matplotlib import cm
font = {'family' : 'calibri',
'weight' : 'normal',
'size' : 20}
plt.rc('font', **font)
subplots=len(results)
xticks=[-40,0,40]
yticks=[-40,0,40]
fig=plt.figure(figsize=(16,6))
N = 2000
x = np.linspace(-50.0, 50.0, N)
y = np.linspace(-50.0, 50.0, N)
x, y = np.meshgrid(x, y)
z=1+ (x**2+y**2)/4000 - np.cos(x/np.sqrt(2))*np.cos(y/np.sqrt(3))
cmap = plt.get_cmap('autumn')
rows=2.0
for i in range(subplots):
amount_row = int(np.ceil(subplots/rows))
ax = plt.subplot(rows, amount_row, i+1)
CS = ax.contourf(x, y, z,locator=ticker.LogLocator(),cmap=cm.rainbow)
ax.plot(results[i]['par0'],results[i]['par1'],'ko',alpha=0.2,markersize=1.9)
ax.xaxis.set_ticks([])
if i==0:
ax.set_ylabel('y')
if i==subplots/rows:
ax.set_ylabel('y')
if i>=subplots/rows:
ax.set_xlabel('x')
ax.xaxis.set_ticks(xticks)
if i!=0 and i!=subplots/rows:
ax.yaxis.set_ticks([])
ax.set_title(algorithms[i])
fig.savefig(fig_name, bbox_inches='tight')
def plot_objectivefunction(results,evaluation,limit=None,sort=True, fig_name = 'objective_function.png'):
"""Example Plot as seen in the SPOTPY Documentation"""
import matplotlib.pyplot as plt
likes=calc_like(results,evaluation,spotpy.objectivefunctions.rmse)
data=likes
#Calc confidence Interval
mean = np.average(data)
# evaluate sample variance by setting delta degrees of freedom (ddof) to
# 1. The degree used in calculations is N - ddof
stddev = np.std(data, ddof=1)
from scipy.stats import t
# Get the endpoints of the range that contains 99.9% of the t distribution
t_bounds = t.interval(0.999, len(data) - 1)
# add the mean to the scaled interval endpoints
ci = [mean + critval * stddev / np.sqrt(len(data)) for critval in t_bounds]
value="Mean: %f" % mean
print(value)
value="Confidence Interval 99.9%%: %f, %f" % (ci[0], ci[1])
print(value)
threshold=ci[1]
happend=None
bestlike=[data[0]]
for like in data:
if like<bestlike[-1]:
bestlike.append(like)
if bestlike[-1]<threshold and not happend:
thresholdpos=len(bestlike)
happend=True
else:
bestlike.append(bestlike[-1])
if limit:
plt.plot(bestlike,'k-')#[0:limit])
plt.axvline(x=thresholdpos,color='r')
plt.plot(likes,'b-')
#plt.ylim(ymin=-1,ymax=1.39)
else:
plt.plot(bestlike)
plt.savefig(fig_name)
def plot_parametertrace_algorithms(result_lists, algorithmnames, spot_setup,
fig_name='parametertrace_algorithms.png'):
"""Example Plot as seen in the SPOTPY Documentation"""
import matplotlib.pyplot as plt
font = {'family' : 'calibri',
'weight' : 'normal',
'size' : 20}
plt.rc('font', **font)
fig=plt.figure(figsize=(17,5))
subplots=len(result_lists)
parameter = spotpy.parameter.get_parameters_array(spot_setup)
rows=len(parameter['name'])
for j in range(rows):
for i in range(subplots):
ax = plt.subplot(rows,subplots,i+1+j*subplots)
data=result_lists[i]['par'+parameter['name'][j]]
ax.plot(data,'b-')
if i==0:
ax.set_ylabel(parameter['name'][j])
rep = len(data)
if i>0:
ax.yaxis.set_ticks([])
if j==rows-1:
ax.set_xlabel(algorithmnames[i-subplots])
else:
ax.xaxis.set_ticks([])
ax.plot([1]*rep,'r--')
ax.set_xlim(0,rep)
ax.set_ylim(parameter['minbound'][j],parameter['maxbound'][j])
#plt.tight_layout()
fig.savefig(fig_name, bbox_inches='tight')
def plot_parametertrace(results,parameternames=None,fig_name='Parameter_trace.png'):
"""
Get a plot with all values of a given parameter in your result array.
The plot will be saved as a .png file.
:results: Expects a numpy array which should have fields beginning with "par" for the parameters to plot
:type: array
:parameternames: A List of Strings with parameternames. A line object will be drawn for each String in the List.
:type: list
:return: Plot of all traces of the given parameternames.
:rtype: figure
"""
import matplotlib.pyplot as plt
fig=plt.figure(figsize=(16,9))
if not parameternames:
parameternames=get_parameternames(results)
names=''
i=1
for name in parameternames:
ax = plt.subplot(len(parameternames),1,i)
ax.plot(results['par'+name],label=name)
names+=name+'_'
ax.set_ylabel(name)
if i==len(parameternames):
ax.set_xlabel('Repetitions')
if i==1:
ax.set_title('Parametertrace')
ax.legend()
i+=1
fig.savefig(fig_name)
text='The figure has been saved as '+fig_name
print(text)
def plot_posterior_parametertrace(results,parameternames=None,threshold=0.1, fig_name='Posterior_parametertrace.png'):
"""
Get a plot with all values of a given parameter in your result array.
The plot will be saved as a .png file.
:results: Expects a numpy array which should have fields beginning with "par" for the parameters to plot
:type: array
:parameternames: A List of Strings with parameternames. A line object will be drawn for each String in the List.
:type: list
:return: Plot of all traces of the given parameternames.
:rtype: figure
"""
import matplotlib.pyplot as plt
fig=plt.figure(figsize=(16,9))
results=sort_like(results)
if not parameternames:
parameternames=get_parameternames(results)
names=''
i=1
for name in parameternames:
ax = plt.subplot(len(parameternames),1,i)
ax.plot(results['par'+name][int(len(results)*threshold):],label=name)
names+=name+'_'
ax.set_ylabel(name)
if i==len(parameternames):
ax.set_xlabel('Repetitions')
if i==1:
ax.set_title('Parametertrace')
ax.legend()
i+=1
fig.savefig(fig_name)
text='The figure has been saved as '+fig_name
print(text)
def plot_posterior(results,evaluation,dates=None,ylabel='Posterior model simulation',xlabel='Time',bestperc=0.1, fig_name='bestmodelrun.png'):
"""
Get a plot with the maximum objectivefunction of your simulations in your result
array.
The plot will be saved as a .png file.
Args:
results (array): Expects a numpy array which should have a field "like" for
objectivefunctions and fields starting with "sim" for simulations.
evaluation (list): Should contain the values of your observations. Expects that this list has the same length as the number of simulations in your result array.
Kwargs:
dates (list): A list of datetime values, equivalent to the evaluation data.
ylabel (str): Labels the y-axis with the given string.
xlabel (str): Labels the x-axis with the given string.
objectivefunction (str): Name of the objectivefunction used for the simulations.
objectivefunctionmax (boolean): If True the maximum value of the objectivefunction will be searched. If False, the minimum will be searched.
calculatelike (boolean): If True, the NSE will be calculated for each simulation in the result array.
Returns:
figure. Plot of the simulation with the maximum objectivefunction value in the result array as a blue line and dots for the evaluation data.
"""
import matplotlib.pyplot as plt
index,maximum=get_maxlikeindex(results)
sim=get_modelruns(results)
bestmodelrun=list(sim[index][0])#Transform values into list to ensure plotting
bestparameterset=list(get_parameters(results)[index][0])
parameternames=list(get_parameternames(results) )
bestparameterstring=''
maxNSE=spotpy.objectivefunctions.nashsutcliffe(bestmodelrun,evaluation)
for i in range(len(parameternames)):
if i%8==0:
bestparameterstring+='\n'
bestparameterstring+=parameternames[i]+'='+str(round(bestparameterset[i],4))+','
fig=plt.figure(figsize=(16,8))
plt.plot(bestmodelrun,'b-',label='Simulation='+str(round(maxNSE,4)))
plt.plot(evaluation,'ro',label='Evaluation')
plt.legend()
plt.ylabel(ylabel)
plt.xlabel(xlabel)
plt.title('Maximum objectivefunction of Simulations with '+bestparameterstring[0:-2])
fig.savefig(fig_name)
text='The figure has been saved as '+fig_name
print(text)
def plot_bestmodelrun(results,evaluation,fig_name ='Best_model_run.png'):
"""
Get a plot with the maximum objectivefunction of your simulations in your result
array.
The plot will be saved as a .png file.
:results: Expects a numpy array which should have a field "like" for
objectivefunctions and fields starting with "sim" for simulations.
:type: array
:evaluation: Should contain the values of your observations. Expects that this list has the same length as the number of simulations in your result array.
:type: list
Returns:
figure. Plot of the simulation with the maximum objectivefunction value in the result array as a blue line and dots for the evaluation data.
"""
import matplotlib.pyplot as plt
fig= plt.figure(figsize=(16,9))
for i in range(len(evaluation)):
if evaluation[i] == -9999:
evaluation[i] = np.nan
plt.plot(evaluation,'ro',markersize=1, label='Observation data')
simulation_fields = get_simulation_fields(results)
bestindex,bestobjf = get_maxlikeindex(results,verbose=False)
plt.plot(list(results[simulation_fields][bestindex][0]),'b-',label='Obj='+str(round(bestobjf,2)))
plt.xlabel('Number of Observation Points')
plt.ylabel ('Simulated value')
plt.legend(loc='upper right')
fig.savefig(fig_name,dpi=300)
text='A plot of the best model run has been saved as '+fig_name
print(text)
def plot_bestmodelruns(results,evaluation,algorithms=None,dates=None,ylabel='Best model simulation',xlabel='Date',objectivefunctionmax=True,calculatelike=True,fig_name='bestmodelrun.png'):
"""
Get a plot with the maximum objectivefunction of your simulations in your result
array.
The plot will be saved as a .png file.
Args:
results (list of arrays): Expects a list of numpy arrays which should have a field "like" for
objectivefunctions and fields starting with "sim" for simulations.
evaluation (list): Should contain the values of your observations. Expects that this list has the same length as the number of simulations in your result array.
Kwargs:
dates (list): A list of datetime values, equivalent to the evaluation data.
ylabel (str): Labels the y-axis with the given string.
xlabel (str): Labels the x-axis with the given string.
objectivefunction (str): Name of the objectivefunction used for the simulations.
objectivefunctionmax (boolean): If True the maximum value of the objectivefunction will be searched. If False, the minimum will be searched.
calculatelike (boolean): If True, the NSE will be calculated for each simulation in the result array.
Returns:
figure. Plot of the simulation with the maximum objectivefunction value in the result array as a blue line and dots for the evaluation data.
Example usage:
>>> spotpy.analyser.plot_bestmodelruns(results, evaluation, ylabel='Best model simulation')
"""
import matplotlib.pyplot as plt
plt.rc('font', **font)
fig=plt.figure(figsize=(17,8))
colors=['grey', 'black', 'brown','red','orange', 'yellow','green','blue',]
plt.plot(dates,evaluation,'ro',label='Evaluation data')
for i in range(len(results)):
if calculatelike:
likes=[]
sim=get_modelruns(results[i])
par=get_parameters(results[i])
for s in sim:
likes.append(spotpy.objectivefunctions.lognashsutcliffe(evaluation,list(s)))
maximum=max(likes)
index=likes.index(maximum)
bestmodelrun=list(sim[index])
bestparameterset=list(par[index])
print(bestparameterset)
else:
if objectivefunctionmax==True:
index,maximum=get_maxlikeindex(results[i])
else:
index,maximum=get_minlikeindex(results[i])
bestmodelrun=list(get_modelruns(results[i])[index][0])#Transform values into list to ensure plotting
maxLike=spotpy.objectivefunctions.lognashsutcliffe(evaluation,bestmodelrun)
if dates is not None:
plt.plot(dates,bestmodelrun,'-',color=colors[i],label=algorithms[i]+': LogNSE='+str(round(maxLike,4)))
else:
plt.plot(bestmodelrun,'-',color=colors[i],label=algorithms[i]+': AI='+str(round(maxLike,4)))
#plt.plot(evaluation,'ro',label='Evaluation data')
plt.legend(bbox_to_anchor=(.0, 0), loc=3)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
plt.ylim(15,50) #DELETE WHEN NOT USED WITH SOIL MOISTURE RESULTS
fig.savefig(fig_name)
text='The figure has been saved as '+fig_name
print(text)
def plot_objectivefunctiontraces(results,evaluation,algorithms,fig_name='Like_trace.png'):
import matplotlib.pyplot as plt
from matplotlib import colors
cnames=list(colors.cnames)
font = {'family' : 'calibri',
'weight' : 'normal',
'size' : 20}
plt.rc('font', **font)
fig=plt.figure(figsize=(16,3))
xticks=[5000,15000]
for i in range(len(results)):
ax = plt.subplot(1,len(results),i+1)
likes=calc_like(results[i],evaluation,spotpy.objectivefunctions.rmse)
ax.plot(likes,'b-')
ax.set_ylim(0,25)
ax.set_xlim(0,len(results[0]))
ax.set_xlabel(algorithms[i])
ax.xaxis.set_ticks(xticks)
if i==0:
ax.set_ylabel('RMSE')
ax.yaxis.set_ticks([0,10,20])
else:
ax.yaxis.set_ticks([])
plt.tight_layout()
fig.savefig(fig_name)
def plot_regression(results,evaluation,fig_name='regressionanalysis.png'):
import matplotlib.pyplot as plt
fig=plt.figure(figsize=(16,9))
simulations=get_modelruns(results)
for sim in simulations:
plt.plot(evaluation,list(sim),'bo',alpha=.05)
plt.ylabel('simulation')
plt.xlabel('evaluation')
plt.title('Regression between simulations and evaluation data')
fig.savefig(fig_name)
text='The figure has been saved as '+fig_name
print(text)
def plot_parameterInteraction(results, fig_name ='ParameterInteraction.png'):
'''Input: spotpy results array containing the sampled parameter values
Output: Scatter-matrix plot of the parameter distributions with a Gaussian kde on the diagonal'''
import matplotlib.pyplot as plt
import pandas as pd
parameterdistribution=get_parameters(results)
parameternames=get_parameternames(results)
df = pd.DataFrame(np.asarray(parameterdistribution).T.tolist(), columns=parameternames)
pd.plotting.scatter_matrix(df, alpha=0.2, figsize=(12, 12), diagonal='kde')
plt.savefig(fig_name,dpi=300)
def plot_allmodelruns(modelruns,observations,dates=None, fig_name='bestmodel.png'):
'''Input: Array of modelruns and list of observations
Output: Plot with all modelruns as lines and the observations as dots
'''
import matplotlib.pyplot as plt
fig=plt.figure(figsize=(16,9))
ax = plt.subplot(1,1,1)
if dates is not None:
for i in range(len(modelruns)):
if i==0:
ax.plot(dates, modelruns[i],'b',alpha=.05,label='Simulations')
else:
ax.plot(dates, modelruns[i],'b',alpha=.05)
else:
for i in range(len(modelruns)):
if i==0:
ax.plot(modelruns[i],'b',alpha=.05,label='Simulations')
else:
ax.plot(modelruns[i],'b',alpha=.05)
ax.plot(observations,'ro',label='Evaluation')
ax.legend()
ax.set_xlabel('Best model simulation')
ax.set_ylabel('Evaluation points')
ax.set_title('Maximum objectivefunction of Simulations')
fig.savefig(fig_name)
text='The figure has been saved as '+fig_name
print(text)
def plot_gelman_rubin(results, r_hat_values,fig_name='gelman_rub.png'):
'''Input: List of R_hat values of chains (see Gelman & Rubin 1992)
Output: Plot as seen for e.g. in (Sadegh and Vrugt 2014)'''
import matplotlib.pyplot as plt
fig= plt.figure(figsize=(9,6))
ax1 = plt.subplot(2,1,1)
for i in range(int(max(results['chain']))+1):
index=np.where(results['chain']==i)
ax1.plot(results['like1'][index], label='Chain '+str(i+1))
ax1.set_ylabel('Likelihood value')
ax1.legend()
ax2 =plt.subplot(2,1,2)
r_hat=np.array(r_hat_values)
ax2.plot([1.2]*len(r_hat),'k--')
for i in range(len(r_hat[0])):
ax2.plot(r_hat[:,i],label='x'+str(i+1))
ax2.set_yscale("log", nonpositive='clip')
ax2.set_ylabel('R$^d$ - convergence diagnostic')
ax2.set_xlabel('Number of chainruns')
ax2.legend()
fig.savefig(fig_name,dpi=150)
def gelman_rubin(x):
'''NOT USED YET'''
if np.shape(x) < (2,):
raise ValueError(
'Gelman-Rubin diagnostic requires multiple chains of the same length.')
try:
m, n = np.shape(x)
except ValueError:
return [gelman_rubin(np.transpose(y)) for y in np.transpose(x)]
# Calculate between-chain variance
B_over_n = np.sum((np.mean(x, 1) - np.mean(x)) ** 2) / (m - 1)
# Calculate within-chain variances
W = np.sum(
[(x[i] - xbar) ** 2 for i,
xbar in enumerate(np.mean(x,
1))]) / (m * (n - 1))
# (over) estimate of variance
s2 = W * (n - 1) / n + B_over_n
# Pooled posterior variance estimate
V = s2 + B_over_n / m
# Calculate PSRF
R = V / W
return R
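# Usage sketch for gelman_rubin (comment only; the function is marked 'NOT
# USED YET' above). With m chains of n samples each in a 2-D array, returned
# values close to 1 indicate convergence. The synthetic chains below are an
# assumption for illustration only:
#
# chains = np.random.randn(4, 1000)  # 4 chains, 1000 samples each
# print(gelman_rubin(chains))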
def plot_Geweke(parameterdistribution,parametername):
'''Input: Takes a list of sampled values for a parameter and its name as a string
Output: Plot as seen for e.g. in BUGS or PyMC'''
import matplotlib.pyplot as plt
# perform the Geweke test
Geweke_values = _Geweke(parameterdistribution)
# plot the results
fig = plt.figure()
plt.plot(Geweke_values,label=parametername)
plt.legend()
plt.title(parametername + '- Geweke_Test')
plt.xlabel('Subinterval')
plt.ylabel('Geweke Test')
plt.ylim([-3,3])
# plot the delimiting line
plt.plot( [2]*len(Geweke_values), 'r-.')
plt.plot( [-2]*len(Geweke_values), 'r-.')
def _compute_first_order(outputs, N, M, omega):
f = np.fft.fft(outputs)
Sp = np.power(np.absolute(f[np.arange(1, int((N + 1) / 2))]) / N, 2)
V = 2 * np.sum(Sp)
D1 = 2 * np.sum(Sp[np.arange(1, M + 1) * int(omega) - 1])
return D1 / V
def _compute_total_order(outputs, N, omega):
f = np.fft.fft(outputs)
Sp = np.power(np.absolute(f[np.arange(1, int((N + 1) / 2))]) / N, 2)
V = 2 * np.sum(Sp)
Dt = 2 * sum(Sp[np.arange(int(omega / 2))])
return (1 - Dt / V)
def _Geweke(samples, intervals=20):
'''Calculates Geweke Z-Scores'''
length=int(len(samples)/intervals/2)
# discard the first 10 per cent
first = 0.1*len(samples)
# create empty array to store the results
z = np.empty(intervals)
for k in np.arange(0, intervals):
# starting points of the two different subsamples
start1 = int(first + k*length)
start2 = int(len(samples)/2 + k*length)
# extract the sub samples
subsamples1 = samples[start1:start1+length]
subsamples2 = samples[start2:start2+length]
# calculate the mean and the variance
mean1 = np.mean(subsamples1)
mean2 = np.mean(subsamples2)
var1 = np.var(subsamples1)
var2 = np.var(subsamples2)
# calculate the Geweke test
z[k] = (mean1-mean2)/np.sqrt(var1+var2)
return z
| thouska/spotpy | spotpy/analyser.py | Python | mit | 38,375 | ["Gaussian"] | 9c0e7da62a3e9436c231ad00e3c37c29fa534b07898a73191409a523c7f77343 |
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Views tests for the OSF."""
from __future__ import absolute_import
import datetime as dt
from rest_framework import status as http_status
import json
import time
import unittest
from future.moves.urllib.parse import quote
from flask import request
import mock
import pytest
from nose.tools import * # noqa PEP8 asserts
from django.utils import timezone
from django.apps import apps
from django.core.exceptions import ValidationError
from django.db import connection, transaction
from django.test import TransactionTestCase
from django.test.utils import CaptureQueriesContext
from addons.github.tests.factories import GitHubAccountFactory
from addons.wiki.models import WikiPage
from framework.auth import cas, authenticate
from framework.flask import redirect
from framework.auth.core import generate_verification_key
from framework import auth
from framework.auth.campaigns import get_campaigns, is_institution_login, is_native_login, is_proxy_login, campaign_url_for
from framework.auth import Auth
from framework.auth.cas import get_login_url
from framework.auth.exceptions import InvalidTokenError
from framework.auth.utils import impute_names_model, ensure_external_identity_uniqueness
from framework.auth.views import login_and_register_handler
from framework.celery_tasks import handlers
from framework.exceptions import HTTPError, TemplateHTTPError
from framework.transactions.handlers import no_auto_transaction
from website import mailchimp_utils, mails, settings, language
from addons.osfstorage import settings as osfstorage_settings
from osf.models import AbstractNode, NodeLog, QuickFilesNode
from website.profile.utils import add_contributor_json, serialize_unregistered
from website.profile.views import update_osf_help_mails_subscription
from website.project.decorators import check_can_access
from website.project.model import has_anonymous_link
from website.project.signals import contributor_added
from website.project.views.contributor import (
deserialize_contributors,
notify_added_contributor,
send_claim_email,
send_claim_registered_email,
)
from website.project.views.node import _should_show_wiki_widget, _view_project, abbrev_authors
from website.util import api_url_for, web_url_for
from website.util import rubeus
from website.util.metrics import OsfSourceTags, OsfClaimedTags, provider_source_tag, provider_claimed_tag
from osf.utils import permissions
from osf.models import Comment
from osf.models import OSFUser, Tag
from osf.models import Email
from osf.models.spam import SpamStatus
from tests.base import (
assert_is_redirect,
capture_signals,
fake,
get_default_metaschema,
OsfTestCase,
assert_datetime_equal,
)
from tests.base import test_app as mock_app
from tests.test_cas_authentication import generate_external_user_with_resp, make_external_response
from api_tests.utils import create_test_file
pytestmark = pytest.mark.django_db
from osf.models import NodeRelation, QuickFilesNode, BlacklistedEmailDomain
from osf_tests.factories import (
fake_email,
ApiOAuth2ApplicationFactory,
ApiOAuth2PersonalTokenFactory,
AuthUserFactory,
CollectionFactory,
CommentFactory,
InstitutionFactory,
NodeFactory,
OSFGroupFactory,
PreprintFactory,
PreprintProviderFactory,
PrivateLinkFactory,
ProjectFactory,
ProjectWithAddonFactory,
RegistrationFactory,
RegistrationProviderFactory,
UserFactory,
UnconfirmedUserFactory,
UnregUserFactory,
RegionFactory,
DraftRegistrationFactory,
)
@mock_app.route('/errorexc')
def error_exc():
UserFactory()
raise RuntimeError
@mock_app.route('/error500')
def error500():
UserFactory()
return 'error', 500
@mock_app.route('/noautotransact')
@no_auto_transaction
def no_auto_transact():
UserFactory()
return 'error', 500
class TestViewsAreAtomic(OsfTestCase):
def test_error_response_rolls_back_transaction(self):
original_user_count = OSFUser.objects.count()
self.app.get('/error500', expect_errors=True)
assert_equal(OSFUser.objects.count(), original_user_count)
# Need to set debug = False in order to rollback transactions in transaction_teardown_request
mock_app.debug = False
try:
self.app.get('/errorexc', expect_errors=True)
except RuntimeError:
pass
mock_app.debug = True
self.app.get('/noautotransact', expect_errors=True)
assert_equal(OSFUser.objects.count(), original_user_count + 1)
@pytest.mark.enable_bookmark_creation
class TestViewingProjectWithPrivateLink(OsfTestCase):
def setUp(self):
super(TestViewingProjectWithPrivateLink, self).setUp()
self.user = AuthUserFactory() # Is NOT a contributor
self.project = ProjectFactory(is_public=False)
self.link = PrivateLinkFactory()
self.link.nodes.add(self.project)
self.link.save()
self.project_url = self.project.web_url_for('view_project')
def test_edit_private_link_empty(self):
node = ProjectFactory(creator=self.user)
link = PrivateLinkFactory()
link.nodes.add(node)
link.save()
url = node.api_url_for('project_private_link_edit')
res = self.app.put_json(url, {'pk': link._id, 'value': ''}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_in('Title cannot be blank', res.body.decode())
def test_edit_private_link_invalid(self):
node = ProjectFactory(creator=self.user)
link = PrivateLinkFactory()
link.nodes.add(node)
link.save()
url = node.api_url_for('project_private_link_edit')
res = self.app.put_json(url, {'pk': link._id, 'value': '<a></a>'}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_in('Invalid link name.', res.body.decode())
@mock.patch('framework.auth.core.Auth.private_link')
def test_can_be_anonymous_for_public_project(self, mock_property):
mock_property.return_value = mock.MagicMock()
mock_property.anonymous = True
anonymous_link = PrivateLinkFactory(anonymous=True)
anonymous_link.nodes.add(self.project)
anonymous_link.save()
self.project.set_privacy('public')
self.project.save()
self.project.reload()
auth = Auth(user=self.user, private_key=anonymous_link.key)
assert_true(has_anonymous_link(self.project, auth))
def test_has_private_link_key(self):
res = self.app.get(self.project_url, {'view_only': self.link.key})
assert_equal(res.status_code, 200)
def test_not_logged_in_no_key(self):
res = self.app.get(self.project_url, {'view_only': None})
assert_is_redirect(res)
res = res.follow(expect_errors=True)
assert_equal(res.status_code, 308)
assert_equal(
res.request.path,
'/login'
)
def test_logged_in_no_private_key(self):
res = self.app.get(self.project_url, {'view_only': None}, auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, http_status.HTTP_403_FORBIDDEN)
def test_logged_in_has_key(self):
res = self.app.get(
self.project_url, {'view_only': self.link.key}, auth=self.user.auth)
assert_equal(res.status_code, 200)
@unittest.skip('Skipping for now until we find a way to mock/set the referrer')
def test_prepare_private_key(self):
res = self.app.get(self.project_url, {'key': self.link.key})
res = res.click('Registrations')
assert_is_redirect(res)
res = res.follow()
assert_equal(res.status_code, 200)
assert_equal(res.request.GET['key'], self.link.key)
def test_cannot_access_registrations_or_forks_with_anon_key(self):
anonymous_link = PrivateLinkFactory(anonymous=True)
anonymous_link.nodes.add(self.project)
anonymous_link.save()
self.project.is_public = False
self.project.save()
url = self.project_url + 'registrations/?view_only={}'.format(anonymous_link.key)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_can_access_registrations_and_forks_with_not_anon_key(self):
link = PrivateLinkFactory(anonymous=False)
link.nodes.add(self.project)
link.save()
self.project.is_public = False
self.project.save()
url = self.project_url + 'registrations/?view_only={}'.format(self.link.key)
res = self.app.get(url)
assert_equal(res.status_code, 302)
assert_in(url.replace('/project/', ''), res.location)
def test_check_can_access_valid(self):
contributor = AuthUserFactory()
self.project.add_contributor(contributor, auth=Auth(self.project.creator))
self.project.save()
assert_true(check_can_access(self.project, contributor))
def test_check_can_access_osf_group_member_valid(self):
user = AuthUserFactory()
group = OSFGroupFactory(creator=user)
self.project.add_osf_group(group, permissions.READ)
self.project.save()
assert_true(check_can_access(self.project, user))
def test_check_user_access_invalid(self):
noncontrib = AuthUserFactory()
with assert_raises(HTTPError):
check_can_access(self.project, noncontrib)
def test_check_user_access_if_user_is_None(self):
assert_false(check_can_access(self.project, None))
def test_check_can_access_invalid_access_requests_enabled(self):
noncontrib = AuthUserFactory()
assert self.project.access_requests_enabled
with assert_raises(TemplateHTTPError):
check_can_access(self.project, noncontrib)
def test_check_can_access_invalid_access_requests_disabled(self):
noncontrib = AuthUserFactory()
self.project.access_requests_enabled = False
self.project.save()
with assert_raises(HTTPError):
check_can_access(self.project, noncontrib)
def test_logged_out_user_cannot_view_spammy_project_via_private_link(self):
self.project.spam_status = SpamStatus.SPAM
self.project.save()
res = self.app.get(self.project_url, {'view_only': self.link.key})
# Logged out user gets redirected to login page
assert_equal(res.status_code, 302)
def test_logged_in_user_cannot_view_spammy_project_via_private_link(self):
rando_user = AuthUserFactory()
self.project.spam_status = SpamStatus.SPAM
self.project.save()
res = self.app.get(
self.project_url,
{'view_only': self.link.key},
auth=rando_user.auth,
expect_errors=True
)
assert_equal(res.status_code, 403)
@pytest.mark.enable_bookmark_creation
class TestProjectViews(OsfTestCase):
def setUp(self):
super(TestProjectViews, self).setUp()
self.user1 = AuthUserFactory()
self.user1.save()
self.consolidate_auth1 = Auth(user=self.user1)
self.auth = self.user1.auth
self.user2 = AuthUserFactory()
self.auth2 = self.user2.auth
# A project has 2 contributors
self.project = ProjectFactory(
title='Ham',
description='Honey-baked',
creator=self.user1
)
self.project.add_contributor(self.user2, auth=Auth(self.user1))
self.project.save()
self.project2 = ProjectFactory(
title='Tofu',
description='Glazed',
creator=self.user1
)
self.project2.add_contributor(self.user2, auth=Auth(self.user1))
self.project2.save()
@mock.patch('framework.status.push_status_message')
def test_view_project_tos_status_message(self, mock_push_status_message):
self.app.get(
self.project.web_url_for('view_project'),
auth=self.auth
)
assert_true(mock_push_status_message.called)
assert_equal('terms_of_service', mock_push_status_message.mock_calls[0][2]['id'])
@mock.patch('framework.status.push_status_message')
def test_view_project_no_tos_status_message(self, mock_push_status_message):
self.user1.accepted_terms_of_service = timezone.now()
self.user1.save()
self.app.get(
self.project.web_url_for('view_project'),
auth=self.auth
)
assert_false(mock_push_status_message.called)
def test_node_setting_with_multiple_matched_institution_email_domains(self):
# User has alternate emails matching more than one institution's email domains
inst1 = InstitutionFactory(email_domains=['foo.bar'])
inst2 = InstitutionFactory(email_domains=['baz.qux'])
user = AuthUserFactory()
user.emails.create(address='queen@foo.bar')
user.emails.create(address='brian@baz.qux')
user.save()
project = ProjectFactory(creator=user)
# node settings page loads without error
url = project.web_url_for('node_setting')
res = self.app.get(url, auth=user.auth)
assert_equal(res.status_code, 200)
# user is automatically affiliated with institutions
# that matched email domains
user.reload()
assert_in(inst1, user.affiliated_institutions.all())
assert_in(inst2, user.affiliated_institutions.all())
def test_edit_title_empty(self):
node = ProjectFactory(creator=self.user1)
url = node.api_url_for('edit_node')
res = self.app.post_json(url, {'name': 'title', 'value': ''}, auth=self.user1.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_in('Title cannot be blank', res.body.decode())
def test_edit_title_invalid(self):
node = ProjectFactory(creator=self.user1)
url = node.api_url_for('edit_node')
res = self.app.post_json(url, {'name': 'title', 'value': '<a></a>'}, auth=self.user1.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_in('Invalid title.', res.body.decode())
def test_view_project_doesnt_select_for_update(self):
node = ProjectFactory(creator=self.user1)
url = node.api_url_for('view_project')
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
res = self.app.get(url, auth=self.user1.auth)
for_update_sql = connection.ops.for_update_sql()
assert_equal(res.status_code, 200)
assert not any(for_update_sql in query['sql'] for query in ctx.captured_queries)
def test_cannot_remove_only_visible_contributor(self):
user1_contrib = self.project.contributor_set.get(user=self.user1)
user1_contrib.visible = False
user1_contrib.save()
url = self.project.api_url_for('project_remove_contributor')
res = self.app.post_json(
url, {'contributorID': self.user2._id,
'nodeIDs': [self.project._id]}, auth=self.auth, expect_errors=True
)
assert_equal(res.status_code, http_status.HTTP_403_FORBIDDEN)
assert_equal(res.json['message_long'], 'Must have at least one bibliographic contributor')
assert_true(self.project.is_contributor(self.user2))
def test_remove_only_visible_contributor_return_false(self):
user1_contrib = self.project.contributor_set.get(user=self.user1)
user1_contrib.visible = False
user1_contrib.save()
ret = self.project.remove_contributor(contributor=self.user2, auth=self.consolidate_auth1)
assert_false(ret)
self.project.reload()
assert_true(self.project.is_contributor(self.user2))
def test_can_view_nested_project_as_admin(self):
self.parent_project = NodeFactory(
title='parent project',
category='project',
parent=self.project,
is_public=False
)
self.parent_project.save()
self.child_project = NodeFactory(
title='child project',
category='project',
parent=self.parent_project,
is_public=False
)
self.child_project.save()
url = self.child_project.web_url_for('view_project')
res = self.app.get(url, auth=self.auth)
assert_not_in('Private Project', res.body.decode())
assert_in('parent project', res.body.decode())
def test_edit_description(self):
url = '/api/v1/project/{0}/edit/'.format(self.project._id)
self.app.post_json(url,
{'name': 'description', 'value': 'Deep-fried'},
auth=self.auth)
self.project.reload()
assert_equal(self.project.description, 'Deep-fried')
def test_project_api_url(self):
url = self.project.api_url
res = self.app.get(url, auth=self.auth)
data = res.json
assert_equal(data['node']['category'], 'Project')
assert_equal(data['node']['node_type'], 'project')
assert_equal(data['node']['title'], self.project.title)
assert_equal(data['node']['is_public'], self.project.is_public)
assert_equal(data['node']['is_registration'], False)
assert_equal(data['node']['id'], self.project._primary_key)
assert_true(data['user']['is_contributor'])
assert_equal(data['node']['description'], self.project.description)
assert_equal(data['node']['url'], self.project.url)
assert_equal(data['node']['tags'], list(self.project.tags.values_list('name', flat=True)))
assert_in('forked_date', data['node'])
assert_in('registered_from_url', data['node'])
# TODO: Test "parent" and "user" output
def test_add_contributor_post(self):
# Two users are added as a contributor via a POST request
project = ProjectFactory(creator=self.user1, is_public=True)
user2 = UserFactory()
user3 = UserFactory()
url = '/api/v1/project/{0}/contributors/'.format(project._id)
dict2 = add_contributor_json(user2)
dict3 = add_contributor_json(user3)
dict2.update({
'permission': permissions.ADMIN,
'visible': True,
})
dict3.update({
'permission': permissions.WRITE,
'visible': False,
})
self.app.post_json(
url,
{
'users': [dict2, dict3],
'node_ids': [project._id],
},
content_type='application/json',
auth=self.auth,
).maybe_follow()
project.reload()
assert_in(user2, project.contributors)
# A log event was added
assert_equal(project.logs.latest().action, 'contributor_added')
assert_equal(len(project.contributors), 3)
assert project.has_permission(user2, permissions.ADMIN) is True
assert project.has_permission(user2, permissions.WRITE) is True
assert project.has_permission(user2, permissions.READ) is True
assert project.has_permission(user3, permissions.ADMIN) is False
assert project.has_permission(user3, permissions.WRITE) is True
assert project.has_permission(user3, permissions.READ) is True
def test_manage_permissions(self):
url = self.project.api_url + 'contributors/manage/'
self.app.post_json(
url,
{
'contributors': [
{'id': self.project.creator._id, 'permission': permissions.ADMIN,
'registered': True, 'visible': True},
{'id': self.user1._id, 'permission': permissions.READ,
'registered': True, 'visible': True},
{'id': self.user2._id, 'permission': permissions.ADMIN,
'registered': True, 'visible': True},
]
},
auth=self.auth,
)
self.project.reload()
assert self.project.has_permission(self.user1, permissions.ADMIN) is False
assert self.project.has_permission(self.user1, permissions.WRITE) is False
assert self.project.has_permission(self.user1, permissions.READ) is True
assert self.project.has_permission(self.user2, permissions.ADMIN) is True
assert self.project.has_permission(self.user2, permissions.WRITE) is True
assert self.project.has_permission(self.user2, permissions.READ) is True
def test_manage_permissions_again(self):
url = self.project.api_url + 'contributors/manage/'
self.app.post_json(
url,
{
'contributors': [
{'id': self.user1._id, 'permission': permissions.ADMIN,
'registered': True, 'visible': True},
{'id': self.user2._id, 'permission': permissions.ADMIN,
'registered': True, 'visible': True},
]
},
auth=self.auth,
)
self.project.reload()
self.app.post_json(
url,
{
'contributors': [
{'id': self.user1._id, 'permission': permissions.ADMIN,
'registered': True, 'visible': True},
{'id': self.user2._id, 'permission': permissions.READ,
'registered': True, 'visible': True},
]
},
auth=self.auth,
)
self.project.reload()
assert self.project.has_permission(self.user2, permissions.ADMIN) is False
assert self.project.has_permission(self.user2, permissions.WRITE) is False
assert self.project.has_permission(self.user2, permissions.READ) is True
assert self.project.has_permission(self.user1, permissions.ADMIN) is True
assert self.project.has_permission(self.user1, permissions.WRITE) is True
assert self.project.has_permission(self.user1, permissions.READ) is True
def test_contributor_manage_reorder(self):
        # Two registered users and an unregistered contributor are added, then reordered via a POST request
project = ProjectFactory(creator=self.user1, is_public=True)
reg_user1, reg_user2 = UserFactory(), UserFactory()
project.add_contributors(
[
{'user': reg_user1, 'permissions': permissions.ADMIN, 'visible': True},
{'user': reg_user2, 'permissions': permissions.ADMIN, 'visible': False},
]
)
# Add a non-registered user
unregistered_user = project.add_unregistered_contributor(
fullname=fake.name(), email=fake_email(),
auth=self.consolidate_auth1,
save=True,
)
url = project.api_url + 'contributors/manage/'
self.app.post_json(
url,
{
'contributors': [
{'id': reg_user2._id, 'permission': permissions.ADMIN,
'registered': True, 'visible': False},
{'id': project.creator._id, 'permission': permissions.ADMIN,
'registered': True, 'visible': True},
{'id': unregistered_user._id, 'permission': permissions.ADMIN,
'registered': False, 'visible': True},
{'id': reg_user1._id, 'permission': permissions.ADMIN,
'registered': True, 'visible': True},
]
},
auth=self.auth,
)
project.reload()
assert_equal(
# Note: Cast ForeignList to list for comparison
list(project.contributors),
[reg_user2, project.creator, unregistered_user, reg_user1]
)
assert_equal(
list(project.visible_contributors),
[project.creator, unregistered_user, reg_user1]
)
def test_project_remove_contributor(self):
url = self.project.api_url_for('project_remove_contributor')
# User 1 removes user2
payload = {'contributorID': self.user2._id,
'nodeIDs': [self.project._id]}
self.app.post(url, json.dumps(payload),
content_type='application/json',
auth=self.auth).maybe_follow()
self.project.reload()
assert_not_in(self.user2._id, self.project.contributors)
# A log event was added
assert_equal(self.project.logs.latest().action, 'contributor_removed')
def test_multiple_project_remove_contributor(self):
url = self.project.api_url_for('project_remove_contributor')
# User 1 removes user2
payload = {'contributorID': self.user2._id,
'nodeIDs': [self.project._id, self.project2._id]}
res = self.app.post(url, json.dumps(payload),
content_type='application/json',
auth=self.auth).maybe_follow()
self.project.reload()
self.project2.reload()
assert_not_in(self.user2._id, self.project.contributors)
assert_not_in('/dashboard/', res.json)
assert_not_in(self.user2._id, self.project2.contributors)
# A log event was added
assert_equal(self.project.logs.latest().action, 'contributor_removed')
def test_private_project_remove_self_not_admin(self):
url = self.project.api_url_for('project_remove_contributor')
# user2 removes self
payload = {'contributorID': self.user2._id,
'nodeIDs': [self.project._id]}
res = self.app.post(url, json.dumps(payload),
content_type='application/json',
auth=self.auth2).maybe_follow()
self.project.reload()
assert_equal(res.status_code, 200)
assert_equal(res.json['redirectUrl'], '/dashboard/')
assert_not_in(self.user2._id, self.project.contributors)
def test_public_project_remove_self_not_admin(self):
url = self.project.api_url_for('project_remove_contributor')
# user2 removes self
self.public_project = ProjectFactory(creator=self.user1, is_public=True)
self.public_project.add_contributor(self.user2, auth=Auth(self.user1))
self.public_project.save()
payload = {'contributorID': self.user2._id,
'nodeIDs': [self.public_project._id]}
res = self.app.post(url, json.dumps(payload),
content_type='application/json',
auth=self.auth2).maybe_follow()
self.public_project.reload()
assert_equal(res.status_code, 200)
assert_equal(res.json['redirectUrl'], '/' + self.public_project._id + '/')
assert_not_in(self.user2._id, self.public_project.contributors)
def test_project_remove_other_not_admin(self):
url = self.project.api_url_for('project_remove_contributor')
        # User 2 (not an admin) tries to remove user1
payload = {'contributorID': self.user1._id,
'nodeIDs': [self.project._id]}
res = self.app.post(url, json.dumps(payload),
content_type='application/json',
expect_errors=True,
auth=self.auth2).maybe_follow()
self.project.reload()
assert_equal(res.status_code, 403)
assert_equal(res.json['message_long'],
'You do not have permission to perform this action. '
'If this should not have occurred and the issue persists, '
+ language.SUPPORT_LINK
)
assert_in(self.user1, self.project.contributors)
def test_project_remove_fake_contributor(self):
url = self.project.api_url_for('project_remove_contributor')
        # User 1 tries to remove a nonexistent contributor
payload = {'contributorID': 'badid',
'nodeIDs': [self.project._id]}
res = self.app.post(url, json.dumps(payload),
content_type='application/json',
expect_errors=True,
auth=self.auth).maybe_follow()
self.project.reload()
# Assert the contributor id was invalid
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], 'Contributor not found.')
assert_not_in('badid', self.project.contributors)
def test_project_remove_self_only_admin(self):
url = self.project.api_url_for('project_remove_contributor')
        # User 1, the only admin, tries to remove themselves
payload = {'contributorID': self.user1._id,
'nodeIDs': [self.project._id]}
res = self.app.post(url, json.dumps(payload),
content_type='application/json',
expect_errors=True,
auth=self.auth).maybe_follow()
self.project.reload()
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], 'Could not remove contributor.')
assert_in(self.user1, self.project.contributors)
def test_get_contributors_abbrev(self):
# create a project with 3 registered contributors
project = ProjectFactory(creator=self.user1, is_public=True)
reg_user1, reg_user2 = UserFactory(), UserFactory()
project.add_contributors(
[
{'user': reg_user1, 'permissions': permissions.ADMIN, 'visible': True},
{'user': reg_user2, 'permissions': permissions.ADMIN, 'visible': True},
]
)
# add an unregistered contributor
project.add_unregistered_contributor(
fullname=fake.name(), email=fake_email(),
auth=self.consolidate_auth1,
save=True,
)
url = project.api_url_for('get_node_contributors_abbrev')
res = self.app.get(url, auth=self.auth)
assert_equal(len(project.contributors), 4)
assert_equal(len(res.json['contributors']), 3)
assert_equal(len(res.json['others_count']), 1)
assert_equal(res.json['contributors'][0]['separator'], ',')
assert_equal(res.json['contributors'][1]['separator'], ',')
assert_equal(res.json['contributors'][2]['separator'], ' &')
def test_edit_node_title(self):
url = '/api/v1/project/{0}/edit/'.format(self.project._id)
        # The title is changed through posting form data
self.app.post_json(url, {'name': 'title', 'value': 'Bacon'},
auth=self.auth).maybe_follow()
self.project.reload()
# The title was changed
assert_equal(self.project.title, 'Bacon')
# A log event was saved
assert_equal(self.project.logs.latest().action, 'edit_title')
def test_add_tag(self):
url = self.project.api_url_for('project_add_tag')
self.app.post_json(url, {'tag': "foo'ta#@%#%^&g?"}, auth=self.auth)
self.project.reload()
assert_in("foo'ta#@%#%^&g?", self.project.tags.values_list('name', flat=True))
assert_equal("foo'ta#@%#%^&g?", self.project.logs.latest().params['tag'])
def test_remove_tag(self):
self.project.add_tag("foo'ta#@%#%^&g?", auth=self.consolidate_auth1, save=True)
assert_in("foo'ta#@%#%^&g?", self.project.tags.values_list('name', flat=True))
url = self.project.api_url_for('project_remove_tag')
self.app.delete_json(url, {'tag': "foo'ta#@%#%^&g?"}, auth=self.auth)
self.project.reload()
assert_not_in("foo'ta#@%#%^&g?", self.project.tags.values_list('name', flat=True))
latest_log = self.project.logs.latest()
assert_equal('tag_removed', latest_log.action)
assert_equal("foo'ta#@%#%^&g?", latest_log.params['tag'])
# Regression test for #OSF-5257
def test_removal_empty_tag_throws_error(self):
url = self.project.api_url_for('project_remove_tag')
res = self.app.delete_json(url, {'tag': ''}, auth=self.auth, expect_errors=True)
assert_equal(res.status_code, http_status.HTTP_400_BAD_REQUEST)
# Regression test for #OSF-5257
def test_removal_unknown_tag_throws_error(self):
self.project.add_tag('narf', auth=self.consolidate_auth1, save=True)
url = self.project.api_url_for('project_remove_tag')
res = self.app.delete_json(url, {'tag': 'troz'}, auth=self.auth, expect_errors=True)
assert_equal(res.status_code, http_status.HTTP_409_CONFLICT)
def test_suspended_project(self):
node = NodeFactory(parent=self.project, creator=self.user1)
node.remove_node(Auth(self.user1))
node.reload()
node.suspended = True
node.save()
url = node.api_url
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 451)
def test_private_link_edit_name(self):
link = PrivateLinkFactory(name='link')
link.nodes.add(self.project)
link.save()
assert_equal(link.name, 'link')
url = self.project.api_url + 'private_link/edit/'
self.app.put_json(
url,
{'pk': link._id, 'value': 'new name'},
auth=self.auth,
).maybe_follow()
self.project.reload()
link.reload()
assert_equal(link.name, 'new name')
def test_remove_private_link(self):
link = PrivateLinkFactory()
link.nodes.add(self.project)
link.save()
url = self.project.api_url_for('remove_private_link')
self.app.delete_json(
url,
{'private_link_id': link._id},
auth=self.auth,
).maybe_follow()
self.project.reload()
link.reload()
assert_true(link.is_deleted)
def test_remove_private_link_log(self):
link = PrivateLinkFactory()
link.nodes.add(self.project)
link.save()
url = self.project.api_url_for('remove_private_link')
self.app.delete_json(
url,
{'private_link_id': link._id},
auth=self.auth,
).maybe_follow()
last_log = self.project.logs.latest()
assert last_log.action == NodeLog.VIEW_ONLY_LINK_REMOVED
assert not last_log.params.get('anonymous_link')
def test_remove_private_link_anonymous_log(self):
link = PrivateLinkFactory(anonymous=True)
link.nodes.add(self.project)
link.save()
url = self.project.api_url_for('remove_private_link')
self.app.delete_json(
url,
{'private_link_id': link._id},
auth=self.auth,
).maybe_follow()
last_log = self.project.logs.latest()
assert last_log.action == NodeLog.VIEW_ONLY_LINK_REMOVED
assert last_log.params.get('anonymous_link')
def test_remove_component(self):
node = NodeFactory(parent=self.project, creator=self.user1)
url = node.api_url
res = self.app.delete_json(url, {}, auth=self.auth).maybe_follow()
node.reload()
assert_equal(node.is_deleted, True)
assert_in('url', res.json)
assert_equal(res.json['url'], self.project.url)
def test_cant_remove_component_if_not_admin(self):
node = NodeFactory(parent=self.project, creator=self.user1)
non_admin = AuthUserFactory()
node.add_contributor(
non_admin,
permissions=permissions.WRITE,
save=True,
)
url = node.api_url
res = self.app.delete_json(
url, {}, auth=non_admin.auth,
expect_errors=True,
).maybe_follow()
assert_equal(res.status_code, http_status.HTTP_403_FORBIDDEN)
assert_false(node.is_deleted)
def test_view_project_returns_whether_to_show_wiki_widget(self):
user = AuthUserFactory()
project = ProjectFactory(creator=user, is_public=True)
project.add_contributor(user)
project.save()
url = project.api_url_for('view_project')
res = self.app.get(url, auth=user.auth)
assert_equal(res.status_code, http_status.HTTP_200_OK)
assert_in('show_wiki_widget', res.json['user'])
def test_fork_grandcomponents_has_correct_root(self):
user = AuthUserFactory()
project = ProjectFactory(creator=user)
auth = Auth(project.creator)
child = NodeFactory(parent=project, creator=user)
grand_child = NodeFactory(parent=child, creator=user)
project.save()
fork = project.fork_node(auth)
fork.save()
grand_child_fork = fork.nodes[0].nodes[0]
assert_equal(grand_child_fork.root, fork)
def test_fork_count_does_not_include_deleted_forks(self):
user = AuthUserFactory()
project = ProjectFactory(creator=user)
auth = Auth(project.creator)
fork = project.fork_node(auth)
project.save()
fork.remove_node(auth)
url = project.api_url_for('view_project')
res = self.app.get(url, auth=user.auth)
assert_in('fork_count', res.json['node'])
assert_equal(0, res.json['node']['fork_count'])
def test_fork_count_does_not_include_fork_registrations(self):
user = AuthUserFactory()
project = ProjectFactory(creator=user)
auth = Auth(project.creator)
fork = project.fork_node(auth)
project.save()
registration = RegistrationFactory(project=fork)
url = project.api_url_for('view_project')
res = self.app.get(url, auth=user.auth)
assert_in('fork_count', res.json['node'])
assert_equal(1, res.json['node']['fork_count'])
def test_registration_retraction_redirect(self):
url = self.project.web_url_for('node_registration_retraction_redirect')
res = self.app.get(url, auth=self.auth)
assert_equal(res.status_code, 302)
assert_in(self.project.web_url_for('node_registration_retraction_get', _guid=True), res.location)
def test_update_node(self):
url = self.project.api_url_for('update_node')
res = self.app.put_json(url, {'title': 'newtitle'}, auth=self.auth)
assert_equal(res.status_code, 200)
self.project.reload()
assert_equal(self.project.title, 'newtitle')
# Regression test
def test_update_node_with_tags(self):
self.project.add_tag('cheezebørger', auth=Auth(self.project.creator), save=True)
url = self.project.api_url_for('update_node')
res = self.app.put_json(url, {'title': 'newtitle'}, auth=self.auth)
assert_equal(res.status_code, 200)
self.project.reload()
assert_equal(self.project.title, 'newtitle')
# Regression test
def test_retraction_view(self):
project = ProjectFactory(creator=self.user1, is_public=True)
registration = RegistrationFactory(project=project, is_public=True)
reg_file = create_test_file(registration, user=registration.creator, create_guid=True)
registration.retract_registration(self.user1)
approval_token = registration.retraction.approval_state[self.user1._id]['approval_token']
registration.retraction.approve_retraction(self.user1, approval_token)
registration.save()
url = registration.web_url_for('view_project')
res = self.app.get(url, auth=self.auth)
assert_not_in('Mako Runtime Error', res.body.decode())
assert_in(registration.title, res.body.decode())
assert_equal(res.status_code, 200)
for route in ['files', 'wiki/home', 'contributors', 'settings', 'withdraw', 'register', 'register/fakeid']:
res = self.app.get('{}{}/'.format(url, route), auth=self.auth, allow_redirects=True)
assert_equal(res.status_code, 302, route)
res = res.follow()
assert_equal(res.status_code, 200, route)
assert_in('This project is a withdrawn registration of', res.body.decode(), route)
res = self.app.get('/{}/'.format(reg_file.guids.first()._id))
assert_equal(res.status_code, 200)
assert_in('This project is a withdrawn registration of', res.body.decode())
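# Tests for the get_editable_children view. setUp builds a four-level chain of
# components with alternating public/private visibility; each test checks one
# facet of the serialized result: count, order, indent, parent ids, privacy
# flags, and titles.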
class TestEditableChildrenViews(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user, is_public=False)
self.child = ProjectFactory(parent=self.project, creator=self.user, is_public=True)
self.grandchild = ProjectFactory(parent=self.child, creator=self.user, is_public=False)
self.great_grandchild = ProjectFactory(parent=self.grandchild, creator=self.user, is_public=True)
self.great_great_grandchild = ProjectFactory(parent=self.great_grandchild, creator=self.user, is_public=False)
url = self.project.api_url_for('get_editable_children')
self.project_results = self.app.get(url, auth=self.user.auth).json
def test_get_editable_children(self):
assert_equal(len(self.project_results['children']), 4)
assert_equal(self.project_results['node']['id'], self.project._id)
def test_editable_children_order(self):
assert_equal(self.project_results['children'][0]['id'], self.child._id)
assert_equal(self.project_results['children'][1]['id'], self.grandchild._id)
assert_equal(self.project_results['children'][2]['id'], self.great_grandchild._id)
assert_equal(self.project_results['children'][3]['id'], self.great_great_grandchild._id)
def test_editable_children_indents(self):
assert_equal(self.project_results['children'][0]['indent'], 0)
assert_equal(self.project_results['children'][1]['indent'], 1)
assert_equal(self.project_results['children'][2]['indent'], 2)
assert_equal(self.project_results['children'][3]['indent'], 3)
def test_editable_children_parents(self):
assert_equal(self.project_results['children'][0]['parent_id'], self.project._id)
assert_equal(self.project_results['children'][1]['parent_id'], self.child._id)
assert_equal(self.project_results['children'][2]['parent_id'], self.grandchild._id)
assert_equal(self.project_results['children'][3]['parent_id'], self.great_grandchild._id)
def test_editable_children_privacy(self):
assert_false(self.project_results['node']['is_public'])
assert_true(self.project_results['children'][0]['is_public'])
assert_false(self.project_results['children'][1]['is_public'])
assert_true(self.project_results['children'][2]['is_public'])
assert_false(self.project_results['children'][3]['is_public'])
def test_editable_children_titles(self):
assert_equal(self.project_results['node']['title'], self.project.title)
assert_equal(self.project_results['children'][0]['title'], self.child.title)
assert_equal(self.project_results['children'][1]['title'], self.grandchild.title)
assert_equal(self.project_results['children'][2]['title'], self.great_grandchild.title)
assert_equal(self.project_results['children'][3]['title'], self.great_great_grandchild.title)
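# Tests for the get_node_tree view: a single node, a node with children, and
# how the returned tree is filtered by the requesting user's permissions.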
class TestGetNodeTree(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = AuthUserFactory()
self.user2 = AuthUserFactory()
def test_get_single_node(self):
project = ProjectFactory(creator=self.user)
# child = NodeFactory(parent=project, creator=self.user)
url = project.api_url_for('get_node_tree')
res = self.app.get(url, auth=self.user.auth)
node_id = res.json[0]['node']['id']
assert_equal(node_id, project._primary_key)
def test_get_node_with_children(self):
project = ProjectFactory(creator=self.user)
child1 = NodeFactory(parent=project, creator=self.user)
child2 = NodeFactory(parent=project, creator=self.user2)
child3 = NodeFactory(parent=project, creator=self.user)
url = project.api_url_for('get_node_tree')
res = self.app.get(url, auth=self.user.auth)
tree = res.json[0]
parent_node_id = tree['node']['id']
child_ids = [child['node']['id'] for child in tree['children']]
assert_equal(parent_node_id, project._primary_key)
assert_in(child1._primary_key, child_ids)
assert_in(child2._primary_key, child_ids)
assert_in(child3._primary_key, child_ids)
def test_get_node_with_child_linked_to_parent(self):
project = ProjectFactory(creator=self.user)
child1 = NodeFactory(parent=project, creator=self.user)
child1.save()
url = project.api_url_for('get_node_tree')
res = self.app.get(url, auth=self.user.auth)
tree = res.json[0]
parent_node_id = tree['node']['id']
child1_id = tree['children'][0]['node']['id']
assert_equal(child1_id, child1._primary_key)
def test_get_node_not_parent_owner(self):
project = ProjectFactory(creator=self.user2)
child = NodeFactory(parent=project, creator=self.user2)
url = project.api_url_for('get_node_tree')
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
assert_equal(res.json, [])
    # Parent node should show because user2 has read access; only child3, where user2 is a contributor, should appear among the children
def test_get_node_parent_not_admin(self):
project = ProjectFactory(creator=self.user)
project.add_contributor(self.user2, auth=Auth(self.user))
project.save()
child1 = NodeFactory(parent=project, creator=self.user)
child2 = NodeFactory(parent=project, creator=self.user)
child3 = NodeFactory(parent=project, creator=self.user)
child3.add_contributor(self.user2, auth=Auth(self.user))
url = project.api_url_for('get_node_tree')
res = self.app.get(url, auth=self.user2.auth)
tree = res.json[0]
parent_node_id = tree['node']['id']
children = tree['children']
assert_equal(parent_node_id, project._primary_key)
assert_equal(len(children), 1)
assert_equal(children[0]['node']['id'], child3._primary_key)
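# Tests for the user profile API: social links, jobs, schools, names, timezone,
# locale, emails, mailing-list sync, quick files display, and storage region.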
@pytest.mark.enable_enqueue_task
@pytest.mark.enable_implicit_clean
@pytest.mark.enable_quickfiles_creation
class TestUserProfile(OsfTestCase):
def setUp(self):
super(TestUserProfile, self).setUp()
self.user = AuthUserFactory()
def test_unserialize_social(self):
url = api_url_for('unserialize_social')
payload = {
'profileWebsites': ['http://frozen.pizza.com/reviews'],
'twitter': 'howtopizza',
'github': 'frozenpizzacode',
}
self.app.put_json(
url,
payload,
auth=self.user.auth,
)
self.user.reload()
for key, value in payload.items():
assert_equal(self.user.social[key], value)
assert_true(self.user.social['researcherId'] is None)
# Regression test for help-desk ticket
def test_making_email_primary_is_not_case_sensitive(self):
user = AuthUserFactory(username='fred@queen.test')
# make confirmed email have different casing
email = user.emails.first()
email.address = email.address.capitalize()
email.save()
url = api_url_for('update_user')
res = self.app.put_json(
url,
{'id': user._id, 'emails': [{'address': 'fred@queen.test', 'primary': True, 'confirmed': True}]},
auth=user.auth
)
assert_equal(res.status_code, 200)
def test_unserialize_social_validation_failure(self):
url = api_url_for('unserialize_social')
        # one of the profileWebsites URLs is invalid
payload = {
'profileWebsites': ['http://goodurl.com', 'http://invalidurl'],
'twitter': 'howtopizza',
'github': 'frozenpizzacode',
}
res = self.app.put_json(
url,
payload,
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], 'Invalid personal URL.')
def test_serialize_social_editable(self):
self.user.social['twitter'] = 'howtopizza'
self.user.social['profileWebsites'] = ['http://www.cos.io', 'http://www.osf.io', 'http://www.wordup.com']
self.user.save()
url = api_url_for('serialize_social')
res = self.app.get(
url,
auth=self.user.auth,
)
assert_equal(res.json.get('twitter'), 'howtopizza')
assert_equal(res.json.get('profileWebsites'), ['http://www.cos.io', 'http://www.osf.io', 'http://www.wordup.com'])
assert_true(res.json.get('github') is None)
assert_true(res.json['editable'])
def test_serialize_social_not_editable(self):
user2 = AuthUserFactory()
self.user.social['twitter'] = 'howtopizza'
self.user.social['profileWebsites'] = ['http://www.cos.io', 'http://www.osf.io', 'http://www.wordup.com']
self.user.save()
url = api_url_for('serialize_social', uid=self.user._id)
res = self.app.get(
url,
auth=user2.auth,
)
assert_equal(res.json.get('twitter'), 'howtopizza')
assert_equal(res.json.get('profileWebsites'), ['http://www.cos.io', 'http://www.osf.io', 'http://www.wordup.com'])
assert_true(res.json.get('github') is None)
assert_false(res.json['editable'])
def test_serialize_social_addons_editable(self):
self.user.add_addon('github')
github_account = GitHubAccountFactory()
github_account.save()
self.user.external_accounts.add(github_account)
self.user.save()
url = api_url_for('serialize_social')
res = self.app.get(
url,
auth=self.user.auth,
)
assert_equal(
res.json['addons']['github'],
'abc'
)
def test_serialize_social_addons_not_editable(self):
user2 = AuthUserFactory()
self.user.add_addon('github')
github_account = GitHubAccountFactory()
github_account.save()
self.user.external_accounts.add(github_account)
self.user.save()
url = api_url_for('serialize_social', uid=self.user._id)
res = self.app.get(
url,
auth=user2.auth,
)
assert_not_in('addons', res.json)
def test_unserialize_and_serialize_jobs(self):
jobs = [{
'institution': 'an institution',
'department': 'a department',
'title': 'a title',
'startMonth': 'January',
'startYear': '2001',
'endMonth': 'March',
'endYear': '2001',
'ongoing': False,
}, {
'institution': 'another institution',
'department': None,
'title': None,
'startMonth': 'May',
'startYear': '2001',
'endMonth': None,
'endYear': None,
'ongoing': True,
}]
payload = {'contents': jobs}
url = api_url_for('unserialize_jobs')
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(len(self.user.jobs), 2)
url = api_url_for('serialize_jobs')
res = self.app.get(
url,
auth=self.user.auth,
)
for i, job in enumerate(jobs):
assert_equal(job, res.json['contents'][i])
def test_unserialize_and_serialize_schools(self):
schools = [{
'institution': 'an institution',
'department': 'a department',
'degree': 'a degree',
'startMonth': 1,
'startYear': '2001',
'endMonth': 5,
'endYear': '2001',
'ongoing': False,
}, {
'institution': 'another institution',
'department': None,
'degree': None,
'startMonth': 5,
'startYear': '2001',
'endMonth': None,
'endYear': None,
'ongoing': True,
}]
payload = {'contents': schools}
url = api_url_for('unserialize_schools')
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(len(self.user.schools), 2)
url = api_url_for('serialize_schools')
res = self.app.get(
url,
auth=self.user.auth,
)
for i, job in enumerate(schools):
assert_equal(job, res.json['contents'][i])
@mock.patch('osf.models.user.OSFUser.check_spam')
def test_unserialize_jobs(self, mock_check_spam):
jobs = [
{
'institution': fake.company(),
'department': fake.catch_phrase(),
'title': fake.bs(),
'startMonth': 5,
'startYear': '2013',
'endMonth': 3,
'endYear': '2014',
'ongoing': False,
}
]
payload = {'contents': jobs}
url = api_url_for('unserialize_jobs')
res = self.app.put_json(url, payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.user.reload()
# jobs field is updated
assert_equal(self.user.jobs, jobs)
assert mock_check_spam.called
def test_unserialize_names(self):
fake_fullname_w_spaces = ' {} '.format(fake.name())
names = {
'full': fake_fullname_w_spaces,
'given': 'Tea',
'middle': 'Gray',
'family': 'Pot',
'suffix': 'Ms.',
}
url = api_url_for('unserialize_names')
res = self.app.put_json(url, names, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.user.reload()
# user is updated
assert_equal(self.user.fullname, fake_fullname_w_spaces.strip())
assert_equal(self.user.given_name, names['given'])
assert_equal(self.user.middle_names, names['middle'])
assert_equal(self.user.family_name, names['family'])
assert_equal(self.user.suffix, names['suffix'])
@mock.patch('osf.models.user.OSFUser.check_spam')
def test_unserialize_schools(self, mock_check_spam):
schools = [
{
'institution': fake.company(),
'department': fake.catch_phrase(),
'degree': fake.bs(),
'startMonth': 5,
'startYear': '2013',
'endMonth': 3,
'endYear': '2014',
'ongoing': False,
}
]
payload = {'contents': schools}
url = api_url_for('unserialize_schools')
res = self.app.put_json(url, payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.user.reload()
# schools field is updated
assert_equal(self.user.schools, schools)
assert mock_check_spam.called
@mock.patch('osf.models.user.OSFUser.check_spam')
def test_unserialize_jobs_valid(self, mock_check_spam):
jobs = [
{
'institution': fake.company(),
'department': fake.catch_phrase(),
'title': fake.bs(),
'startMonth': 5,
'startYear': '2013',
'endMonth': 3,
'endYear': '2014',
'ongoing': False,
}
]
payload = {'contents': jobs}
url = api_url_for('unserialize_jobs')
res = self.app.put_json(url, payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert mock_check_spam.called
def test_update_user_timezone(self):
assert_equal(self.user.timezone, 'Etc/UTC')
payload = {'timezone': 'America/New_York', 'id': self.user._id}
url = api_url_for('update_user', uid=self.user._id)
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(self.user.timezone, 'America/New_York')
def test_update_user_locale(self):
assert_equal(self.user.locale, 'en_US')
payload = {'locale': 'de_DE', 'id': self.user._id}
url = api_url_for('update_user', uid=self.user._id)
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(self.user.locale, 'de_DE')
def test_update_user_locale_none(self):
assert_equal(self.user.locale, 'en_US')
payload = {'locale': None, 'id': self.user._id}
url = api_url_for('update_user', uid=self.user._id)
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(self.user.locale, 'en_US')
def test_update_user_locale_empty_string(self):
assert_equal(self.user.locale, 'en_US')
payload = {'locale': '', 'id': self.user._id}
url = api_url_for('update_user', uid=self.user._id)
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(self.user.locale, 'en_US')
def test_cannot_update_user_without_user_id(self):
user1 = AuthUserFactory()
url = api_url_for('update_user')
header = {'emails': [{'address': user1.username}]}
res = self.app.put_json(url, header, auth=user1.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], '"id" is required')
@mock.patch('framework.auth.views.mails.send_mail')
def test_add_emails_return_emails(self, send_mail):
user1 = AuthUserFactory()
url = api_url_for('update_user')
email = 'test@cos.io'
header = {'id': user1._id,
'emails': [{'address': user1.username, 'primary': True, 'confirmed': True},
{'address': email, 'primary': False, 'confirmed': False}
]}
res = self.app.put_json(url, header, auth=user1.auth)
assert_equal(res.status_code, 200)
assert_in('emails', res.json['profile'])
assert_equal(len(res.json['profile']['emails']), 2)
@mock.patch('framework.auth.views.mails.send_mail')
def test_resend_confirmation_return_emails(self, send_mail):
user1 = AuthUserFactory()
url = api_url_for('resend_confirmation')
email = 'test@cos.io'
header = {'id': user1._id,
'email': {'address': email, 'primary': False, 'confirmed': False}
}
res = self.app.put_json(url, header, auth=user1.auth)
assert_equal(res.status_code, 200)
assert_in('emails', res.json['profile'])
assert_equal(len(res.json['profile']['emails']), 2)
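    # Changing the primary email should resync the MailChimp subscription:
    # unsubscribe the old address and subscribe the new one, but only if the
    # user is actually subscribed to the list.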
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_update_user_mailing_lists(self, mock_get_mailchimp_api, send_mail):
email = fake_email()
self.user.emails.create(address=email)
list_name = 'foo'
self.user.mailchimp_mailing_lists[list_name] = True
self.user.save()
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}
list_id = mailchimp_utils.get_list_id_from_name(list_name)
url = api_url_for('update_user', uid=self.user._id)
emails = [
{'address': self.user.username, 'primary': False, 'confirmed': True},
{'address': email, 'primary': True, 'confirmed': True}]
payload = {'locale': '', 'id': self.user._id, 'emails': emails}
self.app.put_json(url, payload, auth=self.user.auth)
# the test app doesn't have celery handlers attached, so we need to call this manually.
handlers.celery_teardown_request()
assert mock_client.lists.unsubscribe.called
mock_client.lists.unsubscribe.assert_called_with(
id=list_id,
email={'email': self.user.username},
send_goodbye=True
)
mock_client.lists.subscribe.assert_called_with(
id=list_id,
email={'email': email},
merge_vars={
'fname': self.user.given_name,
'lname': self.user.family_name,
},
double_optin=False,
update_existing=True
)
handlers.celery_teardown_request()
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_unsubscribe_mailchimp_not_called_if_user_not_subscribed(self, mock_get_mailchimp_api, send_mail):
email = fake_email()
self.user.emails.create(address=email)
list_name = 'foo'
self.user.mailchimp_mailing_lists[list_name] = False
self.user.save()
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}
url = api_url_for('update_user', uid=self.user._id)
emails = [
{'address': self.user.username, 'primary': False, 'confirmed': True},
{'address': email, 'primary': True, 'confirmed': True}]
payload = {'locale': '', 'id': self.user._id, 'emails': emails}
self.app.put_json(url, payload, auth=self.user.auth)
assert_equal(mock_client.lists.unsubscribe.call_count, 0)
assert_equal(mock_client.lists.subscribe.call_count, 0)
handlers.celery_teardown_request()
def test_user_with_quickfiles(self):
quickfiles_node = QuickFilesNode.objects.get_for_user(self.user)
create_test_file(quickfiles_node, self.user, filename='skrr_skrrrrrrr.pdf')
url = web_url_for('profile_view_id', uid=self.user._id)
res = self.app.get(url, auth=self.user.auth)
assert_in('Quick files', res.body.decode())
def test_user_with_no_quickfiles(self):
assert(not QuickFilesNode.objects.first().files.filter(type='osf.osfstoragefile').exists())
url = web_url_for('profile_view_id', uid=self.user._primary_key)
res = self.app.get(url, auth=self.user.auth)
assert_not_in('Quick files', res.body.decode())
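    # Updating the user's default osfstorage region via /api/v1/profile/region/.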
def test_user_update_region(self):
user_settings = self.user.get_addon('osfstorage')
assert user_settings.default_region_id == 1
url = '/api/v1/profile/region/'
auth = self.user.auth
region = RegionFactory(name='Frankfort', _id='eu-central-1')
payload = {'region_id': 'eu-central-1'}
res = self.app.put_json(url, payload, auth=auth)
user_settings.reload()
assert user_settings.default_region_id == region.id
def test_user_update_region_missing_region_id_key(self):
url = '/api/v1/profile/region/'
auth = self.user.auth
region = RegionFactory(name='Frankfort', _id='eu-central-1')
payload = {'bad_key': 'eu-central-1'}
res = self.app.put_json(url, payload, auth=auth, expect_errors=True)
assert res.status_code == 400
def test_user_update_region_missing_bad_region(self):
url = '/api/v1/profile/region/'
auth = self.user.auth
payload = {'region_id': 'bad-region-1'}
res = self.app.put_json(url, payload, auth=auth, expect_errors=True)
assert res.status_code == 404
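# Access control for the OAuth2 application detail page: owner-only, with
# 410/404 responses for deleted or nonexistent applications.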
class TestUserProfileApplicationsPage(OsfTestCase):
def setUp(self):
super(TestUserProfileApplicationsPage, self).setUp()
self.user = AuthUserFactory()
self.user2 = AuthUserFactory()
self.platform_app = ApiOAuth2ApplicationFactory(owner=self.user)
self.detail_url = web_url_for('oauth_application_detail', client_id=self.platform_app.client_id)
def test_non_owner_cant_access_detail_page(self):
res = self.app.get(self.detail_url, auth=self.user2.auth, expect_errors=True)
assert_equal(res.status_code, http_status.HTTP_403_FORBIDDEN)
def test_owner_cant_access_deleted_application(self):
self.platform_app.is_active = False
self.platform_app.save()
res = self.app.get(self.detail_url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, http_status.HTTP_410_GONE)
def test_owner_cant_access_nonexistent_application(self):
url = web_url_for('oauth_application_detail', client_id='nonexistent')
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, http_status.HTTP_404_NOT_FOUND)
def test_url_has_not_broken(self):
assert_equal(self.platform_app.url, self.detail_url)
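# Sanity check that the personal access token detail URL matches the model's url.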
class TestUserProfileTokensPage(OsfTestCase):
def setUp(self):
super(TestUserProfileTokensPage, self).setUp()
self.user = AuthUserFactory()
self.token = ApiOAuth2PersonalTokenFactory()
self.detail_url = web_url_for('personal_access_token_detail', _id=self.token._id)
def test_url_has_not_broken(self):
assert_equal(self.token.url, self.detail_url)
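# Tests for account settings: the password-change flow (validation errors and
# rate limiting of failed old-password attempts), the account export request
# throttle, and unconfirmed-email helpers.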
class TestUserAccount(OsfTestCase):
def setUp(self):
super(TestUserAccount, self).setUp()
self.user = AuthUserFactory()
self.user.set_password('password')
self.user.auth = (self.user.username, 'password')
self.user.save()
def test_password_change_valid(self,
old_password='password',
new_password='Pa$$w0rd',
confirm_password='Pa$$w0rd'):
url = web_url_for('user_account_password')
post_data = {
'old_password': old_password,
'new_password': new_password,
'confirm_password': confirm_password,
}
res = self.app.post(url, post_data, auth=(self.user.username, old_password))
        assert_equal(res.status_code, 302)
res = res.follow(auth=(self.user.username, new_password))
        assert_equal(res.status_code, 200)
self.user.reload()
assert_true(self.user.check_password(new_password))
@mock.patch('website.profile.views.push_status_message')
def test_user_account_password_reset_query_params(self, mock_push_status_message):
url = web_url_for('user_account') + '?password_reset=True'
res = self.app.get(url, auth=(self.user.auth))
assert_true(mock_push_status_message.called)
assert_in('Password updated successfully', mock_push_status_message.mock_calls[0][1][0])
@mock.patch('website.profile.views.push_status_message')
def test_password_change_invalid(self, mock_push_status_message, old_password='', new_password='',
confirm_password='', error_message='Old password is invalid'):
url = web_url_for('user_account_password')
post_data = {
'old_password': old_password,
'new_password': new_password,
'confirm_password': confirm_password,
}
res = self.app.post(url, post_data, auth=self.user.auth)
        assert_equal(res.status_code, 302)
res = res.follow(auth=self.user.auth)
        assert_equal(res.status_code, 200)
self.user.reload()
assert_false(self.user.check_password(new_password))
assert_true(mock_push_status_message.called)
error_strings = [e[1][0] for e in mock_push_status_message.mock_calls]
assert_in(error_message, error_strings)
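    # Repeated failures with a wrong old password increment
    # old_password_invalid_attempts and eventually throttle further attempts;
    # a correct old password (or a successful change) leaves the counter at zero.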
@mock.patch('website.profile.views.push_status_message')
def test_password_change_rate_limiting(self, mock_push_status_message):
assert self.user.change_password_last_attempt is None
assert self.user.old_password_invalid_attempts == 0
url = web_url_for('user_account_password')
post_data = {
'old_password': 'invalid old password',
'new_password': 'this is a new password',
'confirm_password': 'this is a new password',
}
res = self.app.post(url, post_data, auth=self.user.auth)
self.user.reload()
assert self.user.change_password_last_attempt is not None
assert self.user.old_password_invalid_attempts == 1
assert_true(200, res.status_code)
# Make a second request
res = self.app.post(url, post_data, auth=self.user.auth, expect_errors=True)
        assert_true(len(mock_push_status_message.mock_calls) == 2)
assert_true('Old password is invalid' == mock_push_status_message.mock_calls[1][1][0])
self.user.reload()
assert self.user.change_password_last_attempt is not None
assert self.user.old_password_invalid_attempts == 2
# Make a third request
res = self.app.post(url, post_data, auth=self.user.auth, expect_errors=True)
        assert_true(len(mock_push_status_message.mock_calls) == 3)
assert_true('Old password is invalid' == mock_push_status_message.mock_calls[2][1][0])
self.user.reload()
assert self.user.change_password_last_attempt is not None
assert self.user.old_password_invalid_attempts == 3
# Make a fourth request
res = self.app.post(url, post_data, auth=self.user.auth, expect_errors=True)
assert_true(mock_push_status_message.called)
error_strings = mock_push_status_message.mock_calls[3][2]
assert_in('Too many failed attempts', error_strings['message'])
self.user.reload()
# Too many failed requests within a short window. Throttled.
assert self.user.change_password_last_attempt is not None
assert self.user.old_password_invalid_attempts == 3
@mock.patch('website.profile.views.push_status_message')
def test_password_change_rate_limiting_not_imposed_if_old_password_correct(self, mock_push_status_message):
assert self.user.change_password_last_attempt is None
assert self.user.old_password_invalid_attempts == 0
url = web_url_for('user_account_password')
post_data = {
'old_password': 'password',
'new_password': 'short',
'confirm_password': 'short',
}
res = self.app.post(url, post_data, auth=self.user.auth)
self.user.reload()
assert self.user.change_password_last_attempt is None
assert self.user.old_password_invalid_attempts == 0
assert_true(200, res.status_code)
# Make a second request
res = self.app.post(url, post_data, auth=self.user.auth, expect_errors=True)
assert_true(len(mock_push_status_message.mock_calls) == 2)
assert_true('Password should be at least eight characters' == mock_push_status_message.mock_calls[1][1][0])
self.user.reload()
assert self.user.change_password_last_attempt is None
assert self.user.old_password_invalid_attempts == 0
# Make a third request
res = self.app.post(url, post_data, auth=self.user.auth, expect_errors=True)
assert_true(len(mock_push_status_message.mock_calls) == 3)
assert_true('Password should be at least eight characters' == mock_push_status_message.mock_calls[2][1][0])
self.user.reload()
assert self.user.change_password_last_attempt is None
assert self.user.old_password_invalid_attempts == 0
# Make a fourth request
res = self.app.post(url, post_data, auth=self.user.auth, expect_errors=True)
assert_true(mock_push_status_message.called)
assert_true(len(mock_push_status_message.mock_calls) == 4)
assert_true('Password should be at least eight characters' == mock_push_status_message.mock_calls[3][1][0])
self.user.reload()
assert self.user.change_password_last_attempt is None
assert self.user.old_password_invalid_attempts == 0
@mock.patch('website.profile.views.push_status_message')
def test_old_password_invalid_attempts_reset_if_password_successfully_reset(self, mock_push_status_message):
assert self.user.change_password_last_attempt is None
assert self.user.old_password_invalid_attempts == 0
url = web_url_for('user_account_password')
post_data = {
'old_password': 'invalid old password',
'new_password': 'this is a new password',
'confirm_password': 'this is a new password',
}
correct_post_data = {
'old_password': 'password',
'new_password': 'thisisanewpassword',
'confirm_password': 'thisisanewpassword',
}
res = self.app.post(url, post_data, auth=self.user.auth)
        assert_true(len(mock_push_status_message.mock_calls) == 1)
assert_true('Old password is invalid' == mock_push_status_message.mock_calls[0][1][0])
self.user.reload()
assert self.user.change_password_last_attempt is not None
assert self.user.old_password_invalid_attempts == 1
assert_true(200, res.status_code)
# Make a second request that successfully changes password
res = self.app.post(url, correct_post_data, auth=self.user.auth, expect_errors=True)
self.user.reload()
assert self.user.change_password_last_attempt is not None
assert self.user.old_password_invalid_attempts == 0
def test_password_change_invalid_old_password(self):
self.test_password_change_invalid(
old_password='invalid old password',
new_password='new password',
confirm_password='new password',
error_message='Old password is invalid',
)
def test_password_change_invalid_confirm_password(self):
self.test_password_change_invalid(
old_password='password',
new_password='new password',
confirm_password='invalid confirm password',
error_message='Password does not match the confirmation',
)
def test_password_change_invalid_new_password_length(self):
self.test_password_change_invalid(
old_password='password',
new_password='1234567',
confirm_password='1234567',
error_message='Password should be at least eight characters',
)
def test_password_change_valid_new_password_length(self):
self.test_password_change_valid(
old_password='password',
new_password='12345678',
confirm_password='12345678',
)
def test_password_change_invalid_blank_password(self, old_password='', new_password='', confirm_password=''):
self.test_password_change_invalid(
old_password=old_password,
new_password=new_password,
confirm_password=confirm_password,
error_message='Passwords cannot be blank',
)
def test_password_change_invalid_empty_string_new_password(self):
self.test_password_change_invalid_blank_password('password', '', 'new password')
def test_password_change_invalid_blank_new_password(self):
self.test_password_change_invalid_blank_password('password', ' ', 'new password')
def test_password_change_invalid_empty_string_confirm_password(self):
self.test_password_change_invalid_blank_password('password', 'new password', '')
def test_password_change_invalid_blank_confirm_password(self):
self.test_password_change_invalid_blank_password('password', 'new password', ' ')
@mock.patch('framework.auth.views.mails.send_mail')
def test_user_cannot_request_account_export_before_throttle_expires(self, send_mail):
url = api_url_for('request_export')
self.app.post(url, auth=self.user.auth)
assert_true(send_mail.called)
res = self.app.post(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(send_mail.call_count, 1)
def test_get_unconfirmed_emails_exclude_external_identity(self):
external_identity = {
'service': {
'AFI': 'LINK'
}
}
self.user.add_unconfirmed_email('james@steward.com')
self.user.add_unconfirmed_email('steward@james.com', external_identity=external_identity)
self.user.save()
unconfirmed_emails = self.user.get_unconfirmed_emails_exclude_external_identity()
assert_in('james@steward.com', unconfirmed_emails)
assert_not_in('steward@james.com', unconfirmed_emails)
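# Adding contributors (registered and unregistered) through the web API:
# serialization/deserialization helpers, claim and notification emails, and
# throttling of the contributor-added email.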
@pytest.mark.enable_implicit_clean
class TestAddingContributorViews(OsfTestCase):
def setUp(self):
super(TestAddingContributorViews, self).setUp()
self.creator = AuthUserFactory()
self.project = ProjectFactory(creator=self.creator)
self.auth = Auth(self.project.creator)
# Authenticate all requests
self.app.authenticate(*self.creator.auth)
contributor_added.connect(notify_added_contributor)
def test_serialize_unregistered_without_record(self):
name, email = fake.name(), fake_email()
res = serialize_unregistered(fullname=name, email=email)
assert_equal(res['fullname'], name)
assert_equal(res['email'], email)
assert_equal(res['id'], None)
assert_false(res['registered'])
assert_true(res['profile_image_url'])
assert_false(res['active'])
def test_deserialize_contributors(self):
contrib = UserFactory()
unreg = UnregUserFactory()
name, email = fake.name(), fake_email()
unreg_no_record = serialize_unregistered(name, email)
contrib_data = [
add_contributor_json(contrib),
serialize_unregistered(fake.name(), unreg.username),
unreg_no_record
]
contrib_data[0]['permission'] = permissions.ADMIN
contrib_data[1]['permission'] = permissions.WRITE
contrib_data[2]['permission'] = permissions.READ
contrib_data[0]['visible'] = True
contrib_data[1]['visible'] = True
contrib_data[2]['visible'] = True
res = deserialize_contributors(
self.project,
contrib_data,
auth=Auth(self.creator))
assert_equal(len(res), len(contrib_data))
assert_true(res[0]['user'].is_registered)
assert_false(res[1]['user'].is_registered)
assert_true(res[1]['user']._id)
assert_false(res[2]['user'].is_registered)
assert_true(res[2]['user']._id)
def test_deserialize_contributors_validates_fullname(self):
name = '<img src=1 onerror=console.log(1)>'
email = fake_email()
unreg_no_record = serialize_unregistered(name, email)
contrib_data = [unreg_no_record]
contrib_data[0]['permission'] = permissions.ADMIN
contrib_data[0]['visible'] = True
with assert_raises(ValidationError):
deserialize_contributors(
self.project,
contrib_data,
auth=Auth(self.creator),
validate=True)
def test_deserialize_contributors_validates_email(self):
name = fake.name()
email = '!@#$%%^&*'
unreg_no_record = serialize_unregistered(name, email)
contrib_data = [unreg_no_record]
contrib_data[0]['permission'] = permissions.ADMIN
contrib_data[0]['visible'] = True
with assert_raises(ValidationError):
deserialize_contributors(
self.project,
contrib_data,
auth=Auth(self.creator),
validate=True)
def test_serialize_unregistered_with_record(self):
name, email = fake.name(), fake_email()
user = self.project.add_unregistered_contributor(fullname=name,
email=email, auth=Auth(self.project.creator))
self.project.save()
res = serialize_unregistered(
fullname=name,
email=email
)
assert_false(res['active'])
assert_false(res['registered'])
assert_equal(res['id'], user._primary_key)
assert_true(res['profile_image_url'])
assert_equal(res['fullname'], name)
assert_equal(res['email'], email)
def test_add_contributor_with_unreg_contribs_and_reg_contribs(self):
n_contributors_pre = len(self.project.contributors)
reg_user = UserFactory()
name, email = fake.name(), fake_email()
pseudouser = {
'id': None,
'registered': False,
'fullname': name,
'email': email,
'permission': permissions.ADMIN,
'visible': True,
}
reg_dict = add_contributor_json(reg_user)
reg_dict['permission'] = permissions.ADMIN
reg_dict['visible'] = True
payload = {
'users': [reg_dict, pseudouser],
'node_ids': []
}
url = self.project.api_url_for('project_contributors_post')
self.app.post_json(url, payload).maybe_follow()
self.project.reload()
assert_equal(len(self.project.contributors),
n_contributors_pre + len(payload['users']))
new_unreg = auth.get_user(email=email)
assert_false(new_unreg.is_registered)
# unclaimed record was added
new_unreg.reload()
assert_in(self.project._primary_key, new_unreg.unclaimed_records)
rec = new_unreg.get_unclaimed_record(self.project._primary_key)
assert_equal(rec['name'], name)
assert_equal(rec['email'], email)
@mock.patch('website.project.views.contributor.send_claim_email')
def test_add_contributors_post_only_sends_one_email_to_unreg_user(
self, mock_send_claim_email):
# Project has components
comp1, comp2 = NodeFactory(
creator=self.creator), NodeFactory(creator=self.creator)
NodeRelation.objects.create(parent=self.project, child=comp1)
NodeRelation.objects.create(parent=self.project, child=comp2)
self.project.save()
# An unreg user is added to the project AND its components
        unreg_user = {  # dict because user has no previous unreg record
'id': None,
'registered': False,
'fullname': fake.name(),
'email': fake_email(),
'permission': permissions.ADMIN,
'visible': True,
}
payload = {
'users': [unreg_user],
'node_ids': [comp1._primary_key, comp2._primary_key]
}
# send request
url = self.project.api_url_for('project_contributors_post')
assert_true(self.project.can_edit(user=self.creator))
self.app.post_json(url, payload, auth=self.creator.auth)
        # send_claim_email (via finalize_invitation) should only have been called once
assert_equal(mock_send_claim_email.call_count, 1)
@mock.patch('website.mails.send_mail')
def test_add_contributors_post_only_sends_one_email_to_registered_user(self, mock_send_mail):
# Project has components
comp1 = NodeFactory(creator=self.creator, parent=self.project)
comp2 = NodeFactory(creator=self.creator, parent=self.project)
# A registered user is added to the project AND its components
user = UserFactory()
user_dict = {
'id': user._id,
'fullname': user.fullname,
'email': user.username,
'permission': permissions.WRITE,
'visible': True}
payload = {
'users': [user_dict],
'node_ids': [comp1._primary_key, comp2._primary_key]
}
# send request
url = self.project.api_url_for('project_contributors_post')
assert self.project.can_edit(user=self.creator)
self.app.post_json(url, payload, auth=self.creator.auth)
# send_mail should only have been called once
assert_equal(mock_send_mail.call_count, 1)
@mock.patch('website.mails.send_mail')
def test_add_contributors_post_sends_email_if_user_not_contributor_on_parent_node(self, mock_send_mail):
# Project has a component with a sub-component
component = NodeFactory(creator=self.creator, parent=self.project)
sub_component = NodeFactory(creator=self.creator, parent=component)
# A registered user is added to the project and the sub-component, but NOT the component
user = UserFactory()
user_dict = {
'id': user._id,
'fullname': user.fullname,
'email': user.username,
'permission': permissions.WRITE,
'visible': True}
payload = {
'users': [user_dict],
'node_ids': [sub_component._primary_key]
}
# send request
url = self.project.api_url_for('project_contributors_post')
assert self.project.can_edit(user=self.creator)
self.app.post_json(url, payload, auth=self.creator.auth)
# send_mail is called for both the project and the sub-component
assert_equal(mock_send_mail.call_count, 2)
@mock.patch('website.project.views.contributor.send_claim_email')
def test_email_sent_when_unreg_user_is_added(self, send_mail):
name, email = fake.name(), fake_email()
pseudouser = {
'id': None,
'registered': False,
'fullname': name,
'email': email,
'permission': permissions.ADMIN,
'visible': True,
}
payload = {
'users': [pseudouser],
'node_ids': []
}
url = self.project.api_url_for('project_contributors_post')
self.app.post_json(url, payload).maybe_follow()
assert_true(send_mail.called)
assert_true(send_mail.called_with(email=email))
@mock.patch('website.mails.send_mail')
def test_email_sent_when_reg_user_is_added(self, send_mail):
contributor = UserFactory()
contributors = [{
'user': contributor,
'visible': True,
'permissions': permissions.WRITE
}]
project = ProjectFactory(creator=self.auth.user)
project.add_contributors(contributors, auth=self.auth)
project.save()
assert_true(send_mail.called)
send_mail.assert_called_with(
to_addr=contributor.username,
mail=mails.CONTRIBUTOR_ADDED_DEFAULT,
user=contributor,
node=project,
referrer_name=self.auth.user.fullname,
all_global_subscriptions_none=False,
branded_service=None,
can_change_preferences=False,
logo=settings.OSF_LOGO,
osf_contact_email=settings.OSF_CONTACT_EMAIL,
is_initiator=False,
published_preprints=[]
)
assert_almost_equal(contributor.contributor_added_email_records[project._id]['last_sent'], int(time.time()), delta=1)
@mock.patch('website.mails.send_mail')
def test_contributor_added_email_sent_to_unreg_user(self, send_mail):
unreg_user = UnregUserFactory()
project = ProjectFactory()
project.add_unregistered_contributor(fullname=unreg_user.fullname, email=unreg_user.email, auth=Auth(project.creator))
project.save()
assert_true(send_mail.called)
@mock.patch('website.mails.send_mail')
def test_forking_project_does_not_send_contributor_added_email(self, send_mail):
project = ProjectFactory()
project.fork_node(auth=Auth(project.creator))
assert_false(send_mail.called)
@mock.patch('website.mails.send_mail')
def test_templating_project_does_not_send_contributor_added_email(self, send_mail):
project = ProjectFactory()
project.use_as_template(auth=Auth(project.creator))
assert_false(send_mail.called)
@mock.patch('website.archiver.tasks.archive')
@mock.patch('website.mails.send_mail')
def test_registering_project_does_not_send_contributor_added_email(self, send_mail, mock_archive):
project = ProjectFactory()
provider = RegistrationProviderFactory()
project.register_node(
get_default_metaschema(),
Auth(user=project.creator),
DraftRegistrationFactory(branched_from=project),
None,
provider=provider
)
assert_false(send_mail.called)
@mock.patch('website.mails.send_mail')
def test_notify_contributor_email_does_not_send_before_throttle_expires(self, send_mail):
contributor = UserFactory()
project = ProjectFactory()
auth = Auth(project.creator)
notify_added_contributor(project, contributor, auth)
assert_true(send_mail.called)
# 2nd call does not send email because throttle period has not expired
notify_added_contributor(project, contributor, auth)
assert_equal(send_mail.call_count, 1)
@mock.patch('website.mails.send_mail')
def test_notify_contributor_email_sends_after_throttle_expires(self, send_mail):
throttle = 0.5
contributor = UserFactory()
project = ProjectFactory()
auth = Auth(project.creator)
notify_added_contributor(project, contributor, auth, throttle=throttle)
assert_true(send_mail.called)
time.sleep(1) # throttle period expires
notify_added_contributor(project, contributor, auth, throttle=throttle)
assert_equal(send_mail.call_count, 2)
@mock.patch('website.mails.send_mail')
def test_add_contributor_to_fork_sends_email(self, send_mail):
contributor = UserFactory()
fork = self.project.fork_node(auth=Auth(self.creator))
fork.add_contributor(contributor, auth=Auth(self.creator))
fork.save()
assert_true(send_mail.called)
assert_equal(send_mail.call_count, 1)
@mock.patch('website.mails.send_mail')
def test_add_contributor_to_template_sends_email(self, send_mail):
contributor = UserFactory()
template = self.project.use_as_template(auth=Auth(self.creator))
template.add_contributor(contributor, auth=Auth(self.creator))
template.save()
assert_true(send_mail.called)
assert_equal(send_mail.call_count, 1)
@mock.patch('website.mails.send_mail')
def test_creating_fork_does_not_email_creator(self, send_mail):
contributor = UserFactory()
fork = self.project.fork_node(auth=Auth(self.creator))
assert_false(send_mail.called)
@mock.patch('website.mails.send_mail')
def test_creating_template_does_not_email_creator(self, send_mail):
contributor = UserFactory()
template = self.project.use_as_template(auth=Auth(self.creator))
assert_false(send_mail.called)
def test_add_multiple_contributors_only_adds_one_log(self):
n_logs_pre = self.project.logs.count()
reg_user = UserFactory()
name = fake.name()
pseudouser = {
'id': None,
'registered': False,
'fullname': name,
'email': fake_email(),
'permission': permissions.WRITE,
'visible': True,
}
reg_dict = add_contributor_json(reg_user)
reg_dict['permission'] = permissions.ADMIN
reg_dict['visible'] = True
payload = {
'users': [reg_dict, pseudouser],
'node_ids': []
}
url = self.project.api_url_for('project_contributors_post')
self.app.post_json(url, payload).maybe_follow()
self.project.reload()
assert_equal(self.project.logs.count(), n_logs_pre + 1)
def test_add_contribs_to_multiple_nodes(self):
child = NodeFactory(parent=self.project, creator=self.creator)
n_contributors_pre = child.contributors.count()
reg_user = UserFactory()
name, email = fake.name(), fake_email()
pseudouser = {
'id': None,
'registered': False,
'fullname': name,
'email': email,
'permission': permissions.ADMIN,
'visible': True,
}
reg_dict = add_contributor_json(reg_user)
reg_dict['permission'] = permissions.ADMIN
reg_dict['visible'] = True
payload = {
'users': [reg_dict, pseudouser],
'node_ids': [self.project._primary_key, child._primary_key]
}
url = '/api/v1/project/{0}/contributors/'.format(self.project._id)
self.app.post_json(url, payload).maybe_follow()
child.reload()
assert_equal(child.contributors.count(),
n_contributors_pre + len(payload['users']))
def tearDown(self):
super(TestAddingContributorViews, self).tearDown()
contributor_added.disconnect(notify_added_contributor)
class TestUserInviteViews(OsfTestCase):
def setUp(self):
super(TestUserInviteViews, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
self.invite_url = '/api/v1/project/{0}/invite_contributor/'.format(
self.project._primary_key)
def test_invite_contributor_post_if_not_in_db(self):
name, email = fake.name(), fake_email()
res = self.app.post_json(
self.invite_url,
{'fullname': name, 'email': email},
auth=self.user.auth,
)
contrib = res.json['contributor']
assert_true(contrib['id'] is None)
assert_equal(contrib['fullname'], name)
assert_equal(contrib['email'], email)
def test_invite_contributor_post_if_unreg_already_in_db(self):
        # An unreg user is added to a different project
name, email = fake.name(), fake_email()
project2 = ProjectFactory()
unreg_user = project2.add_unregistered_contributor(fullname=name, email=email,
auth=Auth(project2.creator))
project2.save()
res = self.app.post_json(self.invite_url,
{'fullname': name, 'email': email}, auth=self.user.auth)
expected = add_contributor_json(unreg_user)
expected['fullname'] = name
expected['email'] = email
assert_equal(res.json['contributor'], expected)
def test_invite_contributor_post_if_email_already_registered(self):
reg_user = UserFactory()
name, email = fake.name(), reg_user.username
        # Tries to invite a user that is already registered - this is now permitted.
res = self.app.post_json(self.invite_url,
{'fullname': name, 'email': email},
auth=self.user.auth)
contrib = res.json['contributor']
assert_equal(contrib['id'], reg_user._id)
assert_equal(contrib['fullname'], name)
assert_equal(contrib['email'], email)
def test_invite_contributor_post_if_user_is_already_contributor(self):
unreg_user = self.project.add_unregistered_contributor(
fullname=fake.name(), email=fake_email(),
auth=Auth(self.project.creator)
)
self.project.save()
        # Tries to invite an unreg user who is already a contributor
res = self.app.post_json(self.invite_url,
{'fullname': fake.name(), 'email': unreg_user.username},
auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, http_status.HTTP_400_BAD_REQUEST)
def test_invite_contributor_with_no_email(self):
name = fake.name()
res = self.app.post_json(self.invite_url,
{'fullname': name, 'email': None}, auth=self.user.auth)
assert_equal(res.status_code, http_status.HTTP_200_OK)
data = res.json
assert_equal(data['status'], 'success')
assert_equal(data['contributor']['fullname'], name)
assert_true(data['contributor']['email'] is None)
assert_false(data['contributor']['registered'])
def test_invite_contributor_requires_fullname(self):
res = self.app.post_json(self.invite_url,
{'email': 'brian@queen.com', 'fullname': ''}, auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, http_status.HTTP_400_BAD_REQUEST)
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_email_to_given_email(self, send_mail):
project = ProjectFactory()
given_email = fake_email()
unreg_user = project.add_unregistered_contributor(
fullname=fake.name(),
email=given_email,
auth=Auth(project.creator),
)
project.save()
send_claim_email(email=given_email, unclaimed_user=unreg_user, node=project)
assert_true(send_mail.called)
assert_true(send_mail.called_with(
to_addr=given_email,
mail=mails.INVITE_DEFAULT,
can_change_preferences=False,
))
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_email_to_referrer(self, send_mail):
project = ProjectFactory()
referrer = project.creator
given_email, real_email = fake_email(), fake_email()
        unreg_user = project.add_unregistered_contributor(
            fullname=fake.name(),
            email=given_email,
            auth=Auth(referrer),
        )
project.save()
send_claim_email(email=real_email, unclaimed_user=unreg_user, node=project)
assert_true(send_mail.called)
# email was sent to referrer
send_mail.assert_called_with(
referrer.username,
mails.FORWARD_INVITE,
user=unreg_user,
referrer=referrer,
claim_url=unreg_user.get_claim_url(project._id, external=True),
email=real_email.lower().strip(),
fullname=unreg_user.get_unclaimed_record(project._id)['name'],
node=project,
branded_service=None,
can_change_preferences=False,
logo=settings.OSF_LOGO,
osf_contact_email=settings.OSF_CONTACT_EMAIL
)
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_email_before_throttle_expires(self, send_mail):
project = ProjectFactory()
given_email = fake_email()
unreg_user = project.add_unregistered_contributor(
fullname=fake.name(),
email=given_email,
auth=Auth(project.creator),
)
project.save()
send_claim_email(email=fake_email(), unclaimed_user=unreg_user, node=project)
send_mail.reset_mock()
# 2nd call raises error because throttle hasn't expired
with assert_raises(HTTPError):
send_claim_email(email=fake_email(), unclaimed_user=unreg_user, node=project)
assert_false(send_mail.called)
@pytest.mark.enable_implicit_clean
@pytest.mark.enable_quickfiles_creation
class TestClaimViews(OsfTestCase):
def setUp(self):
super(TestClaimViews, self).setUp()
self.referrer = AuthUserFactory()
self.project = ProjectFactory(creator=self.referrer, is_public=True)
self.project_with_source_tag = ProjectFactory(creator=self.referrer, is_public=True)
self.preprint_with_source_tag = PreprintFactory(creator=self.referrer, is_public=True)
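        # Attach system source tags to a project and a preprint; the claim tests below
        # assert that claiming them adds the matching claimed tags to the user.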
osf_source_tag, created = Tag.all_tags.get_or_create(name=OsfSourceTags.Osf.value, system=True)
preprint_source_tag, created = Tag.all_tags.get_or_create(name=provider_source_tag(self.preprint_with_source_tag.provider._id, 'preprint'), system=True)
self.project_with_source_tag.add_system_tag(osf_source_tag.name)
self.preprint_with_source_tag.add_system_tag(preprint_source_tag.name)
self.given_name = fake.name()
self.given_email = fake_email()
self.project_with_source_tag.add_unregistered_contributor(
fullname=self.given_name,
email=self.given_email,
auth=Auth(user=self.referrer)
)
self.preprint_with_source_tag.add_unregistered_contributor(
fullname=self.given_name,
email=self.given_email,
auth=Auth(user=self.referrer)
)
self.user = self.project.add_unregistered_contributor(
fullname=self.given_name,
email=self.given_email,
auth=Auth(user=self.referrer)
)
self.project.save()
@mock.patch('website.project.views.contributor.send_claim_email')
def test_claim_user_already_registered_redirects_to_claim_user_registered(self, claim_email):
name = fake.name()
email = fake_email()
        # project contributor adds an unregistered contributor (without an email) to a public project
unregistered_user = self.project.add_unregistered_contributor(
fullname=name,
email=None,
auth=Auth(user=self.referrer)
)
assert_in(unregistered_user, self.project.contributors)
# unregistered user comes along and claims themselves on the public project, entering an email
invite_url = self.project.api_url_for('claim_user_post', uid='undefined')
self.app.post_json(invite_url, {
'pk': unregistered_user._primary_key,
'value': email
})
assert_equal(claim_email.call_count, 1)
# set unregistered record email since we are mocking send_claim_email()
unclaimed_record = unregistered_user.get_unclaimed_record(self.project._primary_key)
unclaimed_record.update({'email': email})
unregistered_user.save()
        # unregistered user then goes and makes an account with the same email, before claiming themselves as a contributor
UserFactory(username=email, fullname=name)
# claim link for the now registered email is accessed while not logged in
token = unregistered_user.get_unclaimed_record(self.project._primary_key)['token']
claim_url = '/user/{uid}/{pid}/claim/?token={token}'.format(
uid=unregistered_user._id,
pid=self.project._id,
token=token
)
res = self.app.get(claim_url)
# should redirect to 'claim_user_registered' view
claim_registered_url = '/user/{uid}/{pid}/claim/verify/{token}/'.format(
uid=unregistered_user._id,
pid=self.project._id,
token=token
)
assert_equal(res.status_code, 302)
assert_in(claim_registered_url, res.headers.get('Location'))
@mock.patch('website.project.views.contributor.send_claim_email')
def test_claim_user_already_registered_secondary_email_redirects_to_claim_user_registered(self, claim_email):
name = fake.name()
email = fake_email()
secondary_email = fake_email()
        # project contributor adds an unregistered contributor (without an email) to a public project
unregistered_user = self.project.add_unregistered_contributor(
fullname=name,
email=None,
auth=Auth(user=self.referrer)
)
assert_in(unregistered_user, self.project.contributors)
# unregistered user comes along and claims themselves on the public project, entering an email
invite_url = self.project.api_url_for('claim_user_post', uid='undefined')
self.app.post_json(invite_url, {
'pk': unregistered_user._primary_key,
'value': secondary_email
})
assert_equal(claim_email.call_count, 1)
# set unregistered record email since we are mocking send_claim_email()
unclaimed_record = unregistered_user.get_unclaimed_record(self.project._primary_key)
unclaimed_record.update({'email': secondary_email})
unregistered_user.save()
        # unregistered user then goes and makes an account with the same email, before claiming themselves as a contributor
registered_user = UserFactory(username=email, fullname=name)
registered_user.emails.create(address=secondary_email)
registered_user.save()
# claim link for the now registered email is accessed while not logged in
token = unregistered_user.get_unclaimed_record(self.project._primary_key)['token']
claim_url = '/user/{uid}/{pid}/claim/?token={token}'.format(
uid=unregistered_user._id,
pid=self.project._id,
token=token
)
res = self.app.get(claim_url)
# should redirect to 'claim_user_registered' view
claim_registered_url = '/user/{uid}/{pid}/claim/verify/{token}/'.format(
uid=unregistered_user._id,
pid=self.project._id,
token=token
)
assert_equal(res.status_code, 302)
assert_in(claim_registered_url, res.headers.get('Location'))
def test_claim_user_invited_with_no_email_posts_to_claim_form(self):
given_name = fake.name()
invited_user = self.project.add_unregistered_contributor(
fullname=given_name,
email=None,
auth=Auth(user=self.referrer)
)
self.project.save()
url = invited_user.get_claim_url(self.project._primary_key)
res = self.app.post(url, {
'password': 'bohemianrhap',
'password2': 'bohemianrhap'
}, expect_errors=True)
assert_equal(res.status_code, 400)
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_claim_user_post_with_registered_user_id(self, send_mail):
# registered user who is attempting to claim the unclaimed contributor
reg_user = UserFactory()
payload = {
# pk of unreg user record
'pk': self.user._primary_key,
'claimerId': reg_user._primary_key
}
url = '/api/v1/user/{uid}/{pid}/claim/email/'.format(
uid=self.user._primary_key,
pid=self.project._primary_key,
)
res = self.app.post_json(url, payload)
# mail was sent
assert_equal(send_mail.call_count, 2)
# ... to the correct address
referrer_call = send_mail.call_args_list[0]
claimer_call = send_mail.call_args_list[1]
args, _ = referrer_call
assert_equal(args[0], self.referrer.username)
args, _ = claimer_call
assert_equal(args[0], reg_user.username)
# view returns the correct JSON
assert_equal(res.json, {
'status': 'success',
'email': reg_user.username,
'fullname': self.given_name,
})
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_registered_email(self, mock_send_mail):
reg_user = UserFactory()
send_claim_registered_email(
claimer=reg_user,
unclaimed_user=self.user,
node=self.project
)
assert_equal(mock_send_mail.call_count, 2)
first_call_args = mock_send_mail.call_args_list[0][0]
assert_equal(first_call_args[0], self.referrer.username)
second_call_args = mock_send_mail.call_args_list[1][0]
assert_equal(second_call_args[0], reg_user.username)
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_registered_email_before_throttle_expires(self, mock_send_mail):
reg_user = UserFactory()
send_claim_registered_email(
claimer=reg_user,
unclaimed_user=self.user,
node=self.project,
)
mock_send_mail.reset_mock()
# second call raises error because it was called before throttle period
with assert_raises(HTTPError):
send_claim_registered_email(
claimer=reg_user,
unclaimed_user=self.user,
node=self.project,
)
assert_false(mock_send_mail.called)
@mock.patch('website.project.views.contributor.send_claim_registered_email')
def test_claim_user_post_with_email_already_registered_sends_correct_email(
self, send_claim_registered_email):
reg_user = UserFactory()
payload = {
'value': reg_user.username,
'pk': self.user._primary_key
}
url = self.project.api_url_for('claim_user_post', uid=self.user._id)
self.app.post_json(url, payload)
assert_true(send_claim_registered_email.called)
def test_user_with_removed_unclaimed_url_claiming(self):
""" Tests that when an unclaimed user is removed from a project, the
unregistered user object does not retain the token.
"""
self.project.remove_contributor(self.user, Auth(user=self.referrer))
assert_not_in(
self.project._primary_key,
self.user.unclaimed_records.keys()
)
def test_user_with_claim_url_cannot_claim_twice(self):
""" Tests that when an unclaimed user is replaced on a project with a
claimed user, the unregistered user object does not retain the token.
"""
reg_user = AuthUserFactory()
self.project.replace_contributor(self.user, reg_user)
assert_not_in(
self.project._primary_key,
self.user.unclaimed_records.keys()
)
def test_claim_user_form_redirects_to_password_confirm_page_if_user_is_logged_in(self):
reg_user = AuthUserFactory()
url = self.user.get_claim_url(self.project._primary_key)
res = self.app.get(url, auth=reg_user.auth)
assert_equal(res.status_code, 302)
res = res.follow(auth=reg_user.auth)
token = self.user.get_unclaimed_record(self.project._primary_key)['token']
expected = self.project.web_url_for(
'claim_user_registered',
uid=self.user._id,
token=token,
)
assert_equal(res.request.path, expected)
@mock.patch('framework.auth.cas.make_response_from_ticket')
def test_claim_user_when_user_is_registered_with_orcid(self, mock_response_from_ticket):
token = self.user.get_unclaimed_record(self.project._primary_key)['token']
url = '/user/{uid}/{pid}/claim/verify/{token}/'.format(
uid=self.user._id,
pid=self.project._id,
token=token
)
# logged out user gets redirected to cas login
res = self.app.get(url)
assert res.status_code == 302
res = res.follow()
service_url = 'http://localhost{}'.format(url)
expected = cas.get_logout_url(service_url=cas.get_login_url(service_url=service_url))
assert res.request.url == expected
# user logged in with orcid automatically becomes a contributor
orcid_user, validated_credentials, cas_resp = generate_external_user_with_resp(url)
mock_response_from_ticket.return_value = authenticate(
orcid_user,
cas_resp.attributes.get('accessToken', ''),
redirect(url)
)
orcid_user.set_unusable_password()
orcid_user.save()
ticket = fake.md5()
url += '?ticket={}'.format(ticket)
res = self.app.get(url)
res = res.follow()
assert res.status_code == 302
assert self.project.is_contributor(orcid_user)
assert self.project.url in res.headers.get('Location')
def test_get_valid_form(self):
url = self.user.get_claim_url(self.project._primary_key)
res = self.app.get(url).maybe_follow()
assert_equal(res.status_code, 200)
def test_invalid_claim_form_raise_400(self):
uid = self.user._primary_key
pid = self.project._primary_key
url = '/user/{uid}/{pid}/claim/?token=badtoken'.format(**locals())
res = self.app.get(url, expect_errors=True).maybe_follow()
assert_equal(res.status_code, 400)
@mock.patch('osf.models.OSFUser.update_search_nodes')
def test_posting_to_claim_form_with_valid_data(self, mock_update_search_nodes):
url = self.user.get_claim_url(self.project._primary_key)
res = self.app.post(url, {
'username': self.user.username,
'password': 'killerqueen',
'password2': 'killerqueen'
})
assert_equal(res.status_code, 302)
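        # a successful claim hands off to CAS login, passing the username and a one-time verification key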
location = res.headers.get('Location')
assert_in('login?service=', location)
assert_in('username', location)
assert_in('verification_key', location)
assert_in(self.project._primary_key, location)
self.user.reload()
assert_true(self.user.is_registered)
assert_true(self.user.is_active)
assert_not_in(self.project._primary_key, self.user.unclaimed_records)
@mock.patch('osf.models.OSFUser.update_search_nodes')
def test_posting_to_claim_form_removes_all_unclaimed_data(self, mock_update_search_nodes):
# user has multiple unclaimed records
p2 = ProjectFactory(creator=self.referrer)
self.user.add_unclaimed_record(p2, referrer=self.referrer,
given_name=fake.name())
self.user.save()
assert_true(len(self.user.unclaimed_records.keys()) > 1) # sanity check
url = self.user.get_claim_url(self.project._primary_key)
self.app.post(url, {
'username': self.given_email,
'password': 'bohemianrhap',
'password2': 'bohemianrhap'
})
self.user.reload()
assert_equal(self.user.unclaimed_records, {})
@mock.patch('osf.models.OSFUser.update_search_nodes')
def test_posting_to_claim_form_sets_fullname_to_given_name(self, mock_update_search_nodes):
# User is created with a full name
original_name = fake.name()
unreg = UnregUserFactory(fullname=original_name)
# User invited with a different name
different_name = fake.name()
new_user = self.project.add_unregistered_contributor(
email=unreg.username,
fullname=different_name,
auth=Auth(self.project.creator),
)
self.project.save()
# Goes to claim url
claim_url = new_user.get_claim_url(self.project._id)
self.app.post(claim_url, {
'username': unreg.username,
'password': 'killerqueen', 'password2': 'killerqueen'
})
unreg.reload()
# Full name was set correctly
assert_equal(unreg.fullname, different_name)
# CSL names were set correctly
parsed_name = impute_names_model(different_name)
assert_equal(unreg.given_name, parsed_name['given_name'])
assert_equal(unreg.family_name, parsed_name['family_name'])
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_claim_user_post_returns_fullname(self, send_mail):
url = '/api/v1/user/{0}/{1}/claim/email/'.format(self.user._primary_key,
self.project._primary_key)
res = self.app.post_json(url,
{'value': self.given_email,
'pk': self.user._primary_key},
auth=self.referrer.auth)
assert_equal(res.json['fullname'], self.given_name)
assert_true(send_mail.called)
assert_true(send_mail.called_with(to_addr=self.given_email))
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_claim_user_post_if_email_is_different_from_given_email(self, send_mail):
email = fake_email() # email that is different from the one the referrer gave
url = '/api/v1/user/{0}/{1}/claim/email/'.format(self.user._primary_key,
self.project._primary_key)
self.app.post_json(url,
{'value': email, 'pk': self.user._primary_key}
)
assert_true(send_mail.called)
assert_equal(send_mail.call_count, 2)
call_to_invited = send_mail.mock_calls[0]
assert_true(call_to_invited.called_with(
to_addr=email
))
call_to_referrer = send_mail.mock_calls[1]
assert_true(call_to_referrer.called_with(
to_addr=self.given_email
))
def test_claim_url_with_bad_token_returns_400(self):
url = self.project.web_url_for(
'claim_user_registered',
uid=self.user._id,
token='badtoken',
)
res = self.app.get(url, auth=self.referrer.auth, expect_errors=400)
assert_equal(res.status_code, 400)
def test_cannot_claim_user_with_user_who_is_already_contributor(self):
        # user who is already a contributor to the project
contrib = AuthUserFactory()
self.project.add_contributor(contrib, auth=Auth(self.project.creator))
self.project.save()
# Claiming user goes to claim url, but contrib is already logged in
url = self.user.get_claim_url(self.project._primary_key)
res = self.app.get(
url,
auth=contrib.auth,
).follow(
auth=contrib.auth,
expect_errors=True,
)
# Response is a 400
assert_equal(res.status_code, 400)
def test_claim_user_with_project_id_adds_corresponding_claimed_tag_to_user(self):
assert OsfClaimedTags.Osf.value not in self.user.system_tags
url = self.user.get_claim_url(self.project_with_source_tag._primary_key)
res = self.app.post(url, {
'username': self.user.username,
'password': 'killerqueen',
'password2': 'killerqueen'
})
assert_equal(res.status_code, 302)
self.user.reload()
assert OsfClaimedTags.Osf.value in self.user.system_tags
def test_claim_user_with_preprint_id_adds_corresponding_claimed_tag_to_user(self):
assert provider_claimed_tag(self.preprint_with_source_tag.provider._id, 'preprint') not in self.user.system_tags
url = self.user.get_claim_url(self.preprint_with_source_tag._primary_key)
res = self.app.post(url, {
'username': self.user.username,
'password': 'killerqueen',
'password2': 'killerqueen'
})
assert_equal(res.status_code, 302)
self.user.reload()
assert provider_claimed_tag(self.preprint_with_source_tag.provider._id, 'preprint') in self.user.system_tags
@pytest.mark.enable_bookmark_creation
class TestPointerViews(OsfTestCase):
def setUp(self):
super(TestPointerViews, self).setUp()
self.user = AuthUserFactory()
self.consolidate_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
def _make_pointer_only_user_can_see(self, user, project, save=False):
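        """Create a private node owned by `user` and add it to `project` as a pointer."""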
node = ProjectFactory(creator=user)
project.add_pointer(node, auth=Auth(user=user), save=save)
def test_pointer_list_write_contributor_can_remove_private_component_entry(self):
"""Ensure that write contributors see the button to delete a pointer,
even if they cannot see what it is pointing at"""
url = web_url_for('view_project', pid=self.project._id)
user2 = AuthUserFactory()
self.project.add_contributor(user2,
auth=Auth(self.project.creator),
permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS)
self._make_pointer_only_user_can_see(user2, self.project)
self.project.save()
res = self.app.get(url, auth=self.user.auth).maybe_follow()
assert_equal(res.status_code, 200)
has_controls = res.lxml.xpath('//li[@node_id]/p[starts-with(normalize-space(text()), "Private Link")]//i[contains(@class, "remove-pointer")]')
assert_true(has_controls)
def test_pointer_list_write_contributor_can_remove_public_component_entry(self):
url = web_url_for('view_project', pid=self.project._id)
for i in range(3):
self.project.add_pointer(ProjectFactory(creator=self.user),
auth=Auth(user=self.user))
self.project.save()
res = self.app.get(url, auth=self.user.auth).maybe_follow()
assert_equal(res.status_code, 200)
has_controls = res.lxml.xpath(
'//li[@node_id]//i[contains(@class, "remove-pointer")]')
assert_equal(len(has_controls), 3)
def test_pointer_list_read_contributor_cannot_remove_private_component_entry(self):
url = web_url_for('view_project', pid=self.project._id)
user2 = AuthUserFactory()
self.project.add_contributor(user2,
auth=Auth(self.project.creator),
permissions=permissions.READ)
self._make_pointer_only_user_can_see(user2, self.project)
self.project.save()
res = self.app.get(url, auth=user2.auth).maybe_follow()
assert_equal(res.status_code, 200)
pointer_nodes = res.lxml.xpath('//li[@node_id]')
has_controls = res.lxml.xpath('//li[@node_id]/p[starts-with(normalize-space(text()), "Private Link")]//i[contains(@class, "remove-pointer")]')
assert_equal(len(pointer_nodes), 1)
assert_false(has_controls)
def test_pointer_list_read_contributor_cannot_remove_public_component_entry(self):
url = web_url_for('view_project', pid=self.project._id)
self.project.add_pointer(ProjectFactory(creator=self.user,
is_public=True),
auth=Auth(user=self.user))
user2 = AuthUserFactory()
self.project.add_contributor(user2,
auth=Auth(self.project.creator),
permissions=permissions.READ)
self.project.save()
res = self.app.get(url, auth=user2.auth).maybe_follow()
assert_equal(res.status_code, 200)
pointer_nodes = res.lxml.xpath('//li[@node_id]')
has_controls = res.lxml.xpath(
'//li[@node_id]//i[contains(@class, "remove-pointer")]')
assert_equal(len(pointer_nodes), 1)
assert_equal(len(has_controls), 0)
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1109
def test_get_pointed_excludes_folders(self):
pointer_project = ProjectFactory(is_public=True) # project that points to another project
pointed_project = ProjectFactory(creator=self.user) # project that other project points to
pointer_project.add_pointer(pointed_project, Auth(pointer_project.creator), save=True)
# Project is in an organizer collection
collection = CollectionFactory(creator=pointed_project.creator)
collection.collect_object(pointed_project, self.user)
url = pointed_project.api_url_for('get_pointed')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
# pointer_project's id is included in response, but folder's id is not
pointer_ids = [each['id'] for each in res.json['pointed']]
assert_in(pointer_project._id, pointer_ids)
assert_not_in(collection._id, pointer_ids)
def test_add_pointers(self):
url = self.project.api_url + 'pointer/'
node_ids = [
NodeFactory()._id
for _ in range(5)
]
self.app.post_json(
url,
{'nodeIds': node_ids},
auth=self.user.auth,
).maybe_follow()
self.project.reload()
assert_equal(
self.project.nodes_active.count(),
5
)
def test_add_the_same_pointer_more_than_once(self):
url = self.project.api_url + 'pointer/'
double_node = NodeFactory()
self.app.post_json(
url,
{'nodeIds': [double_node._id]},
auth=self.user.auth,
)
res = self.app.post_json(
url,
{'nodeIds': [double_node._id]},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
    def test_add_pointers_no_user_logged_in(self):
url = self.project.api_url_for('add_pointers')
node_ids = [
NodeFactory()._id
for _ in range(5)
]
res = self.app.post_json(
url,
{'nodeIds': node_ids},
auth=None,
expect_errors=True
)
assert_equal(res.status_code, 401)
def test_add_pointers_public_non_contributor(self):
project2 = ProjectFactory()
project2.set_privacy('public')
project2.save()
url = self.project.api_url_for('add_pointers')
self.app.post_json(
url,
{'nodeIds': [project2._id]},
auth=self.user.auth,
).maybe_follow()
self.project.reload()
assert_equal(
self.project.nodes_active.count(),
1
)
def test_add_pointers_contributor(self):
user2 = AuthUserFactory()
self.project.add_contributor(user2)
self.project.save()
url = self.project.api_url_for('add_pointers')
node_ids = [
NodeFactory()._id
for _ in range(5)
]
self.app.post_json(
url,
{'nodeIds': node_ids},
auth=user2.auth,
).maybe_follow()
self.project.reload()
assert_equal(
self.project.linked_nodes.count(),
5
)
def test_add_pointers_not_provided(self):
url = self.project.api_url + 'pointer/'
res = self.app.post_json(url, {}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_remove_pointer(self):
url = self.project.api_url + 'pointer/'
node = NodeFactory()
pointer = self.project.add_pointer(node, auth=self.consolidate_auth)
self.app.delete_json(
url,
{'pointerId': pointer.node._id},
auth=self.user.auth,
)
self.project.reload()
assert_equal(
len(list(self.project.nodes)),
0
)
def test_remove_pointer_not_provided(self):
url = self.project.api_url + 'pointer/'
res = self.app.delete_json(url, {}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_remove_pointer_not_found(self):
url = self.project.api_url + 'pointer/'
res = self.app.delete_json(
url,
{'pointerId': None},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_remove_pointer_not_in_nodes(self):
url = self.project.api_url + 'pointer/'
res = self.app.delete_json(
url,
{'pointerId': 'somefakeid'},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_forking_pointer_works(self):
url = self.project.api_url + 'pointer/fork/'
linked_node = NodeFactory(creator=self.user)
pointer = self.project.add_pointer(linked_node, auth=self.consolidate_auth)
        assert_equal(linked_node.id, pointer.child.id)
res = self.app.post_json(url, {'nodeId': pointer.child._id}, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_in('node', res.json['data'])
fork = res.json['data']['node']
assert_equal(fork['title'], 'Fork of {}'.format(linked_node.title))
def test_fork_pointer_not_provided(self):
url = self.project.api_url + 'pointer/fork/'
res = self.app.post_json(url, {}, auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 400)
def test_fork_pointer_not_found(self):
url = self.project.api_url + 'pointer/fork/'
res = self.app.post_json(
url,
{'nodeId': None},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_fork_pointer_not_in_nodes(self):
url = self.project.api_url + 'pointer/fork/'
res = self.app.post_json(
url,
{'nodeId': 'somefakeid'},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_before_register_with_pointer(self):
# Assert that link warning appears in before register callback.
node = NodeFactory()
self.project.add_pointer(node, auth=self.consolidate_auth)
url = self.project.api_url + 'fork/before/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
prompts = [
prompt
for prompt in res.json['prompts']
if 'Links will be copied into your fork' in prompt
]
assert_equal(len(prompts), 1)
def test_before_fork_with_pointer(self):
"""Assert that link warning appears in before fork callback."""
node = NodeFactory()
self.project.add_pointer(node, auth=self.consolidate_auth)
url = self.project.api_url + 'beforeregister/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
prompts = [
prompt
for prompt in res.json['prompts']
if 'These links will be copied into your registration,' in prompt
]
assert_equal(len(prompts), 1)
def test_before_register_no_pointer(self):
"""Assert that link warning does not appear in before register callback."""
url = self.project.api_url + 'fork/before/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
prompts = [
prompt
for prompt in res.json['prompts']
if 'Links will be copied into your fork' in prompt
]
assert_equal(len(prompts), 0)
def test_before_fork_no_pointer(self):
"""Assert that link warning does not appear in before fork callback."""
url = self.project.api_url + 'beforeregister/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
prompts = [
prompt
for prompt in res.json['prompts']
if 'Links will be copied into your registration' in prompt
]
assert_equal(len(prompts), 0)
def test_get_pointed(self):
pointing_node = ProjectFactory(creator=self.user)
pointing_node.add_pointer(self.project, auth=Auth(self.user))
url = self.project.api_url_for('get_pointed')
res = self.app.get(url, auth=self.user.auth)
pointed = res.json['pointed']
assert_equal(len(pointed), 1)
assert_equal(pointed[0]['url'], pointing_node.url)
assert_equal(pointed[0]['title'], pointing_node.title)
assert_equal(pointed[0]['authorShort'], abbrev_authors(pointing_node))
def test_get_pointed_private(self):
secret_user = UserFactory()
pointing_node = ProjectFactory(creator=secret_user)
pointing_node.add_pointer(self.project, auth=Auth(secret_user))
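        # the pointing node is private to secret_user, so its details are anonymized for self.user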
url = self.project.api_url_for('get_pointed')
res = self.app.get(url, auth=self.user.auth)
pointed = res.json['pointed']
assert_equal(len(pointed), 1)
assert_equal(pointed[0]['url'], None)
assert_equal(pointed[0]['title'], 'Private Component')
assert_equal(pointed[0]['authorShort'], 'Private Author(s)')
def test_can_template_project_linked_to_each_other(self):
project2 = ProjectFactory(creator=self.user)
self.project.add_pointer(project2, auth=Auth(user=self.user))
template = self.project.use_as_template(auth=Auth(user=self.user))
assert_true(template)
assert_equal(template.title, 'Templated from ' + self.project.title)
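        # node links are not copied into the new template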
assert_not_in(project2, template.linked_nodes)
class TestPublicViews(OsfTestCase):
def test_explore(self):
res = self.app.get('/explore/').maybe_follow()
assert_equal(res.status_code, 200)
@pytest.mark.enable_quickfiles_creation
class TestAuthViews(OsfTestCase):
def setUp(self):
super(TestAuthViews, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
@mock.patch('framework.auth.views.mails.send_mail')
def test_register_ok(self, _):
url = api_url_for('register_user')
name, email, password = fake.name(), fake_email(), 'underpressure'
self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': email,
'password': password,
}
)
user = OSFUser.objects.get(username=email)
assert_equal(user.fullname, name)
assert_equal(user.accepted_terms_of_service, None)
# Regression test for https://github.com/CenterForOpenScience/osf.io/issues/2902
@mock.patch('framework.auth.views.mails.send_mail')
def test_register_email_case_insensitive(self, _):
url = api_url_for('register_user')
name, email, password = fake.name(), fake_email(), 'underpressure'
self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': str(email).upper(),
'password': password,
}
)
user = OSFUser.objects.get(username=email)
assert_equal(user.fullname, name)
@mock.patch('framework.auth.views.mails.send_mail')
def test_register_email_with_accepted_tos(self, _):
url = api_url_for('register_user')
name, email, password = fake.name(), fake_email(), 'underpressure'
self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': email,
'password': password,
'acceptedTermsOfService': True
}
)
user = OSFUser.objects.get(username=email)
assert_true(user.accepted_terms_of_service)
@mock.patch('framework.auth.views.mails.send_mail')
def test_register_email_without_accepted_tos(self, _):
url = api_url_for('register_user')
name, email, password = fake.name(), fake_email(), 'underpressure'
self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': email,
'password': password,
'acceptedTermsOfService': False
}
)
user = OSFUser.objects.get(username=email)
assert_equal(user.accepted_terms_of_service, None)
@mock.patch('framework.auth.views.send_confirm_email')
def test_register_scrubs_username(self, _):
url = api_url_for('register_user')
name = "<i>Eunice</i> O' \"Cornwallis\"<script type='text/javascript' src='http://www.cornify.com/js/cornify.js'></script><script type='text/javascript'>cornify_add()</script>"
email, password = fake_email(), 'underpressure'
res = self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': email,
'password': password,
}
)
expected_scrub_username = "Eunice O' \"Cornwallis\"cornify_add()"
user = OSFUser.objects.get(username=email)
assert_equal(res.status_code, http_status.HTTP_200_OK)
assert_equal(user.fullname, expected_scrub_username)
def test_register_email_mismatch(self):
url = api_url_for('register_user')
name, email, password = fake.name(), fake_email(), 'underpressure'
res = self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': email + 'lol',
'password': password,
},
expect_errors=True,
)
assert_equal(res.status_code, http_status.HTTP_400_BAD_REQUEST)
users = OSFUser.objects.filter(username=email)
assert_equal(users.count(), 0)
def test_register_email_already_registered(self):
url = api_url_for('register_user')
name, email, password = fake.name(), fake_email(), fake.password()
existing_user = UserFactory(
username=email,
)
res = self.app.post_json(
url, {
'fullName': name,
'email1': email,
'email2': email,
'password': password
},
expect_errors=True
)
assert_equal(res.status_code, http_status.HTTP_409_CONFLICT)
users = OSFUser.objects.filter(username=email)
assert_equal(users.count(), 1)
def test_register_blacklisted_email_domain(self):
BlacklistedEmailDomain.objects.get_or_create(domain='mailinator.com')
url = api_url_for('register_user')
name, email, password = fake.name(), 'bad@mailinator.com', 'agreatpasswordobviously'
res = self.app.post_json(
url, {
'fullName': name,
'email1': email,
'email2': email,
'password': password
},
expect_errors=True
)
assert_equal(res.status_code, http_status.HTTP_400_BAD_REQUEST)
users = OSFUser.objects.filter(username=email)
assert_equal(users.count(), 0)
@mock.patch('framework.auth.views.validate_recaptcha', return_value=True)
@mock.patch('framework.auth.views.mails.send_mail')
def test_register_good_captcha(self, _, validate_recaptcha):
url = api_url_for('register_user')
name, email, password = fake.name(), fake_email(), 'underpressure'
captcha = 'some valid captcha'
with mock.patch.object(settings, 'RECAPTCHA_SITE_KEY', 'some_value'):
resp = self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': str(email).upper(),
'password': password,
'g-recaptcha-response': captcha,
}
)
validate_recaptcha.assert_called_with(captcha, remote_ip=None)
assert_equal(resp.status_code, http_status.HTTP_200_OK)
user = OSFUser.objects.get(username=email)
assert_equal(user.fullname, name)
@mock.patch('framework.auth.views.validate_recaptcha', return_value=False)
@mock.patch('framework.auth.views.mails.send_mail')
def test_register_missing_captcha(self, _, validate_recaptcha):
url = api_url_for('register_user')
name, email, password = fake.name(), fake_email(), 'underpressure'
with mock.patch.object(settings, 'RECAPTCHA_SITE_KEY', 'some_value'):
resp = self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': str(email).upper(),
'password': password,
# 'g-recaptcha-response': 'supposed to be None',
},
expect_errors=True
)
validate_recaptcha.assert_called_with(None, remote_ip=None)
assert_equal(resp.status_code, http_status.HTTP_400_BAD_REQUEST)
@mock.patch('framework.auth.views.validate_recaptcha', return_value=False)
@mock.patch('framework.auth.views.mails.send_mail')
def test_register_bad_captcha(self, _, validate_recaptcha):
url = api_url_for('register_user')
name, email, password = fake.name(), fake_email(), 'underpressure'
with mock.patch.object(settings, 'RECAPTCHA_SITE_KEY', 'some_value'):
resp = self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': str(email).upper(),
'password': password,
'g-recaptcha-response': 'bad captcha',
},
expect_errors=True
)
assert_equal(resp.status_code, http_status.HTTP_400_BAD_REQUEST)
@mock.patch('osf.models.OSFUser.update_search_nodes')
def test_register_after_being_invited_as_unreg_contributor(self, mock_update_search_nodes):
# Regression test for:
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/861
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1021
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1026
# A user is invited as an unregistered contributor
project = ProjectFactory()
name, email = fake.name(), fake_email()
project.add_unregistered_contributor(fullname=name, email=email, auth=Auth(project.creator))
project.save()
# The new, unregistered user
new_user = OSFUser.objects.get(username=email)
# Instead of following the invitation link, they register at the regular
# registration page
# They use a different name when they register, but same email
real_name = fake.name()
password = 'myprecious'
url = api_url_for('register_user')
payload = {
'fullName': real_name,
'email1': email,
'email2': email,
'password': password,
}
# Send registration request
self.app.post_json(url, payload)
new_user.reload()
# New user confirms by following confirmation link
confirm_url = new_user.get_confirmation_url(email, external=False)
self.app.get(confirm_url)
new_user.reload()
# Password and fullname should be updated
assert_true(new_user.is_confirmed)
assert_true(new_user.check_password(password))
assert_equal(new_user.fullname, real_name)
@mock.patch('framework.auth.views.send_confirm_email')
def test_register_sends_user_registered_signal(self, mock_send_confirm_email):
url = api_url_for('register_user')
name, email, password = fake.name(), fake_email(), 'underpressure'
with capture_signals() as mock_signals:
self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': email,
'password': password,
}
)
assert_equal(mock_signals.signals_sent(), set([auth.signals.user_registered,
auth.signals.unconfirmed_user_created]))
assert_true(mock_send_confirm_email.called)
@mock.patch('framework.auth.views.mails.send_mail')
def test_resend_confirmation(self, send_mail):
email = 'test@mail.com'
token = self.user.add_unconfirmed_email(email)
self.user.save()
url = api_url_for('resend_confirmation')
header = {'address': email, 'primary': False, 'confirmed': False}
self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth)
assert_true(send_mail.called)
assert_true(send_mail.called_with(
to_addr=email
))
self.user.reload()
assert_not_equal(token, self.user.get_confirmation_token(email))
with assert_raises(InvalidTokenError):
self.user.get_unconfirmed_email_for_token(token)
@mock.patch('framework.auth.views.mails.send_mail')
def test_click_confirmation_email(self, send_mail):
email = 'test@mail.com'
token = self.user.add_unconfirmed_email(email)
self.user.save()
self.user.reload()
assert_equal(self.user.email_verifications[token]['confirmed'], False)
        url = '/confirm/{}/{}/?logout=1'.format(self.user._id, token)
res = self.app.get(url)
self.user.reload()
assert_equal(self.user.email_verifications[token]['confirmed'], True)
assert_equal(res.status_code, 302)
login_url = 'login?service'
assert_in(login_url, res.body.decode())
def test_get_email_to_add_no_email(self):
email_verifications = self.user.unconfirmed_email_info
assert_equal(email_verifications, [])
def test_get_unconfirmed_email(self):
email = 'test@mail.com'
self.user.add_unconfirmed_email(email)
self.user.save()
self.user.reload()
email_verifications = self.user.unconfirmed_email_info
assert_equal(email_verifications, [])
def test_get_email_to_add(self):
email = 'test@mail.com'
token = self.user.add_unconfirmed_email(email)
self.user.save()
self.user.reload()
assert_equal(self.user.email_verifications[token]['confirmed'], False)
        url = '/confirm/{}/{}/?logout=1'.format(self.user._id, token)
self.app.get(url)
self.user.reload()
assert_equal(self.user.email_verifications[token]['confirmed'], True)
email_verifications = self.user.unconfirmed_email_info
assert_equal(email_verifications[0]['address'], 'test@mail.com')
def test_add_email(self):
email = 'test@mail.com'
token = self.user.add_unconfirmed_email(email)
self.user.save()
self.user.reload()
assert_equal(self.user.email_verifications[token]['confirmed'], False)
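        # visiting the confirmation link marks the token confirmed; the PUT below adds the address to the user's emails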
url = '/confirm/{}/{}/?logout=1'.format(self.user._id, token)
self.app.get(url)
self.user.reload()
email_verifications = self.user.unconfirmed_email_info
put_email_url = api_url_for('unconfirmed_email_add')
res = self.app.put_json(put_email_url, email_verifications[0], auth=self.user.auth)
self.user.reload()
assert_equal(res.json_body['status'], 'success')
assert_equal(self.user.emails.last().address, 'test@mail.com')
def test_remove_email(self):
email = 'test@mail.com'
token = self.user.add_unconfirmed_email(email)
self.user.save()
self.user.reload()
url = '/confirm/{}/{}/?logout=1'.format(self.user._id, token)
self.app.get(url)
self.user.reload()
email_verifications = self.user.unconfirmed_email_info
remove_email_url = api_url_for('unconfirmed_email_remove')
remove_res = self.app.delete_json(remove_email_url, email_verifications[0], auth=self.user.auth)
self.user.reload()
assert_equal(remove_res.json_body['status'], 'success')
assert_equal(self.user.unconfirmed_email_info, [])
def test_add_expired_email(self):
        # Expired tokens are not returned and are removed from user.email_verifications
email = 'test@mail.com'
token = self.user.add_unconfirmed_email(email)
self.user.email_verifications[token]['expiration'] = timezone.now() - dt.timedelta(days=100)
self.user.save()
self.user.reload()
assert_equal(self.user.email_verifications[token]['email'], email)
self.user.clean_email_verifications(given_token=token)
unconfirmed_emails = self.user.unconfirmed_email_info
assert_equal(unconfirmed_emails, [])
assert_equal(self.user.email_verifications, {})
def test_clean_email_verifications(self):
        # Bad tokens are not returned and are removed from user.email_verifications
email = 'test@mail.com'
token = 'blahblahblah'
self.user.email_verifications[token] = {'expiration': timezone.now() + dt.timedelta(days=1),
'email': email,
'confirmed': False }
self.user.save()
self.user.reload()
assert_equal(self.user.email_verifications[token]['email'], email)
self.user.clean_email_verifications(given_token=token)
unconfirmed_emails = self.user.unconfirmed_email_info
assert_equal(unconfirmed_emails, [])
assert_equal(self.user.email_verifications, {})
def test_clean_email_verifications_when_email_verifications_is_an_empty_dict(self):
self.user.email_verifications = {}
self.user.save()
ret = self.user.clean_email_verifications()
assert_equal(ret, None)
assert_equal(self.user.email_verifications, {})
def test_add_invalid_email(self):
        # Adding an email containing illegal control characters raises a ValidationError
email = u'\u0000\u0008\u000b\u000c\u000e\u001f\ufffe\uffffHello@yourmom.com'
# illegal_str = u'\u0000\u0008\u000b\u000c\u000e\u001f\ufffe\uffffHello'
# illegal_str += unichr(0xd800) + unichr(0xdbff) + ' World'
# email = 'test@mail.com'
with assert_raises(ValidationError):
self.user.add_unconfirmed_email(email)
def test_add_email_merge(self):
email = 'copy@cat.com'
dupe = UserFactory(
username=email,
)
dupe.save()
token = self.user.add_unconfirmed_email(email)
self.user.save()
self.user.reload()
assert_equal(self.user.email_verifications[token]['confirmed'], False)
url = '/confirm/{}/{}/?logout=1'.format(self.user._id, token)
self.app.get(url)
self.user.reload()
email_verifications = self.user.unconfirmed_email_info
put_email_url = api_url_for('unconfirmed_email_add')
res = self.app.put_json(put_email_url, email_verifications[0], auth=self.user.auth)
self.user.reload()
assert_equal(res.json_body['status'], 'success')
assert_equal(self.user.emails.last().address, 'copy@cat.com')
def test_resend_confirmation_without_user_id(self):
email = 'test@mail.com'
url = api_url_for('resend_confirmation')
header = {'address': email, 'primary': False, 'confirmed': False}
res = self.app.put_json(url, {'email': header}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], '"id" is required')
def test_resend_confirmation_without_email(self):
url = api_url_for('resend_confirmation')
res = self.app.put_json(url, {'id': self.user._id}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_resend_confirmation_not_work_for_primary_email(self):
email = 'test@mail.com'
url = api_url_for('resend_confirmation')
header = {'address': email, 'primary': True, 'confirmed': False}
res = self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], 'Cannnot resend confirmation for confirmed emails')
def test_resend_confirmation_not_work_for_confirmed_email(self):
email = 'test@mail.com'
url = api_url_for('resend_confirmation')
header = {'address': email, 'primary': False, 'confirmed': True}
res = self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], 'Cannnot resend confirmation for confirmed emails')
@mock.patch('framework.auth.views.mails.send_mail')
def test_resend_confirmation_does_not_send_before_throttle_expires(self, send_mail):
email = 'test@mail.com'
self.user.save()
url = api_url_for('resend_confirmation')
header = {'address': email, 'primary': False, 'confirmed': False}
self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth)
assert_true(send_mail.called)
# 2nd call does not send email because throttle period has not expired
res = self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_confirm_email_clears_unclaimed_records_and_revokes_token(self):
unclaimed_user = UnconfirmedUserFactory()
# unclaimed user has been invited to a project.
referrer = UserFactory()
project = ProjectFactory(creator=referrer)
unclaimed_user.add_unclaimed_record(project, referrer, 'foo')
unclaimed_user.save()
# sanity check
assert_equal(len(unclaimed_user.email_verifications.keys()), 1)
# user goes to email confirmation link
token = unclaimed_user.get_confirmation_token(unclaimed_user.username)
url = web_url_for('confirm_email_get', uid=unclaimed_user._id, token=token)
res = self.app.get(url)
assert_equal(res.status_code, 302)
# unclaimed records and token are cleared
unclaimed_user.reload()
assert_equal(unclaimed_user.unclaimed_records, {})
assert_equal(len(unclaimed_user.email_verifications.keys()), 0)
def test_confirmation_link_registers_user(self):
user = OSFUser.create_unconfirmed('brian@queen.com', 'bicycle123', 'Brian May')
assert_false(user.is_registered) # sanity check
user.save()
confirmation_url = user.get_confirmation_url('brian@queen.com', external=False)
res = self.app.get(confirmation_url)
assert_equal(res.status_code, 302, 'redirects to settings page')
res = res.follow()
user.reload()
assert_true(user.is_registered)
class TestAuthLoginAndRegisterLogic(OsfTestCase):
def setUp(self):
super(TestAuthLoginAndRegisterLogic, self).setUp()
self.no_auth = Auth()
self.user_auth = AuthUserFactory()
self.auth = Auth(user=self.user_auth)
self.next_url = web_url_for('my_projects', _absolute=True)
self.invalid_campaign = 'invalid_campaign'
def test_osf_login_with_auth(self):
# login: user with auth
data = login_and_register_handler(self.auth)
assert_equal(data.get('status_code'), http_status.HTTP_302_FOUND)
assert_equal(data.get('next_url'), web_url_for('dashboard', _absolute=True))
def test_osf_login_without_auth(self):
# login: user without auth
data = login_and_register_handler(self.no_auth)
assert_equal(data.get('status_code'), http_status.HTTP_302_FOUND)
assert_equal(data.get('next_url'), web_url_for('dashboard', _absolute=True))
def test_osf_register_with_auth(self):
# register: user with auth
data = login_and_register_handler(self.auth, login=False)
assert_equal(data.get('status_code'), http_status.HTTP_302_FOUND)
assert_equal(data.get('next_url'), web_url_for('dashboard', _absolute=True))
def test_osf_register_without_auth(self):
# register: user without auth
data = login_and_register_handler(self.no_auth, login=False)
assert_equal(data.get('status_code'), http_status.HTTP_200_OK)
assert_equal(data.get('next_url'), web_url_for('dashboard', _absolute=True))
def test_next_url_login_with_auth(self):
# next_url login: user with auth
data = login_and_register_handler(self.auth, next_url=self.next_url)
assert_equal(data.get('status_code'), http_status.HTTP_302_FOUND)
assert_equal(data.get('next_url'), self.next_url)
def test_next_url_login_without_auth(self):
# login: user without auth
request.url = web_url_for('auth_login', next=self.next_url, _absolute=True)
data = login_and_register_handler(self.no_auth, next_url=self.next_url)
assert_equal(data.get('status_code'), http_status.HTTP_302_FOUND)
assert_equal(data.get('next_url'), get_login_url(request.url))
def test_next_url_register_with_auth(self):
# register: user with auth
data = login_and_register_handler(self.auth, login=False, next_url=self.next_url)
assert_equal(data.get('status_code'), http_status.HTTP_302_FOUND)
assert_equal(data.get('next_url'), self.next_url)
def test_next_url_register_without_auth(self):
# register: user without auth
data = login_and_register_handler(self.no_auth, login=False, next_url=self.next_url)
assert_equal(data.get('status_code'), http_status.HTTP_200_OK)
assert_equal(data.get('next_url'), request.url)
def test_institution_login_and_register(self):
pass
def test_institution_login_with_auth(self):
# institution login: user with auth
data = login_and_register_handler(self.auth, campaign='institution')
assert_equal(data.get('status_code'), http_status.HTTP_302_FOUND)
assert_equal(data.get('next_url'), web_url_for('dashboard', _absolute=True))
def test_institution_login_without_auth(self):
# institution login: user without auth
data = login_and_register_handler(self.no_auth, campaign='institution')
assert_equal(data.get('status_code'), http_status.HTTP_302_FOUND)
assert_equal(
data.get('next_url'),
get_login_url(web_url_for('dashboard', _absolute=True), campaign='institution'))
def test_institution_login_next_url_with_auth(self):
# institution login: user with auth and next url
data = login_and_register_handler(self.auth, next_url=self.next_url, campaign='institution')
assert_equal(data.get('status_code'), http_status.HTTP_302_FOUND)
assert_equal(data.get('next_url'), self.next_url)
def test_institution_login_next_url_without_auth(self):
# institution login: user without auth and next url
        data = login_and_register_handler(self.no_auth, next_url=self.next_url, campaign='institution')
assert_equal(data.get('status_code'), http_status.HTTP_302_FOUND)
assert_equal(
data.get('next_url'),
get_login_url(self.next_url, campaign='institution'))
    def test_institution_register_with_auth(self):
# institution register: user with auth
data = login_and_register_handler(self.auth, login=False, campaign='institution')
assert_equal(data.get('status_code'), http_status.HTTP_302_FOUND)
assert_equal(data.get('next_url'), web_url_for('dashboard', _absolute=True))
def test_institution_register_without_auth(self):
# institution register: user without auth
data = login_and_register_handler(self.no_auth, login=False, campaign='institution')
assert_equal(data.get('status_code'), http_status.HTTP_302_FOUND)
assert_equal(
data.get('next_url'),
get_login_url(web_url_for('dashboard', _absolute=True), campaign='institution')
)
def test_campaign_login_with_auth(self):
for campaign in get_campaigns():
if is_institution_login(campaign):
continue
# campaign login: user with auth
data = login_and_register_handler(self.auth, campaign=campaign)
assert_equal(data.get('status_code'), http_status.HTTP_302_FOUND)
assert_equal(data.get('next_url'), campaign_url_for(campaign))
def test_campaign_login_without_auth(self):
for campaign in get_campaigns():
if is_institution_login(campaign):
continue
# campaign login: user without auth
data = login_and_register_handler(self.no_auth, campaign=campaign)
assert_equal(data.get('status_code'), http_status.HTTP_302_FOUND)
assert_equal(
data.get('next_url'),
web_url_for('auth_register', campaign=campaign, next=campaign_url_for(campaign))
)
def test_campaign_register_with_auth(self):
for campaign in get_campaigns():
if is_institution_login(campaign):
continue
# campaign register: user with auth
data = login_and_register_handler(self.auth, login=False, campaign=campaign)
assert_equal(data.get('status_code'), http_status.HTTP_302_FOUND)
assert_equal(data.get('next_url'), campaign_url_for(campaign))
def test_campaign_register_without_auth(self):
for campaign in get_campaigns():
if is_institution_login(campaign):
continue
# campaign register: user without auth
data = login_and_register_handler(self.no_auth, login=False, campaign=campaign)
assert_equal(data.get('status_code'), http_status.HTTP_200_OK)
if is_native_login(campaign):
# native campaign: prereg and erpc
assert_equal(data.get('next_url'), campaign_url_for(campaign))
elif is_proxy_login(campaign):
# proxy campaign: preprints and branded ones
assert_equal(
data.get('next_url'),
web_url_for('auth_login', next=campaign_url_for(campaign), _absolute=True)
)
def test_campaign_next_url_login_with_auth(self):
for campaign in get_campaigns():
if is_institution_login(campaign):
continue
# campaign login: user with auth
next_url = campaign_url_for(campaign)
data = login_and_register_handler(self.auth, campaign=campaign, next_url=next_url)
assert_equal(data.get('status_code'), http_status.HTTP_302_FOUND)
assert_equal(data.get('next_url'), next_url)
def test_campaign_next_url_login_without_auth(self):
for campaign in get_campaigns():
if is_institution_login(campaign):
continue
# campaign login: user without auth
next_url = campaign_url_for(campaign)
data = login_and_register_handler(self.no_auth, campaign=campaign, next_url=next_url)
assert_equal(data.get('status_code'), http_status.HTTP_302_FOUND)
assert_equal(
data.get('next_url'),
web_url_for('auth_register', campaign=campaign, next=next_url)
)
def test_campaign_next_url_register_with_auth(self):
for campaign in get_campaigns():
if is_institution_login(campaign):
continue
# campaign register: user with auth
next_url = campaign_url_for(campaign)
data = login_and_register_handler(self.auth, login=False, campaign=campaign, next_url=next_url)
assert_equal(data.get('status_code'), http_status.HTTP_302_FOUND)
assert_equal(data.get('next_url'), next_url)
def test_campaign_next_url_register_without_auth(self):
for campaign in get_campaigns():
if is_institution_login(campaign):
continue
# campaign register: user without auth
next_url = campaign_url_for(campaign)
data = login_and_register_handler(self.no_auth, login=False, campaign=campaign, next_url=next_url)
assert_equal(data.get('status_code'), http_status.HTTP_200_OK)
if is_native_login(campaign):
# native campaign: prereg and erpc
assert_equal(data.get('next_url'), next_url)
elif is_proxy_login(campaign):
# proxy campaign: preprints and branded ones
assert_equal(
data.get('next_url'),
web_url_for('auth_login', next=next_url, _absolute=True)
)
def test_invalid_campaign_login_without_auth(self):
data = login_and_register_handler(
self.no_auth,
login=True,
campaign=self.invalid_campaign,
next_url=self.next_url
)
redirect_url = web_url_for('auth_login', campaigns=None, next=self.next_url)
assert_equal(data['status_code'], http_status.HTTP_302_FOUND)
assert_equal(data['next_url'], redirect_url)
assert_equal(data['campaign'], None)
def test_invalid_campaign_register_without_auth(self):
data = login_and_register_handler(
self.no_auth,
login=False,
campaign=self.invalid_campaign,
next_url=self.next_url
)
redirect_url = web_url_for('auth_register', campaigns=None, next=self.next_url)
assert_equal(data['status_code'], http_status.HTTP_302_FOUND)
assert_equal(data['next_url'], redirect_url)
assert_equal(data['campaign'], None)
# The following two tests handle the special case for `claim_user_registered`.
# When an authenticated user clicks the claim confirmation link, there are two ways to trigger this flow:
# 1. If the authenticated user is already a contributor to the project, OSF asks the user to sign out
# by providing a "logout" link.
# 2. If the authenticated user is not a contributor but decides not to claim contributorship under this account,
# OSF provides a "not <username>?" link for the user to log out.
# Both links land the user on the register page with a "MUST LOGIN" push notification.
def test_register_logout_flag_with_auth(self):
# when the user clicks the "logout" or "not <username>?" link, the first step is to log the user out
data = login_and_register_handler(self.auth, login=False, campaign=None, next_url=self.next_url, logout=True)
assert_equal(data.get('status_code'), 'auth_logout')
assert_equal(data.get('next_url'), self.next_url)
def test_register_logout_flag_without_auth(self):
# the second step is to land the user on the register page with the "MUST LOGIN" warning
data = login_and_register_handler(self.no_auth, login=False, campaign=None, next_url=self.next_url, logout=True)
assert_equal(data.get('status_code'), http_status.HTTP_200_OK)
assert_equal(data.get('next_url'), self.next_url)
assert_true(data.get('must_login_warning'))
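# Tests for the auth_logout view: next/redirect URL validation and redirection through CAS logout.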
class TestAuthLogout(OsfTestCase):
def setUp(self):
super(TestAuthLogout, self).setUp()
self.goodbye_url = web_url_for('goodbye', _absolute=True)
self.redirect_url = web_url_for('forgot_password_get', _absolute=True)
self.valid_next_url = web_url_for('dashboard', _absolute=True)
self.invalid_next_url = 'http://localhost:1234/abcde'
self.auth_user = AuthUserFactory()
def tearDown(self):
super(TestAuthLogout, self).tearDown()
OSFUser.objects.all().delete()
assert_equal(OSFUser.objects.count(), 0)
def test_logout_with_valid_next_url_logged_in(self):
logout_url = web_url_for('auth_logout', _absolute=True, next=self.valid_next_url)
resp = self.app.get(logout_url, auth=self.auth_user.auth)
assert_equal(resp.status_code, http_status.HTTP_302_FOUND)
assert_equal(cas.get_logout_url(logout_url), resp.headers['Location'])
def test_logout_with_valid_next_url_logged_out(self):
logout_url = web_url_for('auth_logout', _absolute=True, next=self.valid_next_url)
resp = self.app.get(logout_url, auth=None)
assert_equal(resp.status_code, http_status.HTTP_302_FOUND)
assert_equal(self.valid_next_url, resp.headers['Location'])
def test_logout_with_invalid_next_url_logged_in(self):
logout_url = web_url_for('auth_logout', _absolute=True, next=self.invalid_next_url)
resp = self.app.get(logout_url, auth=self.auth_user.auth)
assert_equal(resp.status_code, http_status.HTTP_302_FOUND)
assert_equal(cas.get_logout_url(self.goodbye_url), resp.headers['Location'])
def test_logout_with_invalid_next_url_logged_out(self):
logout_url = web_url_for('auth_logout', _absolute=True, next=self.invalid_next_url)
resp = self.app.get(logout_url, auth=None)
assert_equal(resp.status_code, http_status.HTTP_302_FOUND)
assert_equal(cas.get_logout_url(self.goodbye_url), resp.headers['Location'])
def test_logout_with_redirect_url(self):
logout_url = web_url_for('auth_logout', _absolute=True, redirect_url=self.redirect_url)
resp = self.app.get(logout_url, auth=self.auth_user.auth)
assert_equal(resp.status_code, http_status.HTTP_302_FOUND)
assert_equal(cas.get_logout_url(self.redirect_url), resp.headers['Location'])
def test_logout_with_no_parameter(self):
logout_url = web_url_for('auth_logout', _absolute=True)
resp = self.app.get(logout_url, auth=None)
assert_equal(resp.status_code, http_status.HTTP_302_FOUND)
assert_equal(cas.get_logout_url(self.goodbye_url), resp.headers['Location'])
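# Tests for external-identity (ORCiD) email confirmation: CREATE vs. LINK states and identity uniqueness.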
class TestExternalAuthViews(OsfTestCase):
def setUp(self):
super(TestExternalAuthViews, self).setUp()
name, email = fake.name(), fake_email()
self.provider_id = fake.ean()
external_identity = {
'orcid': {
self.provider_id: 'CREATE'
}
}
self.user = OSFUser.create_unconfirmed(
username=email,
password=str(fake.password()),
fullname=name,
external_identity=external_identity,
)
self.user.save()
self.auth = Auth(self.user)
def test_external_login_email_get_with_invalid_session(self):
url = web_url_for('external_login_email_get')
resp = self.app.get(url, expect_errors=True)
assert_equal(resp.status_code, 401)
def test_external_login_confirm_email_get_with_another_user_logged_in(self):
another_user = AuthUserFactory()
url = self.user.get_confirmation_url(self.user.username, external_id_provider='orcid', destination='dashboard')
res = self.app.get(url, auth=another_user.auth)
assert_equal(res.status_code, 302, 'redirects to cas logout')
assert_in('/logout?service=', res.location)
assert_in(url, res.location)
def test_external_login_confirm_email_get_without_destination(self):
url = self.user.get_confirmation_url(self.user.username, external_id_provider='orcid')
res = self.app.get(url, auth=self.auth, expect_errors=True)
assert_equal(res.status_code, 400, 'bad request')
@mock.patch('website.mails.send_mail')
def test_external_login_confirm_email_get_create(self, mock_welcome):
assert_false(self.user.is_registered)
url = self.user.get_confirmation_url(self.user.username, external_id_provider='orcid', destination='dashboard')
res = self.app.get(url, auth=self.auth)
assert_equal(res.status_code, 302, 'redirects to cas login')
assert_in('/login?service=', res.location)
assert_in('new=true', res.location)
assert_equal(mock_welcome.call_count, 1)
self.user.reload()
assert_equal(self.user.external_identity['orcid'][self.provider_id], 'VERIFIED')
assert_true(self.user.is_registered)
assert_true(self.user.has_usable_password())
@mock.patch('website.mails.send_mail')
def test_external_login_confirm_email_get_link(self, mock_link_confirm):
self.user.external_identity['orcid'][self.provider_id] = 'LINK'
self.user.save()
assert_false(self.user.is_registered)
url = self.user.get_confirmation_url(self.user.username, external_id_provider='orcid', destination='dashboard')
res = self.app.get(url, auth=self.auth)
assert_equal(res.status_code, 302, 'redirects to cas login')
assert_in('/login?service=', res.location)
assert_not_in('new=true', res.location)
assert_equal(mock_link_confirm.call_count, 1)
self.user.reload()
assert_equal(self.user.external_identity['orcid'][self.provider_id], 'VERIFIED')
assert_true(self.user.is_registered)
assert_true(self.user.has_usable_password())
@mock.patch('website.mails.send_mail')
def test_external_login_confirm_email_get_duped_id(self, mock_confirm):
dupe_user = UserFactory(external_identity={'orcid': {self.provider_id: 'CREATE'}})
assert_equal(dupe_user.external_identity, self.user.external_identity)
url = self.user.get_confirmation_url(self.user.username, external_id_provider='orcid', destination='dashboard')
res = self.app.get(url, auth=self.auth)
assert_equal(res.status_code, 302, 'redirects to cas login')
assert_in('/login?service=', res.location)
assert_equal(mock_confirm.call_count, 1)
self.user.reload()
dupe_user.reload()
assert_equal(self.user.external_identity['orcid'][self.provider_id], 'VERIFIED')
assert_equal(dupe_user.external_identity, {})
@mock.patch('website.mails.send_mail')
def test_external_login_confirm_email_get_duping_id(self, mock_confirm):
dupe_user = UserFactory(external_identity={'orcid': {self.provider_id: 'VERIFIED'}})
url = self.user.get_confirmation_url(self.user.username, external_id_provider='orcid', destination='dashboard')
res = self.app.get(url, auth=self.auth, expect_errors=True)
assert_equal(res.status_code, 403, 'only allows one user to link an id')
assert_equal(mock_confirm.call_count, 0)
self.user.reload()
dupe_user.reload()
assert_equal(dupe_user.external_identity['orcid'][self.provider_id], 'VERIFIED')
assert_equal(self.user.external_identity, {})
def test_ensure_external_identity_uniqueness_unverified(self):
dupe_user = UserFactory(external_identity={'orcid': {self.provider_id: 'CREATE'}})
assert_equal(dupe_user.external_identity, self.user.external_identity)
ensure_external_identity_uniqueness('orcid', self.provider_id, self.user)
dupe_user.reload()
self.user.reload()
assert_equal(dupe_user.external_identity, {})
assert_equal(self.user.external_identity, {'orcid': {self.provider_id: 'CREATE'}})
def test_ensure_external_identity_uniqueness_verified(self):
dupe_user = UserFactory(external_identity={'orcid': {self.provider_id: 'VERIFIED'}})
assert_equal(dupe_user.external_identity, {'orcid': {self.provider_id: 'VERIFIED'}})
assert_not_equal(dupe_user.external_identity, self.user.external_identity)
with assert_raises(ValidationError):
ensure_external_identity_uniqueness('orcid', self.provider_id, self.user)
dupe_user.reload()
self.user.reload()
assert_equal(dupe_user.external_identity, {'orcid': {self.provider_id: 'VERIFIED'}})
assert_equal(self.user.external_identity, {})
def test_ensure_external_identity_uniqueness_multiple(self):
dupe_user = UserFactory(external_identity={'orcid': {self.provider_id: 'CREATE'}})
assert_equal(dupe_user.external_identity, self.user.external_identity)
ensure_external_identity_uniqueness('orcid', self.provider_id)
dupe_user.reload()
self.user.reload()
assert_equal(dupe_user.external_identity, {})
assert_equal(self.user.external_identity, {})
# TODO: Use mock add-on
class TestAddonUserViews(OsfTestCase):
def setUp(self):
super(TestAddonUserViews, self).setUp()
self.user = AuthUserFactory()
def test_choose_addons_add(self):
"""Add add-ons; assert that add-ons are attached to project.
"""
url = '/api/v1/settings/addons/'
self.app.post_json(
url,
{'github': True},
auth=self.user.auth,
).maybe_follow()
self.user.reload()
assert_true(self.user.get_addon('github'))
def test_choose_addons_remove(self):
# Add, then remove, add-ons; assert that the add-ons are no longer
# enabled for the user.
url = '/api/v1/settings/addons/'
self.app.post_json(
url,
{'github': True},
auth=self.user.auth,
).maybe_follow()
self.app.post_json(
url,
{'github': False},
auth=self.user.auth
).maybe_follow()
self.user.reload()
assert_false(self.user.get_addon('github'))
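# Tests for mailing-list preference views and syncing subscription state from mailchimp webhooks.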
@pytest.mark.enable_enqueue_task
class TestConfigureMailingListViews(OsfTestCase):
@classmethod
def setUpClass(cls):
super(TestConfigureMailingListViews, cls).setUpClass()
cls._original_enable_email_subscriptions = settings.ENABLE_EMAIL_SUBSCRIPTIONS
settings.ENABLE_EMAIL_SUBSCRIPTIONS = True
def test_user_unsubscribe_and_subscribe_help_mailing_list(self):
user = AuthUserFactory()
url = api_url_for('user_choose_mailing_lists')
payload = {settings.OSF_HELP_LIST: False}
res = self.app.post_json(url, payload, auth=user.auth)
user.reload()
assert_false(user.osf_mailing_lists[settings.OSF_HELP_LIST])
payload = {settings.OSF_HELP_LIST: True}
res = self.app.post_json(url, payload, auth=user.auth)
user.reload()
assert_true(user.osf_mailing_lists[settings.OSF_HELP_LIST])
def test_get_notifications(self):
user = AuthUserFactory()
mailing_lists = dict(list(user.osf_mailing_lists.items()) + list(user.mailchimp_mailing_lists.items()))
url = api_url_for('user_notifications')
res = self.app.get(url, auth=user.auth)
assert_equal(mailing_lists, res.json['mailing_lists'])
def test_osf_help_mails_subscribe(self):
user = UserFactory()
user.osf_mailing_lists[settings.OSF_HELP_LIST] = False
user.save()
update_osf_help_mails_subscription(user, True)
assert_true(user.osf_mailing_lists[settings.OSF_HELP_LIST])
def test_osf_help_mails_unsubscribe(self):
user = UserFactory()
user.osf_mailing_lists[settings.OSF_HELP_LIST] = True
user.save()
update_osf_help_mails_subscription(user, False)
assert_false(user.osf_mailing_lists[settings.OSF_HELP_LIST])
@unittest.skipIf(settings.USE_CELERY, 'Subscription must happen synchronously for this test')
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_user_choose_mailing_lists_updates_user_dict(self, mock_get_mailchimp_api):
user = AuthUserFactory()
list_name = 'OSF General'
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}
list_id = mailchimp_utils.get_list_id_from_name(list_name)
payload = {settings.MAILCHIMP_GENERAL_LIST: True}
url = api_url_for('user_choose_mailing_lists')
res = self.app.post_json(url, payload, auth=user.auth)
# the test app doesn't have celery handlers attached, so we need to call this manually.
handlers.celery_teardown_request()
user.reload()
# check user.mailing_lists is updated
assert_true(user.mailchimp_mailing_lists[settings.MAILCHIMP_GENERAL_LIST])
assert_equal(
user.mailchimp_mailing_lists[settings.MAILCHIMP_GENERAL_LIST],
payload[settings.MAILCHIMP_GENERAL_LIST]
)
# check that user is subscribed
mock_client.lists.subscribe.assert_called_with(id=list_id,
email={'email': user.username},
merge_vars={
'fname': user.given_name,
'lname': user.family_name,
},
double_optin=False,
update_existing=True)
def test_get_mailchimp_get_endpoint_returns_200(self):
url = api_url_for('mailchimp_get_endpoint')
res = self.app.get(url)
assert_equal(res.status_code, 200)
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_mailchimp_webhook_subscribe_action_updates_user(self, mock_get_mailchimp_api):
""" Test that 'subscribe' actions sent to the OSF via mailchimp
webhooks update the OSF database.
"""
list_id = '12345'
list_name = 'OSF General'
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': list_name}]}
# user is not subscribed to a list
user = AuthUserFactory()
user.mailchimp_mailing_lists = {'OSF General': False}
user.save()
# user subscribes and webhook sends request to OSF
data = {
'type': 'subscribe',
'data[list_id]': list_id,
'data[email]': user.username
}
url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY
res = self.app.post(url,
data,
content_type='application/x-www-form-urlencoded',
auth=user.auth)
# user field is updated on the OSF
user.reload()
assert_true(user.mailchimp_mailing_lists[list_name])
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_mailchimp_webhook_profile_action_does_not_change_user(self, mock_get_mailchimp_api):
""" Test that 'profile' actions sent to the OSF via mailchimp
webhooks do not cause any database changes.
"""
list_id = '12345'
list_name = 'OSF General'
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': list_name}]}
# user is subscribed to a list
user = AuthUserFactory()
user.mailchimp_mailing_lists = {'OSF General': True}
user.save()
# user hits subscribe again, which will update the user's existing info on mailchimp
# webhook sends request (when configured to update on changes made through the API)
data = {
'type': 'profile',
'data[list_id]': list_id,
'data[email]': user.username
}
url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY
res = self.app.post(url,
data,
content_type='application/x-www-form-urlencoded',
auth=user.auth)
# user field does not change
user.reload()
assert_true(user.mailchimp_mailing_lists[list_name])
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_sync_data_from_mailchimp_unsubscribes_user(self, mock_get_mailchimp_api):
list_id = '12345'
list_name = 'OSF General'
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': list_name}]}
# user is subscribed to a list
user = AuthUserFactory()
user.mailchimp_mailing_lists = {'OSF General': True}
user.save()
# user unsubscribes through mailchimp and webhook sends request
data = {
'type': 'unsubscribe',
'data[list_id]': list_id,
'data[email]': user.username
}
url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY
res = self.app.post(url,
data,
content_type='application/x-www-form-urlencoded',
auth=user.auth)
# user field is updated on the OSF
user.reload()
assert_false(user.mailchimp_mailing_lists[list_name])
def test_sync_data_from_mailchimp_fails_without_secret_key(self):
user = AuthUserFactory()
payload = {'values': {'type': 'unsubscribe',
'data': {'list_id': '12345',
'email': 'freddie@cos.io'}}}
url = api_url_for('sync_data_from_mailchimp')
res = self.app.post_json(url, payload, auth=user.auth, expect_errors=True)
assert_equal(res.status_code, http_status.HTTP_401_UNAUTHORIZED)
@classmethod
def tearDownClass(cls):
super(TestConfigureMailingListViews, cls).tearDownClass()
settings.ENABLE_EMAIL_SUBSCRIPTIONS = cls._original_enable_email_subscriptions
# TODO: Move to OSF Storage
class TestFileViews(OsfTestCase):
def setUp(self):
super(TestFileViews, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user, is_public=True)
self.project.add_contributor(self.user)
self.project.save()
def test_grid_data(self):
url = self.project.api_url_for('grid_data')
res = self.app.get(url, auth=self.user.auth).maybe_follow()
assert_equal(res.status_code, http_status.HTTP_200_OK)
expected = rubeus.to_hgrid(self.project, auth=Auth(self.user))
data = res.json['data']
assert_equal(len(data), len(expected))
class TestTagViews(OsfTestCase):
def setUp(self):
super(TestTagViews, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
@unittest.skip('Tags endpoint disabled for now.')
def test_tag_get_returns_200(self):
url = web_url_for('project_tag', tag='foo')
res = self.app.get(url)
assert_equal(res.status_code, 200)
class TestReorderComponents(OsfTestCase):
def setUp(self):
super(TestReorderComponents, self).setUp()
self.creator = AuthUserFactory()
self.contrib = AuthUserFactory()
# Project is public
self.project = ProjectFactory.create(creator=self.creator, is_public=True)
self.project.add_contributor(self.contrib, auth=Auth(self.creator))
# subcomponents: one public, one private (only the creator can see the private one)
self.public_component = NodeFactory(creator=self.creator, is_public=True)
self.private_component = NodeFactory(creator=self.creator, is_public=False)
NodeRelation.objects.create(parent=self.project, child=self.public_component)
NodeRelation.objects.create(parent=self.project, child=self.private_component)
self.project.save()
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/489
def test_reorder_components_with_private_component(self):
# contrib tries to reorder components
payload = {
'new_list': [
'{0}'.format(self.private_component._id),
'{0}'.format(self.public_component._id),
]
}
url = self.project.api_url_for('project_reorder_components')
res = self.app.post_json(url, payload, auth=self.contrib.auth)
assert_equal(res.status_code, 200)
class TestWikiWidgetViews(OsfTestCase):
def setUp(self):
super(TestWikiWidgetViews, self).setUp()
# project with no home wiki page
self.project = ProjectFactory()
self.read_only_contrib = AuthUserFactory()
self.project.add_contributor(self.read_only_contrib, permissions=permissions.READ)
self.noncontributor = AuthUserFactory()
# project with no home wiki content
self.project2 = ProjectFactory(creator=self.project.creator)
self.project2.add_contributor(self.read_only_contrib, permissions=permissions.READ)
WikiPage.objects.create_for_node(self.project2, 'home', '', Auth(self.project.creator))
def test_show_wiki_for_contributors_when_no_wiki_or_content(self):
assert_true(_should_show_wiki_widget(self.project, self.project.creator))
assert_true(_should_show_wiki_widget(self.project2, self.project.creator))
def test_show_wiki_is_false_for_read_contributors_when_no_wiki_or_content(self):
assert_false(_should_show_wiki_widget(self.project, self.read_only_contrib))
assert_false(_should_show_wiki_widget(self.project2, self.read_only_contrib))
def test_show_wiki_is_false_for_noncontributors_when_no_wiki_or_content(self):
assert_false(_should_show_wiki_widget(self.project, None))
def test_show_wiki_for_osf_group_members(self):
group = OSFGroupFactory(creator=self.noncontributor)
self.project.add_osf_group(group, permissions.READ)
assert_false(_should_show_wiki_widget(self.project, self.noncontributor))
assert_false(_should_show_wiki_widget(self.project2, self.noncontributor))
self.project.remove_osf_group(group)
self.project.add_osf_group(group, permissions.WRITE)
assert_true(_should_show_wiki_widget(self.project, self.noncontributor))
assert_false(_should_show_wiki_widget(self.project2, self.noncontributor))
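# Tests for project and component creation views, including title validation and contributor/group inheritance.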
@pytest.mark.enable_implicit_clean
@pytest.mark.enable_bookmark_creation
class TestProjectCreation(OsfTestCase):
def setUp(self):
super(TestProjectCreation, self).setUp()
self.creator = AuthUserFactory()
self.url = api_url_for('project_new_post')
self.user1 = AuthUserFactory()
self.user2 = AuthUserFactory()
self.project = ProjectFactory(creator=self.user1)
self.project.add_contributor(self.user2, auth=Auth(self.user1))
self.project.save()
def tearDown(self):
super(TestProjectCreation, self).tearDown()
def test_needs_title(self):
res = self.app.post_json(self.url, {}, auth=self.creator.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_create_component_strips_html(self):
user = AuthUserFactory()
project = ProjectFactory(creator=user)
url = web_url_for('project_new_node', pid=project._id)
post_data = {'title': '<b>New <blink>Component</blink> Title</b>', 'category': ''}
request = self.app.post(url, post_data, auth=user.auth).follow()
project.reload()
child = project.nodes[0]
# HTML has been stripped
assert_equal(child.title, 'New Component Title')
def test_strip_html_from_title(self):
payload = {
'title': 'no html <b>here</b>'
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
node = AbstractNode.load(res.json['projectUrl'].replace('/', ''))
assert_true(node)
assert_equal('no html here', node.title)
def test_only_needs_title(self):
payload = {
'title': 'Im a real title'
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
assert_equal(res.status_code, 201)
def test_title_must_be_one_long(self):
payload = {
'title': ''
}
res = self.app.post_json(
self.url, payload, auth=self.creator.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_title_must_be_less_than_200(self):
payload = {
'title': ''.join([str(x) for x in range(0, 250)])
}
res = self.app.post_json(
self.url, payload, auth=self.creator.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_fails_to_create_project_with_whitespace_title(self):
payload = {
'title': ' '
}
res = self.app.post_json(
self.url, payload, auth=self.creator.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_creates_a_project(self):
payload = {
'title': 'Im a real title'
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
assert_equal(res.status_code, 201)
node = AbstractNode.load(res.json['projectUrl'].replace('/', ''))
assert_true(node)
assert_equal(node.title, 'Im a real title')
def test_create_component_add_contributors_admin(self):
url = web_url_for('project_new_node', pid=self.project._id)
post_data = {'title': 'New Component With Contributors Title', 'category': '', 'inherit_contributors': True}
res = self.app.post(url, post_data, auth=self.user1.auth)
self.project.reload()
child = self.project.nodes[0]
assert_equal(child.title, 'New Component With Contributors Title')
assert_in(self.user1, child.contributors)
assert_in(self.user2, child.contributors)
# check redirect url
assert_in('/contributors/', res.location)
def test_create_component_with_contributors_read_write(self):
url = web_url_for('project_new_node', pid=self.project._id)
non_admin = AuthUserFactory()
read_user = AuthUserFactory()
group = OSFGroupFactory(creator=read_user)
self.project.add_contributor(non_admin, permissions=permissions.WRITE)
self.project.add_contributor(read_user, permissions=permissions.READ)
self.project.add_osf_group(group, permissions.ADMIN)
self.project.save()
post_data = {'title': 'New Component With Contributors Title', 'category': '', 'inherit_contributors': True}
res = self.app.post(url, post_data, auth=non_admin.auth)
self.project.reload()
child = self.project.nodes[0]
assert_equal(child.title, 'New Component With Contributors Title')
assert_in(non_admin, child.contributors)
assert_in(self.user1, child.contributors)
assert_in(self.user2, child.contributors)
assert_in(read_user, child.contributors)
assert child.has_permission(non_admin, permissions.ADMIN) is True
assert child.has_permission(non_admin, permissions.WRITE) is True
assert child.has_permission(non_admin, permissions.READ) is True
# read_user was a read contrib on the parent, but was an admin group member
# read contrib perms copied over
assert child.has_permission(read_user, permissions.ADMIN) is False
assert child.has_permission(read_user, permissions.WRITE) is False
assert child.has_permission(read_user, permissions.READ) is True
# User creating the component was not a manager on the group
assert group not in child.osf_groups
# check redirect url
assert_in('/contributors/', res.location)
def test_group_copied_over_to_component_if_manager(self):
url = web_url_for('project_new_node', pid=self.project._id)
non_admin = AuthUserFactory()
write_user = AuthUserFactory()
group = OSFGroupFactory(creator=write_user)
self.project.add_contributor(non_admin, permissions=permissions.WRITE)
self.project.add_contributor(write_user, permissions=permissions.WRITE)
self.project.add_osf_group(group, permissions.ADMIN)
self.project.save()
post_data = {'title': 'New Component With Contributors Title', 'category': '', 'inherit_contributors': True}
res = self.app.post(url, post_data, auth=write_user.auth)
self.project.reload()
child = self.project.nodes[0]
assert_equal(child.title, 'New Component With Contributors Title')
assert_in(non_admin, child.contributors)
assert_in(self.user1, child.contributors)
assert_in(self.user2, child.contributors)
assert_in(write_user, child.contributors)
assert child.has_permission(non_admin, permissions.ADMIN) is False
assert child.has_permission(non_admin, permissions.WRITE) is True
assert child.has_permission(non_admin, permissions.READ) is True
# Component creator gets admin
assert child.has_permission(write_user, permissions.ADMIN) is True
assert child.has_permission(write_user, permissions.WRITE) is True
assert child.has_permission(write_user, permissions.READ) is True
# User creating the component was a manager of the group, so group copied
assert group in child.osf_groups
# check redirect url
assert_in('/contributors/', res.location)
def test_create_component_with_contributors_read(self):
url = web_url_for('project_new_node', pid=self.project._id)
non_admin = AuthUserFactory()
self.project.add_contributor(non_admin, permissions=permissions.READ)
self.project.save()
post_data = {'title': 'New Component With Contributors Title', 'category': '', 'inherit_contributors': True}
res = self.app.post(url, post_data, auth=non_admin.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_create_component_add_no_contributors(self):
url = web_url_for('project_new_node', pid=self.project._id)
post_data = {'title': 'New Component With Contributors Title', 'category': ''}
res = self.app.post(url, post_data, auth=self.user1.auth)
self.project.reload()
child = self.project.nodes[0]
assert_equal(child.title, 'New Component With Contributors Title')
assert_in(self.user1, child.contributors)
assert_not_in(self.user2, child.contributors)
# check redirect url
assert_not_in('/contributors/', res.location)
def test_new_project_returns_serialized_node_data(self):
payload = {
'title': 'Im a real title'
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
assert_equal(res.status_code, 201)
node = res.json['newNode']
assert_true(node)
assert_equal(node['title'], 'Im a real title')
def test_description_works(self):
payload = {
'title': 'Im a real title',
'description': 'I describe things!'
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
assert_equal(res.status_code, 201)
node = AbstractNode.load(res.json['projectUrl'].replace('/', ''))
assert_true(node)
assert_equal(node.description, 'I describe things!')
def test_can_template(self):
other_node = ProjectFactory(creator=self.creator)
payload = {
'title': 'Im a real title',
'template': other_node._id
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
assert_equal(res.status_code, 201)
node = AbstractNode.load(res.json['projectUrl'].replace('/', ''))
assert_true(node)
assert_equal(node.template_node, other_node)
def test_project_before_template_no_addons(self):
project = ProjectFactory()
res = self.app.get(project.api_url_for('project_before_template'), auth=project.creator.auth)
assert_equal(res.json['prompts'], [])
def test_project_before_template_with_addons(self):
project = ProjectWithAddonFactory(addon='box')
res = self.app.get(project.api_url_for('project_before_template'), auth=project.creator.auth)
assert_in('Box', res.json['prompts'])
def test_project_new_from_template_non_user(self):
project = ProjectFactory()
url = api_url_for('project_new_from_template', nid=project._id)
res = self.app.post(url, auth=None)
assert_equal(res.status_code, 302)
res2 = res.follow(expect_errors=True)
assert_equal(res2.status_code, 308)
assert_equal(res2.request.path, '/login')
def test_project_new_from_template_public_non_contributor(self):
non_contributor = AuthUserFactory()
project = ProjectFactory(is_public=True)
url = api_url_for('project_new_from_template', nid=project._id)
res = self.app.post(url, auth=non_contributor.auth)
assert_equal(res.status_code, 201)
def test_project_new_from_template_contributor(self):
contributor = AuthUserFactory()
project = ProjectFactory(is_public=False)
project.add_contributor(contributor)
project.save()
url = api_url_for('project_new_from_template', nid=project._id)
res = self.app.post(url, auth=contributor.auth)
assert_equal(res.status_code, 201)
class TestUnconfirmedUserViews(OsfTestCase):
def test_can_view_profile(self):
user = UnconfirmedUserFactory()
url = web_url_for('profile_view_id', uid=user._id)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, http_status.HTTP_400_BAD_REQUEST)
class TestStaticFileViews(OsfTestCase):
def test_robots_dot_txt(self):
res = self.app.get('/robots.txt')
assert_equal(res.status_code, 200)
assert_in('User-agent', res)
assert_in('html', res.headers['Content-Type'])
def test_favicon(self):
res = self.app.get('/favicon.ico')
assert_equal(res.status_code, 200)
assert_in('image/vnd.microsoft.icon', res.headers['Content-Type'])
def test_getting_started_page(self):
res = self.app.get('/getting-started/')
assert_equal(res.status_code, 302)
assert_equal(res.location, 'https://openscience.zendesk.com/hc/en-us')
def test_help_redirect(self):
res = self.app.get('/help/')
assert_equal(res.status_code, 302)
class TestUserConfirmSignal(OsfTestCase):
def test_confirm_user_signal_called_when_user_claims_account(self):
unclaimed_user = UnconfirmedUserFactory()
# unclaimed user has been invited to a project.
referrer = UserFactory()
project = ProjectFactory(creator=referrer)
unclaimed_user.add_unclaimed_record(project, referrer, 'foo', email=fake_email())
unclaimed_user.save()
token = unclaimed_user.get_unclaimed_record(project._primary_key)['token']
with capture_signals() as mock_signals:
url = web_url_for('claim_user_form', pid=project._id, uid=unclaimed_user._id, token=token)
payload = {'username': unclaimed_user.username,
'password': 'password',
'password2': 'password'}
res = self.app.post(url, payload)
assert_equal(res.status_code, 302)
assert_equal(mock_signals.signals_sent(), set([auth.signals.user_confirmed]))
def test_confirm_user_signal_called_when_user_confirms_email(self):
unconfirmed_user = UnconfirmedUserFactory()
unconfirmed_user.save()
# user goes to email confirmation link
token = unconfirmed_user.get_confirmation_token(unconfirmed_user.username)
with capture_signals() as mock_signals:
url = web_url_for('confirm_email_get', uid=unconfirmed_user._id, token=token)
res = self.app.get(url)
assert_equal(res.status_code, 302)
assert_equal(mock_signals.signals_sent(), set([auth.signals.user_confirmed]))
# copied from tests/test_comments.py
class TestCommentViews(OsfTestCase):
def setUp(self):
super(TestCommentViews, self).setUp()
self.project = ProjectFactory(is_public=True)
self.user = AuthUserFactory()
self.project.add_contributor(self.user)
self.project.save()
self.user.save()
def test_view_project_comments_updates_user_comments_view_timestamp(self):
url = self.project.api_url_for('update_comments_timestamp')
res = self.app.put_json(url, {
'page': 'node',
'rootId': self.project._id
}, auth=self.user.auth)
self.user.reload()
user_timestamp = self.user.comments_viewed_timestamp[self.project._id]
view_timestamp = timezone.now()
assert_datetime_equal(user_timestamp, view_timestamp)
def test_confirm_non_contrib_viewers_dont_have_pid_in_comments_view_timestamp(self):
non_contributor = AuthUserFactory()
url = self.project.api_url_for('update_comments_timestamp')
res = self.app.put_json(url, {
'page': 'node',
'rootId': self.project._id
}, auth=self.user.auth)
non_contributor.reload()
assert_not_in(self.project._id, non_contributor.comments_viewed_timestamp)
def test_view_comments_updates_user_comments_view_timestamp_files(self):
osfstorage = self.project.get_addon('osfstorage')
root_node = osfstorage.get_root()
test_file = root_node.append_file('test_file')
test_file.create_version(self.user, {
'object': '06d80e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1337,
'contentType': 'img/png'
}).save()
url = self.project.api_url_for('update_comments_timestamp')
res = self.app.put_json(url, {
'page': 'files',
'rootId': test_file._id
}, auth=self.user.auth)
self.user.reload()
user_timestamp = self.user.comments_viewed_timestamp[test_file._id]
view_timestamp = timezone.now()
assert_datetime_equal(user_timestamp, view_timestamp)
# Regression test for https://openscience.atlassian.net/browse/OSF-5193
# moved from tests/test_comments.py
def test_find_unread_includes_edited_comments(self):
project = ProjectFactory()
user = AuthUserFactory()
project.add_contributor(user, save=True)
comment = CommentFactory(node=project, user=project.creator)
n_unread = Comment.find_n_unread(user=user, node=project, page='node')
assert n_unread == 1
url = project.api_url_for('update_comments_timestamp')
payload = {'page': 'node', 'rootId': project._id}
self.app.put_json(url, payload, auth=user.auth)
user.reload()
n_unread = Comment.find_n_unread(user=user, node=project, page='node')
assert n_unread == 0
# Edit previously read comment
comment.edit(
auth=Auth(project.creator),
content='edited',
save=True
)
n_unread = Comment.find_n_unread(user=user, node=project, page='node')
assert n_unread == 1
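# Tests for the password reset flow: verification_key_v2 on the OSF side and the verification_key handed off to CAS.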
class TestResetPassword(OsfTestCase):
def setUp(self):
super(TestResetPassword, self).setUp()
self.user = AuthUserFactory()
self.another_user = AuthUserFactory()
self.osf_key_v2 = generate_verification_key(verification_type='password')
self.user.verification_key_v2 = self.osf_key_v2
self.user.verification_key = None
self.user.save()
self.get_url = web_url_for(
'reset_password_get',
uid=self.user._id,
token=self.osf_key_v2['token']
)
self.get_url_invalid_key = web_url_for(
'reset_password_get',
uid=self.user._id,
token=generate_verification_key()
)
self.get_url_invalid_user = web_url_for(
'reset_password_get',
uid=self.another_user._id,
token=self.osf_key_v2['token']
)
# successfully load reset password page
def test_reset_password_view_returns_200(self):
res = self.app.get(self.get_url)
assert_equal(res.status_code, 200)
# raise http 400 error
def test_reset_password_view_raises_400(self):
res = self.app.get(self.get_url_invalid_key, expect_errors=True)
assert_equal(res.status_code, 400)
res = self.app.get(self.get_url_invalid_user, expect_errors=True)
assert_equal(res.status_code, 400)
self.user.verification_key_v2['expires'] = timezone.now()
self.user.save()
res = self.app.get(self.get_url, expect_errors=True)
assert_equal(res.status_code, 400)
# successfully reset password
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_can_reset_password_if_form_success(self, mock_service_validate):
# load reset password page and submit email
res = self.app.get(self.get_url)
form = res.forms['resetPasswordForm']
form['password'] = 'newpassword'
form['password2'] = 'newpassword'
res = form.submit()
# check request URL is /resetpassword with username and new verification_key_v2 token
request_url_path = res.request.path
assert_in('resetpassword', request_url_path)
assert_in(self.user._id, request_url_path)
assert_not_in(self.user.verification_key_v2['token'], request_url_path)
# check verification_key_v2 for OSF is destroyed and verification_key for CAS is in place
self.user.reload()
assert_equal(self.user.verification_key_v2, {})
assert_not_equal(self.user.verification_key, None)
# check redirection to CAS login with username and the new verification_key(CAS)
assert_equal(res.status_code, 302)
location = res.headers.get('Location')
assert_true('login?service=' in location)
assert_true('username={}'.format(quote(self.user.username, safe='@')) in location)
assert_true('verification_key={}'.format(self.user.verification_key) in location)
# check if password was updated
self.user.reload()
assert_true(self.user.check_password('newpassword'))
# check if verification_key is destroyed after service validation
mock_service_validate.return_value = cas.CasResponse(
authenticated=True,
user=self.user._id,
attributes={'accessToken': fake.md5()}
)
ticket = fake.md5()
service_url = 'http://accounts.osf.io/?ticket=' + ticket
cas.make_response_from_ticket(ticket, service_url)
self.user.reload()
assert_equal(self.user.verification_key, None)
# log users out before they land on reset password page
def test_reset_password_logs_out_user(self):
# visit reset password link while another user is logged in
res = self.app.get(self.get_url, auth=self.another_user.auth)
# check redirection to CAS logout
assert_equal(res.status_code, 302)
location = res.headers.get('Location')
assert_not_in('reauth', location)
assert_in('logout?service=', location)
assert_in('resetpassword', location)
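# Tests for GUID resolution, covering preprint provider domains and deleted quickfiles.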
@pytest.mark.enable_quickfiles_creation
@mock.patch('website.views.PROXY_EMBER_APPS', False)
class TestResolveGuid(OsfTestCase):
def setUp(self):
super(TestResolveGuid, self).setUp()
def test_preprint_provider_without_domain(self):
provider = PreprintProviderFactory(domain='')
preprint = PreprintFactory(provider=provider)
url = web_url_for('resolve_guid', _guid=True, guid=preprint._id)
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(
res.request.path,
'/{}/'.format(preprint._id)
)
def test_preprint_provider_with_domain_without_redirect(self):
domain = 'https://test.com/'
provider = PreprintProviderFactory(_id='test', domain=domain, domain_redirect_enabled=False)
preprint = PreprintFactory(provider=provider)
url = web_url_for('resolve_guid', _guid=True, guid=preprint._id)
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(
res.request.path,
'/{}/'.format(preprint._id)
)
def test_preprint_provider_with_domain_with_redirect(self):
domain = 'https://test.com/'
provider = PreprintProviderFactory(_id='test', domain=domain, domain_redirect_enabled=True)
preprint = PreprintFactory(provider=provider)
url = web_url_for('resolve_guid', _guid=True, guid=preprint._id)
res = self.app.get(url)
assert_is_redirect(res)
assert_equal(res.status_code, 301)
assert_equal(
res.headers['location'],
'{}{}/'.format(domain, preprint._id)
)
assert_equal(
res.request.path,
'/{}/'.format(preprint._id)
)
def test_preprint_provider_with_osf_domain(self):
provider = PreprintProviderFactory(_id='osf', domain='https://osf.io/')
preprint = PreprintFactory(provider=provider)
url = web_url_for('resolve_guid', _guid=True, guid=preprint._id)
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(
res.request.path,
'/{}/'.format(preprint._id)
)
def test_deleted_quick_file_gone(self):
user = AuthUserFactory()
quickfiles = QuickFilesNode.objects.get(creator=user)
osfstorage = quickfiles.get_addon('osfstorage')
root = osfstorage.get_root()
test_file = root.append_file('soon_to_be_deleted.txt')
guid = test_file.get_guid(create=True)._id
test_file.delete()
url = web_url_for('resolve_guid', _guid=True, guid=guid)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, http_status.HTTP_410_GONE)
assert_equal(res.request.path, '/{}/'.format(guid))
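# Confirmation-style views should return HTTP 403 when requested with the BingPreview crawler user agent.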
class TestConfirmationViewBlockBingPreview(OsfTestCase):
def setUp(self):
super(TestConfirmationViewBlockBingPreview, self).setUp()
self.user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534+ (KHTML, like Gecko) BingPreview/1.0b'
# reset password link should fail with BingPreview
def test_reset_password_get_returns_403(self):
user = UserFactory()
osf_key_v2 = generate_verification_key(verification_type='password')
user.verification_key_v2 = osf_key_v2
user.verification_key = None
user.save()
reset_password_get_url = web_url_for(
'reset_password_get',
uid=user._id,
token=osf_key_v2['token']
)
res = self.app.get(
reset_password_get_url,
expect_errors=True,
headers={
'User-Agent': self.user_agent,
}
)
assert_equal(res.status_code, 403)
# new user confirm account should fail with BingPreview
def test_confirm_email_get_new_user_returns_403(self):
user = OSFUser.create_unconfirmed('unconfirmed@cos.io', 'abCD12#$', 'Unconfirmed User')
user.save()
confirm_url = user.get_confirmation_url('unconfirmed@cos.io', external=False)
res = self.app.get(
confirm_url,
expect_errors=True,
headers={
'User-Agent': self.user_agent,
}
)
assert_equal(res.status_code, 403)
# confirmation for adding new email should fail with BingPreview
def test_confirm_email_add_email_returns_403(self):
user = UserFactory()
user.add_unconfirmed_email('unconfirmed@cos.io')
user.save()
confirm_url = user.get_confirmation_url('unconfirmed@cos.io', external=False) + '?logout=1'
res = self.app.get(
confirm_url,
expect_errors=True,
headers={
'User-Agent': self.user_agent,
}
)
assert_equal(res.status_code, 403)
# confirmation for merging accounts should fail with BingPreview
def test_confirm_email_merge_account_returns_403(self):
user = UserFactory()
user_to_be_merged = UserFactory()
user.add_unconfirmed_email(user_to_be_merged.username)
user.save()
confirm_url = user.get_confirmation_url(user_to_be_merged.username, external=False) + '?logout=1'
res = self.app.get(
confirm_url,
expect_errors=True,
headers={
'User-Agent': self.user_agent,
}
)
assert_equal(res.status_code, 403)
# confirmation for new user claiming contributor should fail with BingPreview
def test_claim_user_form_new_user(self):
referrer = AuthUserFactory()
project = ProjectFactory(creator=referrer, is_public=True)
given_name = fake.name()
given_email = fake_email()
user = project.add_unregistered_contributor(
fullname=given_name,
email=given_email,
auth=Auth(user=referrer)
)
project.save()
claim_url = user.get_claim_url(project._primary_key)
res = self.app.get(
claim_url,
expect_errors=True,
headers={
'User-Agent': self.user_agent,
}
)
assert_equal(res.status_code, 403)
# confirmation for existing user claiming contributor should fail with BingPreview
def test_claim_user_form_existing_user(self):
referrer = AuthUserFactory()
project = ProjectFactory(creator=referrer, is_public=True)
auth_user = AuthUserFactory()
pending_user = project.add_unregistered_contributor(
fullname=auth_user.fullname,
email=None,
auth=Auth(user=referrer)
)
project.save()
claim_url = pending_user.get_claim_url(project._primary_key)
res = self.app.get(
claim_url,
auth=auth_user.auth,
expect_errors=True,
headers={
'User-Agent': self.user_agent,
}
)
assert_equal(res.status_code, 403)
# account creation confirmation for ORCiD login should fail with BingPreview
def test_external_login_confirm_email_get_create_user(self):
name, email = fake.name(), fake_email()
provider_id = fake.ean()
external_identity = {
'service': {
provider_id: 'CREATE'
}
}
user = OSFUser.create_unconfirmed(
username=email,
password=str(fake.password()),
fullname=name,
external_identity=external_identity,
)
user.save()
create_url = user.get_confirmation_url(
user.username,
external_id_provider='service',
destination='dashboard'
)
res = self.app.get(
create_url,
expect_errors=True,
headers={
'User-Agent': self.user_agent,
}
)
assert_equal(res.status_code, 403)
# account linking confirmation for ORCiD login should fail with BingPreview
def test_external_login_confirm_email_get_link_user(self):
user = UserFactory()
provider_id = fake.ean()
user.external_identity = {
'service': {
provider_id: 'LINK'
}
}
user.add_unconfirmed_email(user.username, external_identity='service')
user.save()
link_url = user.get_confirmation_url(
user.username,
external_id_provider='service',
destination='dashboard'
)
res = self.app.get(
link_url,
expect_errors=True,
headers={
'User-Agent': self.user_agent,
}
)
assert_equal(res.status_code, 403)
if __name__ == '__main__':
unittest.main()
| felliott/osf.io | tests/test_views.py | Python | apache-2.0 | 219,975 | ["Brian", "VisIt"] | 3172ce4aaf5ccf4e3a97c87e81a2b9f9faee81e12cd6fe92fb38b1ed1617c8ce |
##
# Copyright 2009-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing DIRAC, implemented as an easyblock
"""
import os
import re
import shutil
import tempfile
import easybuild.tools.environment as env
from easybuild.easyblocks.generic.cmakemake import CMakeMake
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import build_option
from easybuild.tools.run import run_cmd
class EB_DIRAC(CMakeMake):
"""Support for building/installing DIRAC."""
@staticmethod
def extra_options():
extra_vars = CMakeMake.extra_options()
extra_vars['separate_build_dir'][0] = True
return extra_vars
def configure_step(self):
"""Custom configuration procedure for DIRAC."""
# make very sure the install directory isn't there yet, since it may cause problems if it is reused (e.g. on a forced rebuild)
if os.path.exists(self.installdir):
self.log.warning("Found existing install directory %s, removing it to avoid problems", self.installdir)
try:
shutil.rmtree(self.installdir)
except OSError as err:
raise EasyBuildError("Failed to remove existing install directory %s: %s", self.installdir, err)
self.cfg.update('configopts', "-DENABLE_MPI=ON")
# complete configuration with configure_method of parent
super(EB_DIRAC, self).configure_step()
def test_step(self):
"""Custom built-in test procedure for DIRAC."""
if self.cfg['runtest']:
if not build_option('mpi_tests'):
self.log.info("Skipping testing of DIRAC since MPI testing is disabled")
return
# set up test environment
# see http://diracprogram.org/doc/release-14/installation/testing.html
env.setvar('DIRAC_TMPDIR', tempfile.mkdtemp(prefix='dirac-test-'))
env.setvar('DIRAC_MPI_COMMAND', self.toolchain.mpi_cmd_for('', self.cfg['parallel']))
# run tests (may take a while, especially if some tests take a while to time out)
self.log.info("Running tests may take a while, especially if some tests timeout (default timeout is 1500s)")
cmd = "make test"
out, ec = run_cmd(cmd, simple=False, log_all=False, log_ok=False)
# check that majority of tests pass
# some may fail due to timeout, but that's acceptable
# cfr. https://groups.google.com/forum/#!msg/dirac-users/zEd5-xflBnY/OQ1pSbuX810J
# over 90% of tests should pass
passed_regex = re.compile('^(9|10)[0-9.]+% tests passed', re.M)
if not passed_regex.search(out) and not self.dry_run:
raise EasyBuildError("Too many failed tests; '%s' not found in test output: %s",
passed_regex.pattern, out)
# extract test results
test_result_regex = re.compile(r'^\s*[0-9]+/[0-9]+ Test \s*#[0-9]+: .*', re.M)
test_results = test_result_regex.findall(out)
if test_results:
self.log.info("Found %d test results: %s", len(test_results), test_results)
elif self.dry_run:
# dummy test result
test_results = ["1/1 Test #1: dft_alda_xcfun ............................. Passed 72.29 sec"]
else:
raise EasyBuildError("Couldn't find *any* test results?")
test_count_regex = re.compile(r'^\s*[0-9]+/([0-9]+)')
res = test_count_regex.search(test_results[0])
if res:
test_count = int(res.group(1))
elif self.dry_run:
# a single dummy test result
test_count = 1
else:
raise EasyBuildError("Failed to determine total test count from %s using regex '%s'",
test_results[0], test_count_regex.pattern)
if len(test_results) != test_count:
raise EasyBuildError("Expected to find %s test results, but found %s", test_count, len(test_results))
# check test results, only 'Passed' or 'Timeout' are acceptable outcomes
faulty_tests = []
for test_result in test_results:
if ' Passed ' not in test_result:
self.log.warning("Found failed test: %s", test_result)
if '***Timeout' not in test_result:
faulty_tests.append(test_result)
if faulty_tests:
raise EasyBuildError("Found tests failing due to something else than timeout: %s", faulty_tests)
def sanity_check_step(self):
"""Custom sanity check for DIRAC."""
custom_paths = {
'files': ['bin/pam-dirac'],
'dirs': ['share/dirac'],
}
super(EB_DIRAC, self).sanity_check_step(custom_paths=custom_paths)
| pescobar/easybuild-easyblocks | easybuild/easyblocks/d/dirac.py | Python | gpl-2.0 | 5,938 | ["DIRAC"] | 2626a15799a59fe2c06aab332f672510fcc0bbebfa46c7427d7d0768c1f587b1 |
#!/usr/bin/python
import getopt
import sys
from Bio import SeqIO
from Bio.SeqUtils import GC
import time
import os
import shutil
import pandas
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
import csv
from datetime import datetime
import numpy as np
from scipy import stats
__author__ = "Andriy Sheremet"
# Helper function definitions
def genome_shredder(input_dct, shear_val):
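"""
Shears each sequence in input_dct into consecutive fragments of shear_val bp and
returns a dict of SeqRecord objects keyed by '<original name>_<fragment offset>'.
"""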
shredded = {}
for key, value in input_dct.items():
#print input_dct[i].seq
#print i
dic_name = key
rec_name = value.name
for j in range(0, len(str(value.seq)), int(shear_val)):
# print j
record = str(value.seq)[0+j:int(shear_val)+j]
shredded[dic_name+"_"+str(j)] = SeqRecord(Seq(record),rec_name+"_"+str(j),'','')
#record = SeqRecord(input_ref_records[i].seq[0+i:int(shear_val)+i],input_ref_records[i].name+"_%i"%i,"","")
return shredded
def parse_contigs_ind(f_name):
"""
Returns a SeqIO index of the sequences in the input FASTA file.
Remember to close the index object after use.
"""
record_dict = SeqIO.index(f_name, "fasta")
return record_dict
#returning specific sequences and overal list
def retrive_sequence(contig_lst, rec_dic):
"""
Returns list of sequence elements from dictionary/index of SeqIO objects specific to the contig_lst parameter
"""
contig_seqs = list()
#record_dict = rec_dic
#handle.close()
for contig in contig_lst:
contig_seqs.append(str(rec_dic[contig].seq))#fixing BiopythonDeprecationWarning
return contig_seqs
def filter_seq_dict(key_lst, rec_dic):
"""
Returns filtered dictionary element from rec_dic according to sequence names passed in key_lst
"""
return { key: rec_dic[key] for key in key_lst }
def unique_scaffold_topEval(dataframe):
# returns a pandas DataFrame with one row per scaffold (query id), keeping the hit with the lowest e-value
variables = list(dataframe.columns.values)
scaffolds=dict()
rows=list()
for row in dataframe.itertuples():
#if row[1]=='Ga0073928_10002560':
if row[1] not in scaffolds:
scaffolds[row[1]]=row
else:
if row[11]<scaffolds[row[1]][11]:
scaffolds[row[1]]=row
rows=scaffolds.values()
#variables=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
df = pandas.DataFrame([[getattr(i,j) for j in variables] for i in rows], columns = variables)
return df
def unique_scaffold_topBits(dataframe):
# returns a pandas DataFrame with one row per scaffold (query id), keeping the hit with the highest bit score
variables = list(dataframe.columns.values)
scaffolds=dict()
rows=list()
for row in dataframe.itertuples():
#if row[1]=='Ga0073928_10002560':
if row[1] not in scaffolds:
scaffolds[row[1]]=row
else:
if row[12]>scaffolds[row[1]][12]:
scaffolds[row[1]]=row
rows=scaffolds.values()
#variables=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
df = pandas.DataFrame([[getattr(i,j) for j in variables] for i in rows], columns = variables)
return df
def close_ind_lst(ind_lst):
"""
Closes index objects supplied in input parameter list
"""
for index in ind_lst:
index.close()
def usage():
print "\nUsage: "+sys.argv[0]+" -r <reference(s)> -m <metagenome(s)> [-n <name>] [-e <e_value>] [-a <alignment_length>] [-i <identity>] [-s <shear_size>] [-f <format>]"
print "Example: "+sys.argv[0]+" -r ref.fasta -m metagenome.fasta -n project1 -e 1e-5 -a 50% -i 95"
def main(argv):
#default parameters
mg_lst = []
ref_lst = []
e_val = 1e-5
alen = 50.0
alen_percent = True
alen_bp = False
iden = 95.0
name= "output"
fmt_lst = ["fasta"]
supported_formats =["fasta", "csv"]
iterations = 1
alen_increment = 5.0
iden_increment = 0.0
blast_db_Dir = ""
results_Dir = ""
input_files_Dir = ""
ref_out_0 = ""
blasted_lst = []
continue_from_previous = False #poorly supported, just keeping the directories
skip_blasting = False
debugging = False
sheared = False
shear_val = None
logfile = ""
try:
opts, args = getopt.getopt(argv, "r:m:n:e:a:i:s:f:h", ["reference=", "metagenome=", "name=", "e_value=", "alignment_length=", "identity=","shear=","format=", "iterations=", "alen_increment=", "iden_increment=","continue_from_previous","skip_blasting","debugging", "help"])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
# elif opt in ("--recover_after_failure"):
# recover_after_failure = True
# print "Recover after failure:", recover_after_failure
elif opt in ("--continue_from_previous"):
continue_from_previous = True
if debugging:
print "Continue after failure:", continue_from_previous
elif opt in ("--debugging"):
debugging = True
if debugging:
print "Debugging messages:", debugging
elif opt in ("-r", "--reference"):
if arg:
ref_lst=arg.split(',')
#infiles = arg
if debugging:
print "Reference file(s)", ref_lst
elif opt in ("-m", "--metagenome"):
if arg:
mg_lst=arg.split(',')
#infiles = arg
if debugging:
print "Metagenome file(s)", mg_lst
elif opt in ("-f", "--format"):
if arg:
fmt_lst=arg.split(',')
#infiles = arg
if debugging:
print "Output format(s)", fmt_lst
elif opt in ("-n", "--name"):
if arg.strip():
name = arg
if debugging:
print "Project name", name
elif opt in ("-e", "--e_value"):
try:
e_val = float(arg)
except:
print "\nERROR: Please enter numerical value as -e parameter (default: 1e-5)"
usage()
sys.exit(1)
if debugging:
print "E value", e_val
elif opt in ("-a", "--alignment_length"):
if arg.strip()[-1]=="%":
alen_bp = False
alen_percent = True
else:
alen_bp = True
alen_percent = False
try:
alen = float(arg.split("%")[0])
except:
print "\nERROR: Please enter a numerical value as -a parameter (default: 50.0)"
usage()
sys.exit(1)
if debugging:
print "Alignment length", alen
elif opt in ("-i", "--identity"):
try:
iden = float(arg)
except:
print "\nERROR: Please enter a numerical value as -i parameter (default: 95.0)"
usage()
sys.exit(1)
if debugging:
print "Alignment length", iden
elif opt in ("-s", "--shear"):
sheared = True
try:
shear_val = int(arg)
except:
print "\nERROR: Please enter an integer value as -s parameter"
usage()
sys.exit(1)
if debugging:
print "Alignment length", iden
elif opt in ("--iterations"):
try:
iterations = int(arg)
except:
print "\nWARNING: Please enter integer value as --iterations parameter (using default: 1)"
if debugging:
print "Iterations: ", iterations
elif opt in ("--alen_increment"):
try:
alen_increment = float(arg)
except:
print "\nWARNING: Please enter numerical value as --alen_increment parameter (using default: )", alen_increment
if debugging:
print "Alignment length increment: ", alen_increment
elif opt in ("--iden_increment"):
try:
iden_increment = float(arg)
except:
print "\nWARNING: Please enter numerical value as --iden_increment parameter (using default: )", iden_increment
if debugging:
print "Alignment length increment: ", iden_increment
elif opt in ("--skip_blasting"):
skip_blasting = True
if debugging:
print "Blasting step omitted; Using previous blast output."
for ref_file in [x for x in ref_lst if x]:
try:
#
with open(ref_file, "rU") as hand_ref:
pass
except:
print "\nERROR: Reference File(s) ["+ref_file+"] doesn't exist"
usage()
sys.exit(1)
for mg_file in [x for x in mg_lst if x]:
try:
#
with open(mg_file, "rU") as hand_mg:
pass
except:
print "\nERROR: Metagenome File(s) ["+mg_file+"] doesn't exist"
usage()
sys.exit(1)
for fmt in [x for x in fmt_lst if x]:
if fmt not in supported_formats:
print "\nWARNING: Output format [",fmt,"] is not supported"
print "\tUse -h(--help) option for the list of supported formats"
fmt_lst=["fasta"]
print "\tUsing default output format: ", fmt_lst[0]
project_dir = name
if not continue_from_previous:
if os.path.exists(project_dir):
shutil.rmtree(project_dir)
try:
os.mkdir(project_dir)
except OSError:
print "ERROR: Cannot create project directory: " + name
raise
print "\n\t Initial Parameters:"
print "\nProject Name: ", name,'\n'
print "Project Directory: ", os.path.abspath(name),'\n'
print "Reference File(s): ", ref_lst,'\n'
if sheared:
print "Shear Reference File(s):", str(shear_val)+"bp",'\n'
print "Metagenome File(s): ", mg_lst,'\n'
print "E Value: ", e_val, "\n"
if alen_percent:
print "Alignment Length: "+str(alen)+'%\n'
if alen_bp:
print "Alignment Length: "+str(alen)+'bp\n'
print "Sequence Identity: "+str(iden)+'%\n'
print "Output Format(s):", fmt_lst,'\n'
if iterations > 1:
print "Iterations: ", iterations, '\n'
print "Alignment Length Increment: ", alen_increment, '\n'
print "Sequence identity Increment: ", iden_increment, '\n'
#Initializing directories
blast_db_Dir = name+"/blast_db"
if not continue_from_previous:
if os.path.exists(blast_db_Dir):
shutil.rmtree(blast_db_Dir)
try:
os.mkdir(blast_db_Dir)
except OSError:
print "ERROR: Cannot create project directory: " + blast_db_Dir
raise
results_Dir = name+"/results"
if not continue_from_previous:
if os.path.exists(results_Dir):
shutil.rmtree(results_Dir)
try:
os.mkdir(results_Dir)
except OSError:
print "ERROR: Cannot create project directory: " + results_Dir
raise
input_files_Dir = name+"/input_files"
if not continue_from_previous:
if os.path.exists(input_files_Dir):
shutil.rmtree(input_files_Dir)
try:
os.mkdir(input_files_Dir)
except OSError:
print "ERROR: Cannot create project directory: " + input_files_Dir
raise
# Writing raw reference files into a specific input filename
input_ref_records = {}
for reference in ref_lst:
ref_records_ind = parse_contigs_ind(reference)
#ref_records = dict(ref_records_ind)
input_ref_records.update(ref_records_ind)
ref_records_ind.close()
#input_ref_records.update(ref_records)
ref_out_0 = input_files_Dir+"/reference0.fna"
if (sheared & bool(shear_val)):
with open(ref_out_0, "w") as handle:
SeqIO.write(genome_shredder(input_ref_records, shear_val).values(), handle, "fasta")
            # No need to close explicitly: the with statement closes the file automatically
else:
with open(ref_out_0, "w") as handle:
SeqIO.write(input_ref_records.values(), handle, "fasta")
# Making BLAST databases
#output fname from before used as input for blast database creation
input_ref_0 = ref_out_0
title_db = name+"_db"#add iteration functionality
outfile_db = blast_db_Dir+"/iteration"+str(iterations)+"/"+name+"_db"#change into for loop
os.system("makeblastdb -in "+input_ref_0+" -dbtype nucl -title "+title_db+" -out "+outfile_db+" -parse_seqids")
# BLASTing query contigs
if not skip_blasting:
print "\nBLASTing query file(s):"
for i in range(len(mg_lst)):
database = outfile_db # adjust for iterations
blasted_lst.append(results_Dir+"/recruited_mg_"+str(i)+".tab")
start = time.time()
os_string = 'blastn -db '+database+' -query \"'+mg_lst[i]+'\" -out '+blasted_lst[i]+" -evalue "+str(e_val)+" -outfmt 6 -num_threads 8"
#print os_string
os.system(os_string)
print "\t"+mg_lst[i]+"; Time elapsed: "+str(time.time()-start)+" seconds."
else:
for i in range(len(mg_lst)):
blasted_lst.append(results_Dir+"/recruited_mg_"+str(i)+".tab")
# Parsing BLAST outputs
blast_cols = ['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
recruited_mg=[]
for i in range(len(mg_lst)):
df = pandas.read_csv(blasted_lst[i] ,sep="\t", header=None)
df.columns=blast_cols
recruited_mg.append(df)
# print len(recruited_mg[0])
# print len(recruited_mg[1])
#creating all_records entry
#! Remember to close index objects after they are no longer needed
#! Use helper function close_ind_lst()
all_records = []
all_input_recs = parse_contigs_ind(ref_out_0)
# _ = 0
# for key, value in all_input_recs.items():
# _ +=1
# if _ < 20:
# print key, len(value)
print "\nIndexing metagenome file(s):"
for i in range(len(mg_lst)):
start = time.time()
all_records.append(parse_contigs_ind(mg_lst[i]))
print "\t"+mg_lst[i]+" Indexed in : "+str(time.time()-start)+" seconds."
# Transforming data
for i in range(len(mg_lst)):
#cutoff_contigs[dataframe]=evalue_filter(cutoff_contigs[dataframe])
recruited_mg[i]=unique_scaffold_topBits(recruited_mg[i])
contig_list = recruited_mg[i]['quid'].tolist()
recruited_mg[i]['Seq_nt']=retrive_sequence(contig_list, all_records[i])
recruited_mg[i]['Seq_size']=recruited_mg[i]['Seq_nt'].apply(lambda x: len(x))
recruited_mg[i]['Ref_size']=recruited_mg[i]['suid'].apply(lambda x: len(all_input_recs[str(x)]))
        # Coverage = 100 * alignment length / min(query contig length, reference length)
        recruited_mg[i]['Coverage']=recruited_mg[i]['alen'].apply(lambda x: 100.0*float(x))/recruited_mg[i].loc[:,["Seq_size", "Ref_size"]].min(axis=1)
        # Metric = Coverage scaled by percent identity
        recruited_mg[i]['Metric']=recruited_mg[i]['Coverage']*recruited_mg[i]['iden']/100.0
try:
recruited_mg[i]['Seq_GC']=recruited_mg[i]['Seq_nt'].apply(lambda x: GC(x))
except:
recruited_mg[i]['Seq_GC']=recruited_mg[i]['Seq_nt'].apply(lambda x: None)
recruited_mg[i] = recruited_mg[i][['quid', 'suid', 'iden', 'alen','Coverage','Metric', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits','Ref_size','Seq_size','Seq_GC','Seq_nt']]
# Here would go statistics functions and producing plots
#
#
#
#
#
# Quality filtering before outputting
if alen_percent:
for i in range(len(recruited_mg)):
recruited_mg[i]=recruited_mg[i][(recruited_mg[i]['iden']>=iden)&(recruited_mg[i]['Coverage']>=alen)&(recruited_mg[i]['eval']<=e_val)]
if alen_bp:
for i in range(len(recruited_mg)):
recruited_mg[i]=recruited_mg[i][(recruited_mg[i]['iden']>=iden)&(recruited_mg[i]['alen']>=alen)&(recruited_mg[i]['eval']<=e_val)]
# print len(recruited_mg[0])
# print len(recruited_mg[1])
# Batch export to outfmt (csv and/or multiple FASTA)
alen_str = ""
iden_str = "_iden_"+str(iden)+"%"
if alen_percent:
alen_str = "_alen_"+str(alen)+"%"
if alen_bp:
alen_str = "_alen_"+str(alen)+"bp"
if iterations > 1:
prefix=name+"/results/"+name.split("/")[0]+"_iter_e_"+str(e_val)+iden_str+alen_str
else:
prefix=name+"/results/"+name.split("/")[0]+"_e_"+str(e_val)+iden_str+alen_str
if sheared:
prefix = prefix+'_sheared_'+str(shear_val)+"bp"
prefix = prefix + "_recruited_mg_"
#initializing log file data
logfile=name.split("/")[0]+"/results.log"
try:
run = int(name.split("/")[-1].split("_")[-1])# using "_" less depends on the wrapper script
except:
if name.split("/")[-1].split("_")[-1]==name:
run = 0
else:
print "Warning: Run identifier could not be written in: "+logfile
#sys.exit(1)
run = None
alen_header = "Min alen"
if alen_bp:
alen_header = alen_header+" (bp)"
if alen_percent:
alen_header = alen_header+" (%)"
shear_header = "Reference Shear (bp)"
shear_log_value = "None"
if sheared:
shear_log_value = str(shear_val)+"bp"
print "\nWriting files:"
for i in range(len(mg_lst)):
records= []
if "csv" in fmt_lst:
outfile1 = prefix+str(i)+".csv"
recruited_mg[i].to_csv(outfile1, sep='\t')
print str(len(recruited_mg[i]))+" sequences written to "+outfile1
if "fasta" in fmt_lst:
ids = recruited_mg[i]['quid'].tolist()
#if len(ids)==len(sequences):
for j in range(len(ids)):
records.append(all_records[i][ids[j]])
outfile2 = prefix+str(i)+".fasta"
with open(outfile2, "w") as output_handle:
SeqIO.write(records, output_handle, "fasta")
print str(len(ids))+" sequences written to "+outfile2
#Writing logfile
try:
time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
        except:
            time_str = "N/A"
            print "Warning: Time identifier could not be written in: "+logfile
metagenome = mg_lst[i]
#contig info
sizes_lst = np.array(recruited_mg[i]['Seq_size'].tolist())
sizes_avg = np.mean(sizes_lst)
sizes_avg_std= np.std(sizes_lst)
sizes_avg_sem = stats.sem(sizes_lst, axis=0)
alen_lst = np.array(recruited_mg[i]['alen'].tolist())
alen_avg = np.mean(alen_lst)
alen_avg_std = np.std(alen_lst)
alen_avg_sem = stats.sem(alen_lst, axis=0)
iden_lst = np.array(recruited_mg[i]['iden'].tolist())
iden_avg = np.mean(iden_lst)
iden_avg_std = np.std(iden_lst)
iden_avg_sem = stats.sem(iden_lst, axis=0)
gc_lst = np.array(recruited_mg[i]['Seq_GC'].tolist())
gc_avg = np.mean(gc_lst)
gc_avg_std = np.std(gc_lst)
gc_avg_sem = stats.sem(gc_lst, axis=0)
        log_header = ['Run','Project Name','Created', 'Reference(s)','Metagenome', 'No. Contigs', alen_header, "Min iden (%)", shear_header, "Mean Contig Size (bp)","STD Contig Size", "SEM Contig Size", "Mean Contig alen (bp)","STD Contig alen", "SEM Contig alen", "Mean Contig iden (%)","STD Contig iden", "SEM Contig iden", "Mean Contig GC (%)","STD Contig GC","SEM Contig GC"]
log_row = [run,name.split("/")[0],time_str, ";".join(ref_lst), metagenome, len(ids), alen, iden, shear_log_value, sizes_avg,sizes_avg_std, sizes_avg_sem, alen_avg,alen_avg_std, alen_avg_sem, iden_avg,iden_avg_std, iden_avg_sem, gc_avg,gc_avg_std, gc_avg_sem]
if os.path.isfile(logfile):#file exists - appending
with open(logfile, "a") as log_handle:
log_writer = csv.writer(log_handle, delimiter='\t')
log_writer.writerow(log_row)
else:#no file exists - writing
with open(logfile,"w") as log_handle:
log_writer = csv.writer(log_handle, delimiter='\t')
log_writer.writerow(log_header)
log_writer.writerow(log_row)
close_ind_lst(all_records)
close_ind_lst([all_input_recs])
#run = 0
#all_records[i].close()# keep open if multiple iterations
if __name__ == "__main__":
main(sys.argv[1:])
|
nyirock/mg_blast_wrapper
|
mg_blast_wrapper_v1.11.py
|
Python
|
mit
| 22,600
|
[
"BLAST"
] |
7467dc4faee4e4886ab5ac5338f0f49e5fe6e6ed22c41393b38fd056143d89df
|
tests=[
("python","UnitTestChem.py",{}),
("python","UnitTestChemv2.py",{}),
("python","UnitTestChemAtom.py",{}),
("python","UnitTestChemBond.py",{}),
("python","UnitTestChemSmarts.py",{}),
("python","UnitTestFragmentDescriptors.py",{}),
("python","UnitTestGraphDescriptors.2.py",{}),
("python","UnitTestLipinski.py",{}),
("python","MCS.py",{}),
("python","UnitTestMCS.py",{}),
("python","UnitTestOldBugs.py",{}),
("python","UnitTestSATIS.py",{}),
("python","UnitTestSmiles.py",{}),
("python","UnitTestSuppliers.py",{}),
("python","UnitTestSurf.py",{}),
("python","UnitTestMol3D.py",{}),
("python","FragmentMatcher.py",{}),
("python","MACCSkeys.py",{}),
("python","Descriptors.py",{}),
("python","UnitTestCatalog.py",{}),
("python","TemplateAlign.py",{}),
("python","Recap.py",{}),
("python","BRICS.py",{}),
("python","UnitTestDescriptors.py",{}),
("python","AllChem.py",{}),
("python","PropertyMol.py",{}),
("python","UnitTestInchi.py",{}),
("python","SaltRemover.py",{}),
("python","UnitTestFunctionalGroups.py",{}),
("python","UnitTestCrippen.py",{}),
("python","__init__.py",{}),
("python","PandasTools.py",{}),
("python","UnitTestPandasTools.py",{}),
("python","test_list.py",{'dir':'AtomPairs'}),
("python","test_list.py",{'dir':'ChemUtils'}),
("python","test_list.py",{'dir':'EState'}),
("python","test_list.py",{'dir':'FeatMaps'}),
("python","test_list.py",{'dir':'Fingerprints'}),
("python","test_list.py",{'dir':'Pharm2D'}),
("python","test_list.py",{'dir':'Pharm3D'}),
("python","test_list.py",{'dir':'Subshape'}),
("python","test_list.py",{'dir':'Suppliers'}),
("python","test_list.py",{'dir':'Scaffolds'}),
("python","test_list.py",{'dir':'Draw'}),
("python","test_list.py",{'dir':'Fraggle'}),
("python","test_list.py",{'dir':'SimpleEnum'}),
]
# only attempt the MolKey tests if we have the pre-reqs:
try:
from rdkit.Chem.MolKey import MolKey
tests.append(("python","test_list.py",{'dir':'MolKey'}))
except ImportError:
pass
longTests=[
("python","UnitTestArom.py",{}),
("python","UnitTestGraphDescriptors.2.py -l",{}),
("python","UnitTestSurf.py -l",{}),
]
if __name__=='__main__':
import sys
from rdkit import TestRunner
failed,tests = TestRunner.RunScript('test_list.py',0,1)
sys.exit(len(failed))
|
adalke/rdkit
|
rdkit/Chem/test_list.py
|
Python
|
bsd-3-clause
| 2,335
|
[
"RDKit"
] |
63ce74eb24328bda5c656769989de8026261cbb61abb1cfbc5d74583a7d247b1
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run a beam pipeline to evaluate our PDE models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import pandas
import os.path
from absl import app
from absl import flags
import apache_beam as beam
import numpy as np
from pde_superresolution import analysis
from pde_superresolution import duckarray
from pde_superresolution import equations
from pde_superresolution import integrate
from pde_superresolution import training
from pde_superresolution import xarray_beam
import tensorflow as tf
import xarray
# NOTE(shoyer): allow_override=True lets us import multiple binaries for the
# purpose of running integration tests. This is safe since we're strict about
# only using FLAGS inside main().
# files
flags.DEFINE_string(
'checkpoint_dir', '',
'Directory from which to load a trained model and save results.',
allow_override=True)
flags.DEFINE_string(
'exact_solution_path', '',
'Path from which to load the exact solution for an initial condition.',
allow_override=True)
flags.DEFINE_enum(
'equation_name', 'burgers', list(equations.CONSERVATIVE_EQUATION_TYPES),
'Equation to integrate.', allow_override=True)
flags.DEFINE_string(
'equation_kwargs', '',
'If provided, use these parameters instead of those on the saved equation.',
allow_override=True)
flags.DEFINE_string(
'samples_output_name', 'results.nc',
'Name of the netCDF file in checkpoint_dir to which to save samples.')
flags.DEFINE_string(
'mae_output_name', 'mae.nc',
'Name of the netCDF file in checkpoint_dir to which to save MAE results.')
flags.DEFINE_string(
'survival_output_name', 'survival.nc',
'Name of the netCDF file in checkpoint_dir to which to save survival '
'results.')
flags.DEFINE_string(
'stop_times', json.dumps([13, 15, 20, 25, 51, 103]),
'Cut-off times to use when calculating MAE.')
flags.DEFINE_string(
'quantiles', json.dumps([0.8, 0.9, 0.95]),
'Quantiles to use for "good enough".')
# integrate parameters
flags.DEFINE_integer(
'num_samples', 10,
'Number of times to integrate each equation.',
allow_override=True)
flags.DEFINE_float(
'time_max', 10,
'Total time for which to run each integration.',
allow_override=True)
flags.DEFINE_float(
'time_delta', 0.05,
'Difference between saved time steps in the integration.',
allow_override=True)
flags.DEFINE_float(
'warmup', 0,
'Amount of time to integrate before using the neural network.',
allow_override=True)
flags.DEFINE_string(
'integrate_method', 'RK23',
'Method to use for integration with scipy.integrate.solve_ivp.',
allow_override=True)
flags.DEFINE_float(
'exact_filter_interval', 0,
'Interval between periodic filtering. Only used for spectral methods.',
allow_override=True)
FLAGS = flags.FLAGS
_METRICS_NAMESPACE = 'finitediff/run_integrate'
def get_counter_metric(name):
return beam.metrics.Metrics.counter(_METRICS_NAMESPACE, name)
def count_start_finish(func, name=None):
"""Run a function with Beam metric counters for each start/finish."""
if name is None:
name = func.__name__
def wrapper(*args, **kwargs):
get_counter_metric('%s_started' % name).inc()
get_counter_metric('%s_in_progress' % name).inc()
results = func(*args, **kwargs)
get_counter_metric('%s_in_progress' % name).dec()
get_counter_metric('%s_finished' % name).inc()
return results
return wrapper
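# Illustrative sketch (not part of the pipeline): count_start_finish() can wrap
# any callable; the wrapped call increments <name>_started / _in_progress
# counters before running and _finished after. The lambda below is hypothetical.
def _count_start_finish_demo():
  counted = count_start_finish(lambda x: x * 2, name='double_demo')
  return counted(21)  # 42, with the double_demo_* counters updated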
def main(_, runner=None):
if runner is None:
# must create before flags are used
runner = beam.runners.DirectRunner()
hparams = training.load_hparams(FLAGS.checkpoint_dir)
if FLAGS.equation_kwargs:
hparams.set_hparam('equation_kwargs', FLAGS.equation_kwargs)
def load_initial_conditions(path=FLAGS.exact_solution_path,
num_samples=FLAGS.num_samples):
ds = xarray_beam.read_netcdf(path)
initial_conditions = duckarray.resample_mean(
ds['y'].isel(time=0).data, hparams.resample_factor)
if np.isnan(initial_conditions).any():
raise ValueError('initial conditions cannot have NaNs')
if ds.sizes['sample'] != num_samples:
raise ValueError('invalid number of samples in exact dataset')
for seed in range(num_samples):
y0 = initial_conditions[seed, :]
assert y0.ndim == 1
yield (seed, y0)
def run_integrate(
seed_and_initial_condition,
checkpoint_dir=FLAGS.checkpoint_dir,
times=np.arange(0, FLAGS.time_max + FLAGS.time_delta, FLAGS.time_delta),
warmup=FLAGS.warmup,
integrate_method=FLAGS.integrate_method,
):
random_seed, y0 = seed_and_initial_condition
_, equation_coarse = equations.from_hparams(
hparams, random_seed=random_seed)
checkpoint_path = training.checkpoint_dir_to_path(checkpoint_dir)
differentiator = integrate.SavedModelDifferentiator(
checkpoint_path, equation_coarse, hparams)
solution_model, num_evals_model = integrate.odeint(
y0, differentiator, warmup+times, method=integrate_method)
results = xarray.Dataset(
data_vars={'y': (('time', 'x'), solution_model)},
coords={'time': warmup+times,
'x': equation_coarse.grid.solution_x,
'num_evals': num_evals_model,
'sample': random_seed})
return results
samples_path = os.path.join(FLAGS.checkpoint_dir, FLAGS.samples_output_name)
mae_path = os.path.join(FLAGS.checkpoint_dir, FLAGS.mae_output_name)
survival_path = os.path.join(FLAGS.checkpoint_dir, FLAGS.survival_output_name)
def finalize(
ds_model,
exact_path=FLAGS.exact_solution_path,
stop_times=json.loads(FLAGS.stop_times),
quantiles=json.loads(FLAGS.quantiles),
):
ds_model = ds_model.sortby('sample')
xarray_beam.write_netcdf(ds_model, samples_path)
# build combined dataset
ds_exact = xarray_beam.read_netcdf(exact_path)
ds = ds_model.rename({'y': 'y_model', 'x': 'x_low'})
ds['y_exact'] = ds_exact['y'].rename({'x': 'x_high'})
unified = analysis.unify_x_coords(ds)
# calculate MAE
results = []
for time_max in stop_times:
ds_sel = unified.sel(time=slice(None, time_max))
mae = abs(ds_sel.drop('y_exact') - ds_sel.y_exact).mean(
['x', 'time'], skipna=False)
results.append(mae)
dim = pandas.Index(stop_times, name='time_max')
mae_all = xarray.concat(results, dim=dim)
xarray_beam.write_netcdf(mae_all, mae_path)
# calculate survival
survival_all = xarray.concat(
[analysis.mostly_good_survival(ds, q) for q in quantiles],
dim=pandas.Index(quantiles, name='quantile'))
xarray_beam.write_netcdf(survival_all, survival_path)
pipeline = (
'create' >> beam.Create(range(1))
| 'load' >> beam.FlatMap(lambda _: load_initial_conditions())
| 'reshuffle' >> beam.Reshuffle()
| 'integrate' >> beam.Map(
count_start_finish(run_integrate, name='run_integrate'))
| 'combine' >> beam.CombineGlobally(xarray_beam.ConcatCombineFn('sample'))
| 'finalize' >> beam.Map(finalize)
)
runner.run(pipeline)
if __name__ == '__main__':
flags.mark_flag_as_required('checkpoint_dir')
app.run(main)
|
google/data-driven-discretization-1d
|
pde_superresolution/scripts/run_evaluation.py
|
Python
|
apache-2.0
| 7,931
|
[
"NetCDF"
] |
a41a5eca4fcc6850dd6b05f6cbcc7f09aed4bf8f13f487708f296012a3c9793d
|
"""
Test waiting for elements to appear after requesting via ajax.
"""
from bok_choy.promise import BrokenPromise
from bok_choy.web_app_test import WebAppTest
from .pages import AjaxPage, AjaxNoJQueryPage
class AjaxTest(WebAppTest):
"""
Test waiting for an ajax call to return.
"""
def setUp(self):
super().setUp()
self.ajax = AjaxPage(self.browser)
self.ajax.visit()
def test_ajax(self):
"""
Test retrieving a value from the DOM that
is populated by an ajax call.
"""
self.ajax.click_button()
self.ajax.wait_for_ajax()
assert self.ajax.output == "Loaded via an ajax call."
def test_ajax_too_slow(self):
"""
Test that a BrokenPromise is raised when the ajax requests take longer
than the timeout.
"""
# Pretend there are ajax requests pending.
self.ajax.browser.execute_script('jQuery.active=1')
with self.assertRaises(BrokenPromise) as exc:
self.ajax.wait_for_ajax(timeout=1)
self.assertEqual(
'Promise not satisfied: Finished waiting for ajax requests.',
            str(exc.exception))
class AjaxNoJQueryTest(WebAppTest):
"""
Test waiting for a ajax on a page where jQuery isn't loaded.
"""
def setUp(self):
super().setUp()
self.ajax = AjaxNoJQueryPage(self.browser)
self.ajax.visit()
def test_ajax_with_slow_jquery(self):
"""
Test that a BrokenPromise is raised when jQuery is not defined on the
page.
"""
with self.assertRaises(BrokenPromise) as exc:
self.ajax.wait_for_ajax(timeout=1)
self.assertEqual(
'Promise not satisfied: Finished waiting for ajax requests.',
            str(exc.exception))
|
edx/bok-choy
|
tests/test_ajax.py
|
Python
|
apache-2.0
| 1,832
|
[
"VisIt"
] |
8ab217c610a3795b7f034cd814c218536bcf07e6626e65c4dc28c7d2bee27f70
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# rfxcom documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 28 08:27:53 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import ast
import codecs
import os
import sys
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
class VersionFinder(ast.NodeVisitor):
def __init__(self):
self.version = None
def visit_Assign(self, node):
if getattr(node.targets[0], 'id', None) == '__version__':
self.version = node.value.s
def read(*parts):
filename = os.path.join(os.path.dirname(__file__), *parts)
with codecs.open(filename, encoding='utf-8') as fp:
return fp.read()
def find_version(*parts):
finder = VersionFinder()
finder.visit(ast.parse(read(*parts)))
return finder.version
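# Illustrative sketch (not used by the build): VersionFinder walks a module's
# AST and records the string assigned to __version__, so find_version() can
# report the package version without importing it. The version string below
# is hypothetical.
def _version_finder_demo():
    finder = VersionFinder()
    finder.visit(ast.parse('__version__ = "1.2.3"'))
    return finder.version  # "1.2.3"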
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
sys.path.insert(0, ROOT)
sys.path.insert(0, BASE_DIR)
def gen_ref(ver, title, names):
names = ["__init__", ] + names
refdir = os.path.join(BASE_DIR, "ref")
pkg = "rfxcom"
if ver:
pkg = "%s.%s" % (pkg, ver)
refdir = os.path.join(refdir, ver)
if not os.path.exists(refdir):
os.makedirs(refdir)
idxpath = os.path.join(refdir, "index.rst")
with open(idxpath, "w") as idx:
idx.write(("%(title)s\n"
"%(signs)s\n"
"\n"
".. toctree::\n"
" :maxdepth: 1\n"
"\n") % {"title": title, "signs": "=" * len(title)})
for name in names:
idx.write(" %s\n" % name)
rstpath = os.path.join(refdir, "%s.rst" % name)
with open(rstpath, "w") as rst:
vals = {
"pkg": pkg, "name": name
}
rst.write(
"\n"
".. automodule:: %(pkg)s.%(name)s\n"
" :member-order: bysource\n"
" :members:\n"
" :undoc-members:\n"
" :show-inheritance:\n" % vals)
if not on_rtd:
gen_ref("", "rfxcom", ["exceptions", ])
gen_ref("protocol", "rfxcom.protocol", ["base", "elec", "lighting5",
"status", "temphumidity"])
gen_ref("transport", "rfxcom.transport", ["asyncio", "base"])
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'rfxcom'
copyright = '2014, Dougal Matthews'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = find_version("..", "..", "rfxcom", "__init__.py")
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'rfxcomdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'rfxcom.tex', 'rfxcom Docs', 'Dougal Matthews', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'rfxcom', 'rfxcom Documentation',
['Dougal Matthews'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [(
'index',
'rfxcom',
'rfxcom Documentation',
'Dougal Matthews',
'rfxcom',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- RTD Theme ------------------------------------------------------------
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
kalfa/python-rfxcom
|
docs/source/conf.py
|
Python
|
bsd-3-clause
| 10,681
|
[
"VisIt"
] |
becc134668ae1c65be1ffcc27028ee619f8b467cff96d9f32341f50e071dfb90
|
#!/usr/bin/env python
"""
Script that facilitates the modification of an element through the command line.
However, the usage of this script will set the element token to the command
issuer with a duration of 1 day.
"""
from datetime import datetime, timedelta
from DIRAC import gLogger, exit as DIRACExit, S_OK, version
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ResourceStatusSystem.Client import ResourceStatusClient
from DIRAC.ResourceStatusSystem.PolicySystem import StateMachine
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
subLogger = None
def registerSwitches():
"""
Registers all switches that can be used while calling the script from the
command line interface.
"""
switches = (
("element=", "Element family to be Synchronized ( Site, Resource or Node )"),
("name=", "Name (or comma-separeted list of names) of the element where the change applies"),
("statusType=", "StatusType (or comma-separeted list of names), if none applies to all possible statusTypes"),
("status=", "Status to be changed"),
("reason=", "Reason to set the Status"),
("VO=", "VO to change a status for. When omitted, status will be changed for all VOs"),
)
for switch in switches:
Script.registerSwitch("", switch[0], switch[1])
def registerUsageMessage():
"""
Takes the script __doc__ and adds the DIRAC version to it
"""
usageMessage = " DIRAC %s\n" % version
usageMessage += __doc__
Script.setUsageMessage(usageMessage)
def parseSwitches():
"""
Parses the arguments passed by the user
"""
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()
if args:
subLogger.error("Found the following positional args '%s', but we only accept switches" % args)
subLogger.error("Please, check documentation below")
Script.showHelp(exitCode=1)
switches = dict(Script.getUnprocessedSwitches())
switches.setdefault("statusType", None)
switches.setdefault("VO", None)
for key in ("element", "name", "status", "reason"):
if key not in switches:
subLogger.error("%s Switch missing" % key)
subLogger.error("Please, check documentation below")
Script.showHelp(exitCode=1)
if not switches["element"] in ("Site", "Resource", "Node"):
subLogger.error("Found %s as element switch" % switches["element"])
subLogger.error("Please, check documentation below")
Script.showHelp(exitCode=1)
statuses = StateMachine.RSSMachine(None).getStates()
if not switches["status"] in statuses:
subLogger.error("Found %s as element switch" % switches["element"])
subLogger.error("Please, check documentation below")
Script.showHelp(exitCode=1)
subLogger.debug("The switches used are:")
    for switchKey, switchValue in switches.items():
        subLogger.debug("%s: %s" % (switchKey, switchValue))
return switches
def checkStatusTypes(statusTypes):
"""
To check if values for 'statusType' are valid
"""
opsH = Operations().getValue("ResourceStatus/Config/StatusTypes/StorageElement")
acceptableStatusTypes = opsH.replace(",", "").split()
for statusType in statusTypes:
if statusType not in acceptableStatusTypes and statusType != "all":
acceptableStatusTypes.append("all")
subLogger.error(
"'%s' is a wrong value for switch 'statusType'.\n\tThe acceptable values are:\n\t%s"
% (statusType, str(acceptableStatusTypes))
)
if "all" in statusType:
return acceptableStatusTypes
return statusTypes
def unpack(switchDict):
"""
To split and process comma-separated list of values for 'name' and 'statusType'
"""
switchDictSet = []
names = []
statusTypes = []
if switchDict["name"] is not None:
names = list(filter(None, switchDict["name"].split(",")))
if switchDict["statusType"] is not None:
statusTypes = list(filter(None, switchDict["statusType"].split(",")))
statusTypes = checkStatusTypes(statusTypes)
if len(names) > 0 and len(statusTypes) > 0:
combinations = [(a, b) for a in names for b in statusTypes]
for combination in combinations:
n, s = combination
switchDictClone = switchDict.copy()
switchDictClone["name"] = n
switchDictClone["statusType"] = s
switchDictSet.append(switchDictClone)
elif len(names) > 0 and len(statusTypes) == 0:
for name in names:
switchDictClone = switchDict.copy()
switchDictClone["name"] = name
switchDictSet.append(switchDictClone)
elif len(names) == 0 and len(statusTypes) > 0:
for statusType in statusTypes:
switchDictClone = switchDict.copy()
switchDictClone["statusType"] = statusType
switchDictSet.append(switchDictClone)
elif len(names) == 0 and len(statusTypes) == 0:
switchDictClone = switchDict.copy()
switchDictClone["name"] = None
switchDictClone["statusType"] = None
switchDictSet.append(switchDictClone)
return switchDictSet
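def _unpackExample():
    """
    Illustrative sketch (not used by the script): unpack() expands a
    comma-separated 'name' switch into one switch dictionary per element.
    The site names below are hypothetical.
    """
    demo = {
        "element": "Site",
        "name": "LCG.SiteA.org,LCG.SiteB.org",
        "statusType": None,
        "status": "Active",
        "reason": "illustration only",
        "VO": None,
    }
    return unpack(demo)  # -> two dictionaries, one per site name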
def getTokenOwner():
"""
Function that gets the userName from the proxy
"""
proxyInfo = getProxyInfo()
if not proxyInfo["OK"]:
return proxyInfo
userName = proxyInfo["Value"]["username"]
return S_OK(userName)
def setStatus(switchDict, tokenOwner):
"""
Function that gets the user token, sets the validity for it. Gets the elements
in the database for a given name and statusType(s). Then updates the status
of all them adding a reason and the token.
"""
rssClient = ResourceStatusClient.ResourceStatusClient()
elements = rssClient.selectStatusElement(
switchDict["element"],
"Status",
name=switchDict["name"],
statusType=switchDict["statusType"],
vO=switchDict["VO"],
meta={"columns": ["Status", "StatusType"]},
)
if not elements["OK"]:
return elements
elements = elements["Value"]
if not elements:
subLogger.warn(
"Nothing found for %s, %s, %s %s"
% (switchDict["element"], switchDict["name"], switchDict["VO"], switchDict["statusType"])
)
return S_OK()
tomorrow = datetime.utcnow().replace(microsecond=0) + timedelta(days=1)
for status, statusType in elements:
subLogger.debug("%s %s" % (status, statusType))
if switchDict["status"] == status:
subLogger.notice("Status for %s (%s) is already %s. Ignoring.." % (switchDict["name"], statusType, status))
continue
subLogger.debug(
"About to set status %s -> %s for %s, statusType: %s, VO: %s, reason: %s"
% (status, switchDict["status"], switchDict["name"], statusType, switchDict["VO"], switchDict["reason"])
)
result = rssClient.modifyStatusElement(
switchDict["element"],
"Status",
name=switchDict["name"],
statusType=statusType,
status=switchDict["status"],
reason=switchDict["reason"],
vO=switchDict["VO"],
tokenOwner=tokenOwner,
tokenExpiration=tomorrow,
)
if not result["OK"]:
return result
return S_OK()
def run(switchDict):
"""
Main function of the script
"""
tokenOwner = getTokenOwner()
if not tokenOwner["OK"]:
subLogger.error(tokenOwner["Message"])
DIRACExit(1)
tokenOwner = tokenOwner["Value"]
subLogger.notice("TokenOwner is %s" % tokenOwner)
result = setStatus(switchDict, tokenOwner)
if not result["OK"]:
subLogger.error(result["Message"])
DIRACExit(1)
@Script()
def main():
global subLogger
global registerUsageMessage
subLogger = gLogger.getSubLogger(__file__)
# Script initialization
registerSwitches()
registerUsageMessage()
switchDict = parseSwitches()
switchDictSets = unpack(switchDict)
# Run script
for switchDict in switchDictSets:
run(switchDict)
# Bye
DIRACExit(0)
if __name__ == "__main__":
main()
|
ic-hep/DIRAC
|
src/DIRAC/ResourceStatusSystem/scripts/dirac_rss_set_status.py
|
Python
|
gpl-3.0
| 8,341
|
[
"DIRAC"
] |
72e89bd6a5ede14c7a7c956fa62c09f073b9a7a7b23b8c856606337f90158cc0
|
import re
from nxtools import format_time
from firefly.core.metadata import meta_types
from firefly.qt import Qt, QColor, QPen, QFontMetrics
COLOR_CALENDAR_BACKGROUND = QColor("#161616")
COLOR_DAY_BACKGROUND = QColor("#323232")
TIME_PENS = [
(60, QPen(QColor("#999999"), 2, Qt.SolidLine)),
(15, QPen(QColor("#999999"), 1, Qt.SolidLine)),
(5, QPen(QColor("#444444"), 1, Qt.SolidLine)),
]
RUN_PENS = [
QPen(QColor("#dddd00"), 2, Qt.SolidLine),
QPen(QColor("#dd0000"), 2, Qt.SolidLine),
]
SECS_PER_DAY = 3600 * 24
MINS_PER_DAY = 60 * 24
SECS_PER_WEEK = SECS_PER_DAY * 7
SAFE_OVERRUN = 5 # Do not warn if overrun < 5 mins
CLOCKBAR_WIDTH = 45
def suggested_duration(dur):
adur = int(dur) + 360
g = adur % 300
return adur - g + 300 if g > 150 else adur - g
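# Illustrative check (a sketch, not called anywhere): suggested_duration() pads
# the raw duration by 360 s and rounds to the nearest 5-minute block, e.g.
# 1000 s -> 1360 s -> 1500 s, and 800 s -> 1160 s -> 1200 s.
def _suggested_duration_examples():
    return suggested_duration(1000) == 1500 and suggested_duration(800) == 1200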
def text_shorten(text, font, target_width):
fm = QFontMetrics(font)
exps = [r"\W|_", r"[a-z]([aáeéěiíoóuůú])", r"[a-z]", r"."]
r = exps.pop(0)
text = text[::-1]
while fm.width(text) > target_width:
text, n = re.subn(r, "", text, 1)
if n == 0:
r = exps.pop(0)
return text[::-1]
def dump_template(calendar):
result = """<?xml version="1.0" encoding="utf-8" standalone="yes"?>\n"""
result += "<template>\n"
DAY_NAMES = [
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday",
]
days = [[], [], [], [], [], [], []]
for event in calendar.events:
week_offset = event["start"] - calendar.week_start_time
day = int(week_offset / (3600 * 24))
days[day].append(event)
for i, day in enumerate(days):
result += f" <!-- {DAY_NAMES[i]} -->\n"
result += " <day>\n"
for event in day:
clock = format_time(event["start"], "%H:%M")
result += f" <event time=\"{clock}\"> <!-- {event['title']} -->\n"
for key in event.meta:
if meta_types[key]["ns"] != "m":
continue
value = event[key]
if type(value) is str:
                    value = value.replace("&", "&amp;")
result += f' <meta key="{key}">{value}</meta>\n'
result += " </event>\n"
result += " </day>\n"
result += "</template>"
return result
|
immstudios/firefly
|
firefly/modules/scheduler_utils.py
|
Python
|
gpl-3.0
| 2,372
|
[
"Firefly"
] |
07a3d23b4dfa23316e9c5377d3da8ced4dfd259f8dda289601742e1d0a3aa821
|
import pysam
import os
import sys
import re
Locations = sys.argv[1]
Fasta = sys.argv[2]
OutFile = sys.argv[3]
Size = sys.argv[4]
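# Illustrative usage (hypothetical file names): extract +/- <Size> bp of genomic
# sequence around each BED peak midpoint and write one FASTA record per peak,
# named after the 4th BED column, e.g.:
#   python extractSequenceUnderPeak.py peaks.bed genome.fa peak_windows.fa 250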
#Locations = "/lustre/mib-cri/carrol09/Work/PipelinePracticeSet/20111109_RossAdams_DN_HNF1bChIP/Peaks/Macs_Peaks/SLX-4497.739.s_2.bwa.homo_sapiens_Processed_summits.bed"
#Fasta = "/lustre/mib-cri/carrol09/Work/MyPipe/Genomes/GRCh37/homo_sapiens.fa"
#OutFile = "/lustre/mib-cri/carrol09/Work/PipelinePracticeSet/20111109_RossAdams_DN_HNF1bChIP/OutPutSequences.txt"
#print(Locations)
FastaFile = pysam.Fastafile(Fasta)
MyFasta = open(OutFile,'w')
bed = open(Locations,"r")
bedList = []
newSetTemp = []
bedListName = []
for line in bed:  # one BED record per line: chrom, start, end, name
    ChompRange = line.rstrip("\n")
    coords = re.split("\t", ChompRange)
newSetTemp = []
newSetTemp.append(coords[0])
newSetTemp.append(((float(coords[1])+float(coords[2]))/2)-int(Size))
newSetTemp.append(((float(coords[1])+float(coords[2]))/2)+int(Size))
newSetTemp.append(coords[3])
bedList.append(newSetTemp)
bed.close()
K= 0
Missed = 0
for region in bedList:
K=K+1
#print(str(region[0]),int(region[1]),int(region[2]))
Sequence = FastaFile.fetch(str(region[0]),int(region[1]),int(region[2]))
if len(Sequence) == (int(Size)*2):
#print(Sequence)
MyFasta.write(">"+region[3]+"\n")
MyFasta.write(Sequence+"\n")
else:
Missed = Missed+1
print("Peaks with no sequence found "+str(Missed)+"\n")
MyFasta.close()
|
hjanime/mrcchip
|
extractSequenceUnderPeak.py
|
Python
|
gpl-2.0
| 1,396
|
[
"BWA",
"pysam"
] |
d24633d1e812fcb27d8b8d417c6325978c3dca0c80913931c28d6a501b8b22ce
|
# Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
from setuptools import setup
def get_static_files(path):
return [os.path.join(dirpath.replace("luigi/", ""), ext)
for (dirpath, dirnames, filenames) in os.walk(path)
for ext in ["*.html", "*.js", "*.css", "*.png",
"*.eot", "*.svg", "*.ttf", "*.woff", "*.woff2"]]
luigi_package_data = sum(map(get_static_files, ["luigi/static", "luigi/templates"]), [])
readme_note = """\
.. note::
For the latest source, discussion, etc, please visit the
`GitHub repository <https://github.com/spotify/luigi>`_\n\n
"""
with open('README.rst') as fobj:
long_description = readme_note + fobj.read()
install_requires = [
'tornado>=4.0,<5',
'python-daemon<3.0',
]
if os.environ.get('READTHEDOCS', None) == 'True':
# So that we can build documentation for luigi.db_task_history and luigi.contrib.sqla
install_requires.append('sqlalchemy')
# readthedocs don't like python-daemon, see #1342
install_requires.remove('python-daemon<3.0')
install_requires.append('sphinx>=1.4.4') # Value mirrored in doc/conf.py
setup(
name='luigi',
version='2.7.0',
    description='Workflow mgmt + task scheduling + dependency resolution',
long_description=long_description,
author='The Luigi Authors',
url='https://github.com/spotify/luigi',
license='Apache License 2.0',
packages=[
'luigi',
'luigi.contrib',
'luigi.contrib.hdfs',
'luigi.tools'
],
package_data={
'luigi': luigi_package_data
},
entry_points={
'console_scripts': [
'luigi = luigi.cmdline:luigi_run',
'luigid = luigi.cmdline:luigid',
'luigi-grep = luigi.tools.luigi_grep:main',
'luigi-deps = luigi.tools.deps:main',
'luigi-deps-tree = luigi.tools.deps_tree:main',
'luigi-migrate = luigi.tools.migrate:main'
]
},
install_requires=install_requires,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: System :: Monitoring',
],
)
|
republic-analytics/luigi
|
setup.py
|
Python
|
apache-2.0
| 3,092
|
[
"VisIt"
] |
6a9e76e9b78cdb744ae03e128ee1a9a3640b359e34876f4f7f3ede0f7b318664
|
# -*- coding: utf-8 -*-
##
# TRACK 8
# BODY LANGUAGE
# Brian Foo (brianfoo.com)
# This file builds the sequence file for use with ChucK from the data supplied
##
# Library dependancies
import csv
import json
import math
import os
from pprint import pprint
import sys
import time
# Config
BPM = 100 # Beats per minute, e.g. 60, 75, 100, 120, 150
DIVISIONS_PER_BEAT = 8 # e.g. 4 = quarter notes, 8 = eighth notes, etc
VARIANCE_MS = 20 # +/- milliseconds an instrument note should be off by to give it a little more "natural" feel
GAIN = 0.4 # base gain
TEMPO = 1.0 # base tempo
MS_PER_ARTIST = 12000
REGION_COUNT = 3 # number of artist's top-mentioned body regions to look at
PROBABILITY_MULTIPLIER = 0.8 # make it more or less likely to say a body part
# Files
INSTRUMENTS_INPUT_FILE = 'data/instruments.csv'
ARTISTS_INPUT_FILE = 'data/artists.csv'
ANALYSIS_INPUT_FILE = 'data/analysis.json'
SUMMARY_OUTPUT_FILE = 'data/report_summary.csv'
SUMMARY_SEQUENCE_OUTPUT_FILE = 'data/report_sequence.csv'
INSTRUMENTS_OUTPUT_FILE = 'data/ck_instruments.csv'
SEQUENCE_OUTPUT_FILE = 'data/ck_sequence.csv'
VIZ_OUTPUT_FILE = 'visualization/data/visualization.json'
INSTRUMENTS_DIR = 'instruments/'
# Output options
WRITE_SEQUENCE = True
WRITE_REPORT = True
WRITE_VIZ = True
# Calculations
BEAT_MS = round(60.0 / BPM * 1000)
ROUND_TO_NEAREST = round(BEAT_MS / DIVISIONS_PER_BEAT)
BEATS_PER_ARTIST = round(MS_PER_ARTIST / BEAT_MS)
# Init
artist_sequence = []
artists = []
instruments = []
sequence = []
hindex = 0
hindex_instrument = 0
# For creating pseudo-random numbers
def halton(index, base):
result = 0.0
f = 1.0 / base
i = 1.0 * index
while(i > 0):
result += f * (i % base)
i = math.floor(i / base)
f = f / base
return result
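# Illustrative note (a sketch, not used directly): halton() produces a
# low-discrepancy sequence in [0, 1); with base 2 the first values are
# 1/2, 1/4, 3/4, 1/8, ... which the script uses as deterministic
# "pseudo-random" draws for note variance and probability checks.
def _halton_demo(n=4, base=2):
    return [halton(i, base) for i in range(1, n + 1)]  # [0.5, 0.25, 0.75, 0.125]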
# floor {n} to nearest {nearest}
def floorToNearest(n, nearest):
return 1.0 * math.floor(1.0*n/nearest) * nearest
# round {n} to nearest {nearest}
def roundToNearest(n, nearest):
return 1.0 * round(1.0*n/nearest) * nearest
# Read instruments from file
with open(INSTRUMENTS_INPUT_FILE, 'rb') as f:
r = csv.reader(f, delimiter=',')
next(r, None) # remove header
for file, artist, region, gender, from_gain, to_gain, from_tempo, to_tempo, tempo_offset, interval_phase, interval, interval_offset, active in r:
if int(active):
index = len(instruments)
# build instrument object
_beat_ms = int(round(BEAT_MS/TEMPO))
instrument = {
'index': index,
'file': INSTRUMENTS_DIR + file,
'artist': artist.decode('utf-8'),
'region': region,
'gender': gender,
'from_gain': float(from_gain) * GAIN,
'to_gain': float(to_gain) * GAIN,
'from_tempo': float(from_tempo) * TEMPO,
'to_tempo': float(to_tempo) * TEMPO,
'tempo_offset': float(tempo_offset),
'interval_ms': int(int(interval_phase)*_beat_ms),
'interval': int(interval),
'interval_offset': int(interval_offset),
'from_beat_ms': int(round(BEAT_MS/(float(from_tempo)*TEMPO))),
'to_beat_ms': int(round(BEAT_MS/(float(to_tempo)*TEMPO))),
'beat_ms': _beat_ms
}
# add instrument to instruments
instruments.append(instrument)
# Read artists from file
with open(ANALYSIS_INPUT_FILE) as data_file:
artists = json.load(data_file)
with open(ARTISTS_INPUT_FILE) as csvfile:
artist_sequence = csv.DictReader(csvfile)
# Re-order artists based on sequence
artists_temp = artists[:]
artists = []
for i, a in enumerate(artist_sequence):
artist = next(iter([_a for _a in artists_temp if a['name']==_a['artist'].encode('utf-8')]), None)
artist['index'] = i
artists.append(artist)
# Calculate total time
total_ms = len(artists) * MS_PER_ARTIST
total_seconds = int(1.0*total_ms/1000)
print('Main sequence time: '+time.strftime('%M:%S', time.gmtime(total_seconds)) + ' (' + str(total_seconds) + 's)')
print('Ms per beat: ' + str(BEAT_MS))
print('Beats per artist: ' + str(BEATS_PER_ARTIST))
# Multiplier based on sine curve
def getMultiplier(percent_complete, rad=1.0):
radians = percent_complete * (math.pi * rad)
multiplier = math.sin(radians)
if multiplier < 0:
multiplier = 0.0
elif multiplier > 1:
        multiplier = 1.0
return multiplier
# Retrieve gain based on current beat
def getGain(instrument, percent_complete):
multiplier = getMultiplier(percent_complete)
from_gain = instrument['from_gain']
to_gain = instrument['to_gain']
min_gain = min(from_gain, to_gain)
gain = multiplier * (to_gain - from_gain) + from_gain
gain = max(min_gain, round(gain, 2))
return gain
# Get beat duration in ms based on current point in time
def getBeatMs(instrument, percent_complete, round_to):
multiplier = getMultiplier(percent_complete)
from_beat_ms = instrument['from_beat_ms']
to_beat_ms = instrument['to_beat_ms']
ms = multiplier * (to_beat_ms - from_beat_ms) + from_beat_ms
ms = int(roundToNearest(ms, round_to))
return ms
# Return if the instrument should be played in the given interval
def isValidInterval(instrument, elapsed_ms):
interval_ms = instrument['interval_ms']
interval = instrument['interval']
interval_offset = instrument['interval_offset']
return int(math.floor(1.0*elapsed_ms/interval_ms)) % interval == interval_offset
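# Illustrative check (a sketch, not called by the script): with interval_ms=500,
# interval=4 and interval_offset=1, isValidInterval() admits only the second
# 500 ms window out of every four, i.e. elapsed_ms in [500, 1000), [2500, 3000), ...
def _interval_demo():
    fake_instrument = {'interval_ms': 500, 'interval': 4, 'interval_offset': 1}
    return [isValidInterval(fake_instrument, ms) for ms in (0, 500, 999, 1000, 2500)]
    # -> [False, True, True, False, True]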
# Add beats to sequence
def addBeatsToSequence(region, instrument, duration, ms, round_to):
global sequence
global hindex
global hindex_instrument
beat_ms = int(roundToNearest(instrument['beat_ms'], round_to))
offset_ms = int(instrument['tempo_offset'] * instrument['from_beat_ms'])
ms += offset_ms
previous_ms = int(ms)
from_beat_ms = instrument['from_beat_ms']
to_beat_ms = instrument['to_beat_ms']
min_ms = min(from_beat_ms, to_beat_ms)
remaining_duration = int(duration)
elapsed_duration = offset_ms
while remaining_duration >= min_ms:
elapsed_ms = int(ms)
elapsed_beat = int((elapsed_ms-previous_ms) / beat_ms)
percent_complete = 1.0 * elapsed_duration / duration
this_beat_ms = getBeatMs(instrument, percent_complete, round_to)
# add to sequence if in valid interval
if isValidInterval(instrument, elapsed_ms):
if instrument['region'] == 'all':
h_i = -1
else:
h_i = halton(hindex_instrument, 5)
hindex_instrument += 1
if h_i < region['value_n'] * PROBABILITY_MULITPLIER:
h = halton(hindex, 3)
variance = int(h * VARIANCE_MS * 2 - VARIANCE_MS)
sequence.append({
'instrument_index': instrument['index'],
'instrument': instrument,
'position': 0,
'rate': 1,
'gain': getGain(instrument, percent_complete),
'elapsed_ms': max([elapsed_ms + variance, 0]),
'duration': min([this_beat_ms, MS_PER_ARTIST])
})
hindex += 1
remaining_duration -= this_beat_ms
elapsed_duration += this_beat_ms
ms += this_beat_ms
# Build sequence
for i in instruments:
ms = 0
hindex_instrument = 0
# Go through each artist
for a in artists:
# Go through top x regions
regions = a['regions_agnostic'][:REGION_COUNT]
for r in regions:
if a['artist']==i['artist'] and (r['name']==i['region'] or i['region']=='all'):
addBeatsToSequence(r.copy(), i.copy(), MS_PER_ARTIST, ms, ROUND_TO_NEAREST)
ms += MS_PER_ARTIST
# Sort sequence
sequence = sorted(sequence, key=lambda k: k['elapsed_ms'])
# Add milliseconds to sequence
elapsed = 0
for i, step in enumerate(sequence):
sequence[i]['milliseconds'] = step['elapsed_ms'] - elapsed
elapsed = step['elapsed_ms']
# Write instruments to file
if WRITE_SEQUENCE and len(instruments) > 0:
with open(INSTRUMENTS_OUTPUT_FILE, 'wb') as f:
w = csv.writer(f)
for index, instrument in enumerate(instruments):
w.writerow([index])
w.writerow([instrument['file']])
f.seek(-2, os.SEEK_END) # remove newline
f.truncate()
print('Successfully wrote instruments to file: '+INSTRUMENTS_OUTPUT_FILE)
# Write sequence to file
if WRITE_SEQUENCE and len(sequence) > 0:
with open(SEQUENCE_OUTPUT_FILE, 'wb') as f:
w = csv.writer(f)
for step in sequence:
w.writerow([step['instrument_index']])
w.writerow([step['position']])
w.writerow([step['gain']])
w.writerow([step['rate']])
w.writerow([step['milliseconds']])
f.seek(-2, os.SEEK_END) # remove newline
f.truncate()
print('Successfully wrote sequence to file: '+SEQUENCE_OUTPUT_FILE)
# Write summary files
if WRITE_REPORT and len(sequence) > 0:
with open(SUMMARY_OUTPUT_FILE, 'wb') as f:
w = csv.writer(f)
w.writerow(['Artist', 'Region', 'Percent'])
valid_regions = ['eye','face','hand','heart','mouth','foot','arm','butt','groin']
for a in artists:
for r in valid_regions:
region = next(iter([_r for _r in a['regions_agnostic'] if _r['name']==r]), None)
if region is None:
w.writerow([a['artist'].encode('utf-8'), r, 0])
else:
w.writerow([a['artist'].encode('utf-8'), r, 1.0 * region['value'] / a['value_count']])
others = sum([r['value'] for r in a['regions_agnostic'] if r['name'] not in valid_regions])
w.writerow([a['artist'].encode('utf-8'), 'other', 1.0 * others / a['value_count']])
print('Successfully wrote report to file: '+SUMMARY_OUTPUT_FILE)
with open(SUMMARY_SEQUENCE_OUTPUT_FILE, 'wb') as f:
w = csv.writer(f)
w.writerow(['Time', 'Instrument', 'Gain'])
for step in sequence:
instrument = instruments[step['instrument_index']]
elapsed = step['elapsed_ms']
elapsed_f = time.strftime('%M:%S', time.gmtime(int(elapsed/1000)))
ms = int(elapsed % 1000)
            elapsed_f += '.' + str(ms).zfill(3)  # zero-pad, e.g. 5 ms -> ".005", not ".5"
w.writerow([elapsed_f, instrument['file'], step['gain']])
f.seek(-2, os.SEEK_END) # remove newline
f.truncate()
print('Successfully wrote sequence report to file: '+SUMMARY_SEQUENCE_OUTPUT_FILE)
def getDuration(wav_file):
duration = 0
    with open(wav_file, "rb") as f:
        # read the ByteRate field from the header (see the Microsoft RIFF WAVE file format)
        # https://ccrma.stanford.edu/courses/422/projects/WaveFormat/
        # ByteRate is located at byte offset 28
f.seek(28)
a = f.read(4)
# convert string a into integer/longint value
# a is little endian, so proper conversion is required
byteRate = 0
for i in range(4):
byteRate = byteRate + ord(a[i]) * pow(256,i)
# get the file size in bytes
fileSize = os.path.getsize(wav_file)
# the duration of the data, in milliseconds, is given by
duration = (fileSize - 44) * 1000 / byteRate
return duration
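# A hedged alternative sketch (defined but not called above): the stdlib wave
# module reads the same header fields without manual byte parsing. Assumes a
# standard PCM WAVE file.
def getDurationWave(wav_file):
    import contextlib
    import wave
    with contextlib.closing(wave.open(wav_file, 'rb')) as w:
        # frames / framerate = seconds; scale to milliseconds
        return int(w.getnframes() * 1000 / w.getframerate())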
if WRITE_VIZ and len(sequence) > 0:
# measure the durations of all audio files
files = set([i['file'] for i in instruments if i['region'] != 'all'])
file_durations = {}
for f in files:
d = getDuration(f)
file_durations[f] = d
# add ms to artists
ms = 0
for i, a in enumerate(artists):
artists[i]['start_ms'] = ms
artists[i]['end_ms'] = ms + MS_PER_ARTIST
artists[i]['instruments'] = []
ms += MS_PER_ARTIST
# build instrument sequence
for step in sequence:
i = step['instrument']
if i['file'] not in file_durations:
continue
duration = file_durations[i['file']]
# Retrieve artist
artist = next(iter([a for a in artists if i['artist']==a['artist']]), None)
# Determine instrument's regions
instrument_regions = []
if i['gender'] == 'both':
instrument_regions = ['female_' + i['region'], 'male_' + i['region']]
else:
instrument_regions = [i['gender'] + '_' + i['region']]
# Add instruments to artists
for r in instrument_regions:
artists[artist['index']]['instruments'].append({
'region': r,
'start_ms': step['elapsed_ms'],
'end_ms': step['elapsed_ms'] + duration
})
with open(VIZ_OUTPUT_FILE, 'w') as outfile:
json.dump(artists, outfile)
print('Successfully wrote viz data to file: '+VIZ_OUTPUT_FILE)
|
beefoo/music-lab-scripts
|
08_body/body.py
|
Python
|
mit
| 12,943
|
[
"Brian"
] |
26614904bae1d09bcb585508801324f5dfb77ccad576ba34f12415db359abb86
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import pytest
from numpy.testing import assert_equal
import functools
import numpy as np
from MDAnalysis.tests.datafiles import (
TPR,
TPR400, TPR402, TPR403, TPR404, TPR405, TPR406, TPR407,
TPR450, TPR451, TPR452, TPR453, TPR454, TPR455, TPR455Double,
TPR460, TPR461, TPR502, TPR504, TPR505, TPR510, TPR510_bonded,
TPR2016, TPR2018, TPR2019B3, TPR2020B2, TPR2020, TPR2020Double,
TPR2021, TPR2021Double, TPR2022RC1,
TPR2016_bonded, TPR2018_bonded, TPR2019B3_bonded,
TPR2020B2_bonded, TPR2020_bonded, TPR2020_double_bonded,
TPR2021_bonded, TPR2021_double_bonded, TPR334_bonded,
TPR2022RC1_bonded,
TPR_EXTRA_2021, TPR_EXTRA_2020, TPR_EXTRA_2018,
TPR_EXTRA_2016, TPR_EXTRA_407, TPR_EXTRA_2022RC1,
XTC,
)
from MDAnalysisTests.topology.base import ParserBase
import MDAnalysis.topology.TPRParser
import MDAnalysis as mda
BONDED_TPRS = (
TPR510_bonded,
TPR2016_bonded,
TPR2018_bonded,
TPR2019B3_bonded,
TPR2021_bonded,
TPR2021_double_bonded,
TPR2020_bonded,
TPR2020_double_bonded,
TPR2022RC1_bonded,
TPR_EXTRA_2022RC1,
TPR_EXTRA_2021,
TPR_EXTRA_2020,
TPR_EXTRA_2018,
TPR_EXTRA_2016,
TPR_EXTRA_407,
)
class TPRAttrs(ParserBase):
parser = MDAnalysis.topology.TPRParser.TPRParser
expected_attrs = ['ids', 'names', 'elements',
'resids', 'resnames',
'moltypes', 'molnums', 'charges',
'bonds', 'angles', 'dihedrals', 'impropers']
def test_moltypes(self, top):
moltypes = top.moltypes.values
assert_equal(moltypes, self.ref_moltypes)
def test_molnums(self, top):
molnums = top.molnums.values
assert_equal(molnums, self.ref_molnums)
assert molnums.dtype == np.intp
class TestTPR(TPRAttrs):
"""
    This tests data/adk_oplsaa.tpr, which is of tpx version 58.
"""
expected_n_atoms = 47681
expected_n_residues = 11302
expected_n_segments = 3
ref_moltypes = np.array(['AKeco'] * 214 + ['SOL'] * 11084 + ['NA+'] * 4,
dtype=object)
ref_molnums = np.array([0] * 214 + list(range(1, 1 + 11084 + 4)))
@pytest.fixture()
def filename(self):
return TPR
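# As a sketch (outside the ParserBase machinery), the parser under test can be
# driven directly; the value below matches TestTPR's expectations:
#
#     parser = MDAnalysis.topology.TPRParser.TPRParser(TPR)
#     top = parser.parse()
#     assert top.n_atoms == 47681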
# The following tests use the same system grompped by different versions of gromacs
# FORMAT: TPRABC, where the numbers ABC indicate the version of gromacs that
# generated the corresponding tpr file
class TestTPRGromacsVersions(TPRAttrs):
expected_n_atoms = 2263
expected_n_residues = 230
expected_n_segments = 2
ref_moltypes = np.array(['Protein_A'] * 129 + ['SOL'] * 101, dtype=object)
ref_molnums = np.array([0] * 129 + list(range(1, 1 + 101)))
@pytest.fixture(params=[TPR400, TPR402, TPR403, TPR404, TPR405, TPR406,
TPR407, TPR450, TPR451, TPR452, TPR453, TPR454,
TPR455, TPR502, TPR504, TPR505, TPR510, TPR2016,
TPR2018, TPR2019B3, TPR2020, TPR2020Double,
TPR2021, TPR2021Double, TPR2022RC1])
def filename(self, request):
return request.param
class TestTPRDouble(TPRAttrs):
expected_n_atoms = 21692
expected_n_residues = 4352
expected_n_segments = 7
ref_moltypes = np.array(['DOPC'] * 21 + ['DPPC'] * 10 + ['CHOL'] * 3
+ ['DOPC'] * 21 + ['DPPC'] * 10 + ['CHOL'] * 3
+ ['SOL'] * 4284,
dtype=object)
ref_molnums = np.arange(4352)
@pytest.fixture()
def filename(self):
return TPR455Double
class TestTPR46x(TPRAttrs):
expected_n_atoms = 44052
expected_n_residues = 10712
expected_n_segments = 8
ref_moltypes = np.array(['Protein_A'] * 27 + ['Protein_B'] * 27
+ ['Protein_C'] * 27 + ['Protein_D'] * 27
+ ['Protein_E'] * 27
+ ['SOL'] * 10530 + ['NA+'] * 26 + ['CL-'] * 21,
dtype=object)
ref_molnums = np.array([0] * 27 + [1] * 27 + [2] * 27 + [3] * 27 + [4] * 27
+ list(range(5, 5 + 10530 + 26 + 21)))
@pytest.fixture(params=[TPR460, TPR461])
def filename(self, request):
return request.param
def _test_is_in_topology(name, elements, topology_path, topology_section):
"""
Test if an interaction appears as expected in the topology
"""
post_40_potentials = {
'RESTRAINTPOT', 'RESTRANGLES', 'RESTRDIHS', 'CBTDIHS', 'PIDIHS',
}
if name in post_40_potentials and topology_path == TPR_EXTRA_407:
# The potential is not yet implemented in this version of gromacs
return
parser = MDAnalysis.topology.TPRParser.TPRParser(topology_path)
top = parser.parse()
for element in elements:
assert element in getattr(top, topology_section).values, \
'Interaction type "{}" not found'.format(name)
@pytest.mark.parametrize('topology', BONDED_TPRS)
@pytest.mark.parametrize('bond', (
('BONDS', [(0, 1)]),
('G96BONDS', [(1, 2)]),
('MORSE', [(2, 3)]),
('CUBICBONDS', [(3, 4)]),
('CONNBONDS', [(4, 5)]),
('HARMONIC', [(5, 6)]),
('FENEBONDS', [(6, 7)]),
('RESTRAINTPOT', [(7, 8)]),
('TABBONDS', [(8, 9)]),
('TABBONDSNC', [(9, 10)]),
('CONSTR', [(10, 11)]),
('CONSTRNC', [(11, 12)]),
))
def test_all_bonds(topology, bond):
"""Test that all bond types are parsed as expected"""
bond_type_in_topology = functools.partial(_test_is_in_topology,
topology_section='bonds')
bond_type, elements = bond
bond_type_in_topology(bond_type, elements, topology)
@pytest.mark.parametrize('topology', BONDED_TPRS)
@pytest.mark.parametrize('angle', (
('ANGLES', [(0, 1, 2)]),
('G96ANGLES', [(1, 2, 3)]),
('CROSS_BOND_BOND', [(2, 3, 4)]),
('CROSS_BOND_ANGLE', [(3, 4, 5)]),
('UREY_BRADLEY', [(4, 5, 6)]),
('QANGLES', [(5, 6, 7)]),
('RESTRANGLES', [(6, 7, 8)]),
('TABANGLES', [(7, 8, 9)]),
))
def test_all_angles(topology, angle):
angle_type_in_topology = functools.partial(_test_is_in_topology,
topology_section='angles')
angle_type, elements = angle
angle_type_in_topology(angle_type, elements, topology)
@pytest.mark.parametrize('topology', BONDED_TPRS)
@pytest.mark.parametrize('dih', (
('PDIHS', [(0, 1, 2, 3), (1, 2, 3, 4), (7, 8, 9, 10)]),
('RBDIHS', [(4, 5, 6, 7)]),
('RESTRDIHS', [(8, 9, 10, 11)]),
('CBTDIHS', [(9, 10, 11, 12)]),
('FOURDIHS', [(6, 7, 8, 9)]),
('TABDIHS', [(10, 11, 12, 13)]),
))
def test_all_dihedrals(topology, dih):
dih_type_in_topology = functools.partial(_test_is_in_topology,
topology_section='dihedrals')
dih_type, elements = dih
dih_type_in_topology(dih_type, elements, topology)
@pytest.mark.parametrize('topology', BONDED_TPRS)
@pytest.mark.parametrize('impr', (
('IDIHS', [(2, 3, 4, 5), (3, 4, 5, 6)]),
('PIDIHS', [(5, 6, 7, 8)])
))
def test_all_impropers(topology, impr):
impr_type_in_topology = functools.partial(_test_is_in_topology,
topology_section='impropers')
impr_type, elements = impr
impr_type_in_topology(impr_type, elements, topology)
@pytest.fixture(params=(
TPR400, TPR402, TPR403, TPR404, TPR405, TPR406, TPR407, TPR450, TPR451,
TPR452, TPR453, TPR454, TPR502, TPR504, TPR505, TPR510, TPR2016, TPR2018,
))
def bonds_water(request):
parser = MDAnalysis.topology.TPRParser.TPRParser(request.param).parse()
# The index of the first water atom is 1960
first = 1960
bonds = [
bond
for bond in parser.bonds.values
if bond[0] >= first and bond[1] >= first
]
return bonds
def test_settle(bonds_water):
    # There are 101 water molecules with 2 bonds each
assert len(bonds_water) == 202
# The last index corresponds to the last water atom
assert bonds_water[-1][1] == 2262
@pytest.mark.parametrize('tpr_path, expected_exception', (
(TPR2020B2, IOError), # Gromacs 2020 beta see issue #2428
(TPR2020B2_bonded, IOError), # Gromacs 2020 beta see issue #2428
(TPR334_bonded, NotImplementedError), # Too old
(XTC, IOError), # Not a TPR file
))
def test_fail_for_unsupported_files(tpr_path, expected_exception):
parser = MDAnalysis.topology.TPRParser.TPRParser(tpr_path)
with pytest.raises(expected_exception):
parser.parse()
@pytest.mark.parametrize('tpr_path', BONDED_TPRS)
def test_no_elements(tpr_path):
"""
If the TPR does not contain element information, the element topology
attribute is not defined.
"""
parser = MDAnalysis.topology.TPRParser.TPRParser(tpr_path)
topology = parser.parse()
with pytest.raises(AttributeError):
_ = topology.elements
def test_elements():
tpr_path = TPR
parser = MDAnalysis.topology.TPRParser.TPRParser(tpr_path)
topology = parser.parse()
reference = np.array((
'H,C,H,H,C,H,H,H,C,H,H,H,C,O,N,H,C,H,C,H,H,C,H,C,H,H,H,C,H,H,'
'H,C,O,N,H,C,H,H,C,O,O,O,H,H,,O,H,H,,O,H,H,,O,H,H,,O,H,H,,O,H'
',H,,O,H,H,,O,H,H,,O,H,H,,O,H,H,,O,H,H,,O,H,H,,O,H,H,,O,H,H,,'
'O,H,H'
).split(','), dtype=object)
assert_equal(topology.elements.values[3300:3400], reference)
reference = np.array([
'O', 'H', 'H', '', 'O', 'H', 'H', '', 'O', 'H', 'H', '', 'O', 'H',
'H', '', 'Na', 'Na', 'Na', 'Na',
], dtype=object)
assert_equal(topology.elements.values[-20:], reference)
@pytest.mark.parametrize("resid_from_one,resid_addition", [
(False, 0),
(True, 1), # status quo for 2.x
])
def test_resids(resid_from_one, resid_addition):
u = mda.Universe(TPR, tpr_resid_from_one=resid_from_one)
resids = np.arange(len(u.residues)) + resid_addition
assert_equal(u.residues.resids, resids,
err_msg="tpr_resid_from_one kwarg not switching resids")
|
MDAnalysis/mdanalysis
|
testsuite/MDAnalysisTests/topology/test_tprparser.py
|
Python
|
gpl-2.0
| 11,188
|
[
"Gromacs",
"MDAnalysis"
] |
1a33136598350bad4c5740d8477f72b970160ac3cc25848cfc3c4c07fe427d37
|
#
# Copyright 2001 - 2006 Ludek Smid [http://www.ospace.net/]
#
# This file is part of IGE - Outer Space.
#
# IGE - Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# IGE - Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IGE - Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from ige import *
from xml.dom.minidom import Node
from ige.IObject import IObject
from ige.IDataHolder import IDataHolder
import Rules, Utils, ShipUtils
from Const import *
import math, random
from ige import log
class IPlanet(IObject):
typeID = T_PLANET
def init(self, obj):
IObject.init(self, obj)
#
obj.x = 0.0
obj.y = 0.0
obj.plDiameter = 0
obj.plType = u'-'
obj.plMin = 0
obj.plBio = 0
obj.plEn = 0
obj.plEnv = 0
obj.plSlots = 0
obj.plMaxSlots = 0
obj.plStratRes = 0
obj.plDisease = 0
obj.plStarting = 0
obj.orbit = 0
obj.storPop = 0
obj.slots = []
# storage
obj.storBio = 0
obj.storEn = 0
obj.minBio = Rules.colonyMinBio
obj.minEn = Rules.colonyMinEn
obj.maxBio = 0
obj.maxEn = 0
# changes/prod
obj.prodQueue = []
obj.changeBio = 0
obj.changeEn = 0
obj.changePop = 0
obj.changeEnv = 0
obj.prodProd = 0
obj.effProdProd = 0
obj.prodSci = 0
obj.effProdSci = 0
obj.unemployedPop = 0
# eating / housing
obj.popEatBio = 10
obj.popEatEn = 0
obj.maxPop = 0
# extra goodies
obj.solarmod = 0
obj.scannerPwr = 0
obj.signature = 75
obj.autoMinStor = 1
obj.morale = Rules.maxMorale
obj.changeMorale = 0.0
obj.moraleTrgt = 0.0
obj.revoltLen = 0
obj.combatExp = 0
obj.isMilitary = 0
obj.refuelMax = 0
obj.refuelInc = 0
obj.repairShip = 0.0
obj.upgradeShip = 0.0
obj.trainShipInc = 0
obj.trainShipMax = 0
obj.fleetSpeedBoost = 1.0
obj.ownerSince = 0
obj.shield = 0 #current planetary shield level
        obj.maxShield = 0 #structural max shield (best structure method)
obj.prevShield = 0 #previous turn's shield level (for client growth calculation)
def startConstruction(self, tran, obj, techID, quantity, targetID, isShip, reportFinished,
demolishStruct):
if len(obj.prodQueue) > Rules.maxProdQueueLen:
raise GameException('Queue is full.')
if quantity < 1:
raise GameException("Quantity must be greater than 0")
player = tran.db[obj.owner]
if not player.techs.has_key(techID) and isShip == 0:
raise GameException('You do not own this kind of technology.')
if not player.shipDesigns.has_key(techID) and isShip == 1:
raise GameException('You do not own this ship design.')
if targetID not in tran.db[obj.compOf].planets:
raise GameException('You can build only in the same system.')
if isShip:
tech = player.shipDesigns[techID]
if tech.upgradeTo:
raise GameException("You cannot build obsolete ship design.")
else:
tech = Rules.techs[techID]
if not (tech.isStructure or tech.isProject):
raise GameException('You cannot construct this technology.')
if not tech.validateConstrHandler(tran, obj, tran.db[targetID], tech):
raise GameException('Conditions for construction are not satisfied.')
neededSR = {}
for sr in tech.buildSRes:
if player.stratRes.get(sr, 0) < neededSR.get(sr, 0) + quantity:
raise GameException("You do not own required strategic resource(s)")
neededSR[sr] = neededSR.get(sr, 0) + quantity
# consume strategic resources
for sr in neededSR:
player.stratRes[sr] -= neededSR[sr]
# start construction
item = IDataHolder()
item.techID = techID
item.currProd = 0
item.currTurn = 0
item.quantity = int(quantity)
item.targetID = targetID
item.changePerc = 0
item.isShip = bool(isShip)
item.reportFin = bool(reportFinished)
item.demolishStruct = demolishStruct
item.type = T_TASK
obj.prodQueue.append(item)
return obj.prodQueue, player.stratRes
startConstruction.public = 1
startConstruction.accLevel = AL_FULL
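    # Worked example of the strategic-resource check above (illustration only,
    # SR_X is a hypothetical resource id): with tech.buildSRes == [SR_X, SR_X]
    # and quantity == 3, the loop demands 3 then 6 units of SR_X cumulatively,
    # so the player must own at least 6 before 6 are consumed.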
def changeConstruction(self, tran, obj, index, quantity):
if index < 0 or index >= len(obj.prodQueue):
raise GameException("No such item in the construction queue.")
if quantity < 1:
raise GameException("Quantity must be greater than 0")
player = tran.db[obj.owner]
item = obj.prodQueue[index]
if item.isShip:
tech = player.shipDesigns[item.techID]
else:
tech = Rules.techs[item.techID]
quantityChange = quantity - obj.prodQueue[index].quantity
neededSR = {}
for sr in tech.buildSRes:
if player.stratRes.get(sr, 0) < neededSR.get(sr, 0) + quantityChange:
raise GameException("You do not own required strategic resource(s)")
neededSR[sr] = neededSR.get(sr, 0) + quantityChange
# consume strategic resources
for sr in neededSR:
player.stratRes[sr] += (-1 * neededSR[sr])
obj.prodQueue[index].quantity = quantity
return obj.prodQueue, player.stratRes
changeConstruction.public = 1
changeConstruction.accLevel = AL_FULL
def abortConstruction(self, tran, obj, index):
if index >= len(obj.prodQueue):
raise GameException('No such item in the construction queue.')
# Free strategic resources
player = tran.db[obj.owner]
item = obj.prodQueue[index]
if item.isShip:
tech = player.shipDesigns[item.techID]
else:
tech = Rules.techs[item.techID]
for sr in tech.buildSRes:
player.stratRes[sr] = player.stratRes.get(sr, 0) + item.quantity
# delete task
del obj.prodQueue[index]
return obj.prodQueue, player.stratRes
abortConstruction.public = 1
abortConstruction.accLevel = AL_FULL
def moveConstrItem(self, tran, obj, index, rel):
if index >= len(obj.prodQueue):
raise GameException('No such item in the construction queue.')
if index + rel < 0 or index + rel >= len(obj.prodQueue):
raise GameException('Cannot move.')
item = obj.prodQueue[index]
del obj.prodQueue[index]
obj.prodQueue.insert(index + rel, item)
return obj.prodQueue
moveConstrItem.public = 1
moveConstrItem.accLevel = AL_FULL
def changeOwner(self, tran, obj, ownerID, force = 0):
oldOwnerID = obj.owner
if obj.owner == ownerID:
# the owner is the same
return
elif obj.owner != OID_NONE and force == 0:
# this planet is already owned!
# TODO resolve conflict (based on player relations)
raise GameException('Planet is already owned by another commander.')
elif obj.owner != OID_NONE and force == 1:
# remove planet from old owner
try:
oldOwner = tran.db[obj.owner]
oldOwner.planets.remove(obj.oid)
if tran.db.has_key(obj.owner):
Utils.sendMessage(tran, obj, MSG_LOST_PLANET, obj.oid, None)
except Exception:
log.warning("Cannot remove planet from owner", obj.oid, obj.owner)
oldOwnerID = OID_NONE
# reset timer
obj.ownerSince = tran.db[OID_UNIVERSE].turn
# add planet to new owner's empire
if ownerID != OID_NONE:
newOwner = tran.db[ownerID]
newOwner.planets.append(obj.oid)
# reset some attributes
obj.owner = ownerID
obj.revoltLen = 0 # no revolt
obj.prodQueue = [] # clear production queue
if ownerID != OID_NONE:
# notify player
Utils.sendMessage(tran, obj, MSG_GAINED_PLANET, obj.oid, None)
changeOwner.public = 1
changeOwner.accLevel = AL_ADMIN
def setMinStorage(self, tran, obj, bio, en):
if bio < 0 or en < 0:
raise GameException('Values must be equal or greater than zero.')
obj.minBio = bio
obj.minEn = en
setMinStorage.public = 1
setMinStorage.accLevel = AL_FULL
def setAutoMinStorage(self, tran, obj, on):
if on != 0 and on != 1:
raise GameException('Must be 0 or 1')
obj.autoMinStor = on
return obj.autoMinStor
setAutoMinStorage.public = 1
setAutoMinStorage.accLevel = AL_FULL
def setStructOn(self, tran, obj, slotIdx, on):
if slotIdx >= len(obj.slots) or slotIdx < 0:
raise GameException('No such structure.')
if on:
obj.slots[slotIdx][STRUCT_IDX_STATUS] |= STRUCT_STATUS_ON
else:
obj.slots[slotIdx][STRUCT_IDX_STATUS] &= ~STRUCT_STATUS_ON
return obj.slots[slotIdx]
setStructOn.public = 1
setStructOn.accLevel = AL_FULL
def demolishStruct(self, tran, obj, slotIdx):
# TODO implement special button for demolishing structures when
# planet surrenders
#isCombat = tran.db[obj.compOf].combatCounter > 0
#if isCombat and len(obj.slots) < obj.plSlots:
# raise GameException("You cannot destroy this structure under fire - at least one slot is free.")
if slotIdx >= len(obj.slots) or slotIdx < 0:
raise GameException('No such structure.')
del obj.slots[slotIdx]
return obj.slots
demolishStruct.public = 1
demolishStruct.accLevel = AL_FULL
def moveStruct(self, tran, obj, slotIdx, rel):
if slotIdx >= len(obj.slots) or slotIdx < 0:
raise GameException('No such structure.')
if slotIdx + rel < 0 or slotIdx + rel >= len(obj.slots):
raise GameException('Cannot move.')
struct = obj.slots[slotIdx]
del obj.slots[slotIdx]
obj.slots.insert(slotIdx + rel, struct)
return obj.slots
moveStruct.public = 1
moveStruct.accLevel = AL_FULL
def processPRODPhase(self, tran, obj, data):
if obj.plType == "A":
self.cmd(obj).generateAsteroid(tran, obj)
# max storage
obj.maxPop = obj.plSlots * Rules.popPerSlot + Rules.popBaseStor
obj.maxBio = obj.plSlots * Rules.bioPerSlot + Rules.bioBaseStor
obj.maxEn = obj.plSlots * Rules.enPerSlot + Rules.enBaseStor
# refuel & repair
obj.refuelMax = 0
obj.refuelInc = 0
obj.repairShip = 0.0
obj.upgradeShip = 0.0
# train
obj.trainShipInc = 0
obj.trainShipMax = 0
obj.fleetSpeedBoost = 1.0
#
if obj.storPop <= 0 and not obj.slots and obj.owner == OID_NONE:
# do not process this planet
return
obj.scannerPwr = Rules.scannerMinPwr
obj.prodProd = obj.prodSci = 0
obj.changeBio = - obj.storBio
obj.changeEn = - obj.storEn
obj.changePop = - obj.storPop
obj.changeEnv = - obj.plEnv
obj.changeMorale = - obj.morale
# parent objects
system = tran.db[obj.compOf]
galaxy = tran.db[system.compOf]
# env. conditions
emrLevel = galaxy.emrLevel
# collect strategic resources
owner = tran.db.get(obj.owner, None)
if owner and obj.plStratRes != SR_NONE:
turn = tran.db[OID_UNIVERSE].turn
if turn % Rules.stratResRate == 0:
owner.stratRes[obj.plStratRes] = owner.stratRes.get(obj.plStratRes, 0) + 1
Utils.sendMessage(tran, obj, MSG_EXTRACTED_STRATRES, obj.oid, obj.plStratRes)
# compute moraleTrgt
if owner:
homePlanet = tran.db[owner.planets[0]]
dist = int(math.sqrt((homePlanet.x - obj.x) ** 2 + (homePlanet.y - obj.y) ** 2))
moraleTrgt = -37.5 * dist / owner.govPwrCtrlRange + 107.5
obj.moraleTrgt = max(Rules.minMoraleTrgt, min(moraleTrgt, Rules.maxMorale))
#@log.debug(obj.oid, "Morale target", obj.moraleTrgt, "dist", dist, owner.govPwrCtrlRange)
# auto regulation of min resources
if obj.autoMinStor:
obj.minBio = obj.minEn = 0
# combat?
isCombat = system.combatCounter > 0
obj.unemployedPop = obj.storPop
# ok, reset max pop
obj.maxPop = 0
# process all structures
destroyed = []
obj.maxShield = 0
obj.solarmod = 0
#@log.debug("Morale bonus/penalty for planet", obj.oid, moraleBonus)
for struct in obj.slots:
tech = Rules.techs[struct[STRUCT_IDX_TECHID]]
# compute struct effectivity
techEff = Utils.getTechEff(tran, struct[STRUCT_IDX_TECHID], obj.owner)
# morale does not affect hit points of structures
maxHP = int(tech.maxHP * techEff)
if maxHP < struct[STRUCT_IDX_HP]:
# damage structure
struct[STRUCT_IDX_HP] = max(maxHP, struct[STRUCT_IDX_HP] - int(maxHP * Rules.decayRatio))
# auto regulation of min resources
if obj.autoMinStor:
obj.minBio += tech.operBio * Rules.autoMinStorTurns
obj.minEn += tech.operEn * Rules.autoMinStorTurns
struct[STRUCT_IDX_STATUS] &= STRUCT_STATUS_RESETFLGS
            # each structure accommodates its workers
obj.maxPop += tech.operWorkers
# produce/consume resources
            # find the most limiting condition
try:
opStatus = min(1.0, float(struct[STRUCT_IDX_HP]) / maxHP)
            except ZeroDivisionError:
                opStatus = 0.0
                log.warning('Invalid max HP of structure', struct[STRUCT_IDX_TECHID])
if tech.operBio > 0:
opStatus = min(opStatus, float(obj.storBio) / tech.operBio)
if tech.operEn > 0:
opStatus = min(opStatus, float(obj.storEn) / tech.operEn)
if tech.operWorkers > 0:
opStatus = min(opStatus, float(obj.unemployedPop) / tech.operWorkers)
if not struct[STRUCT_IDX_STATUS] & STRUCT_STATUS_ON:
opStatus = 0.0
struct[STRUCT_IDX_OPSTATUS] = int(100 * opStatus)
            # solarmod affects ENV change and terraforming, but only if beneficial
if tech.solarMod * opStatus > 0:
obj.solarmod = max(obj.solarmod,tech.solarMod * techEff * opStatus)
elif tech.solarMod * opStatus < 0:
obj.solarmod = min(obj.solarmod,tech.solarMod * techEff * opStatus)
#@log.debug("IPlanet - oper status", obj.oid, struct, opStatus)
# set status bits
if tech.operBio > obj.storBio: struct[STRUCT_IDX_STATUS] |= STRUCT_STATUS_NOBIO
if tech.operEn > obj.storEn: struct[STRUCT_IDX_STATUS] |= STRUCT_STATUS_NOEN
if tech.operWorkers > obj.unemployedPop: struct[STRUCT_IDX_STATUS] |= STRUCT_STATUS_NOPOP
# produce/consume
#@log.debug("Active structure", obj.oid, struct)
# bio
b, m, e, d = tech.prodBioMod
prodMod = (b * obj.plBio + m * obj.plMin + e * obj.plEn + d * 100) / 100
obj.storBio += int(tech.prodBio * prodMod * techEff * opStatus) - int(tech.operBio * opStatus)
# en
b, m, e, d = tech.prodEnMod
prodMod = (b * obj.plBio + m * obj.plMin + e * obj.plEn + d * 100) / 100
obj.storEn += int(tech.prodEn * prodMod * techEff * opStatus) - int(tech.operEn * opStatus)
obj.unemployedPop -= min(obj.unemployedPop, int(tech.operWorkers * opStatus))
obj.storPop += int(tech.prodPop * techEff * opStatus)
obj.scannerPwr = max(int(tech.scannerPwr * techEff * (2.0 - emrLevel) * opStatus), obj.scannerPwr)
            # rebellion and combat have a common penalty
b, m, e, d = tech.prodProdMod
prodMod = (b * obj.plBio + m * obj.plMin + e * obj.plEn + d * 100) / 100
obj.prodProd += int(tech.prodProd * prodMod * techEff * opStatus)
# science
b, m, e, d = tech.prodSciMod
prodMod = (b * obj.plBio + m * obj.plMin + e * obj.plEn + d * 100) / 100
obj.prodSci += int(tech.prodSci * prodMod * techEff * opStatus)
# refuelling & repairing
obj.refuelMax = max(obj.refuelMax, int(tech.refuelMax * techEff * opStatus))
if obj.revoltLen == 0 and not isCombat:
# refuelling
obj.refuelInc = max(obj.refuelInc, int(tech.refuelInc * techEff * opStatus))
# repair
obj.repairShip += tech.repairShip * techEff * opStatus
obj.upgradeShip += tech.upgradeShip * techEff * opStatus
# train
obj.trainShipMax = max(obj.trainShipMax, tech.trainShipMax)
obj.trainShipInc = max(obj.trainShipInc, tech.trainShipInc * techEff * opStatus)
# shielding
obj.maxShield = max(tech.planetShield * techEff * opStatus, obj.maxShield)
# stargates
obj.fleetSpeedBoost = max(obj.fleetSpeedBoost, tech.fleetSpeedBoost * techEff * opStatus)
# storage
obj.maxBio += int(tech.storBio * techEff)
obj.maxEn += int(tech.storEn * techEff)
obj.maxPop += int(tech.storPop * techEff)
obj.plEnv += int(tech.prodEnv * techEff * opStatus)
obj.moraleTrgt += tech.moraleTrgt * techEff * opStatus
# auto repair/damage
            # also damage structures on unowned planets
if struct[STRUCT_IDX_HP] < maxHP and opStatus > 0.0:
struct[STRUCT_IDX_HP] = min(maxHP, struct[STRUCT_IDX_HP] + max(int(maxHP * Rules.repairRunningRatio), 1))
struct[STRUCT_IDX_STATUS] |= STRUCT_STATUS_REPAIRING
elif struct[STRUCT_IDX_HP] > maxHP or opStatus <= 0.0:
# flag only for non functional structure
if opStatus <= 0.0:
struct[STRUCT_IDX_STATUS] |= STRUCT_STATUS_DETER
# damage it a bit
struct[STRUCT_IDX_HP] -= max(1, int(maxHP * Rules.decayRatio))
if obj.storPop > 0:
# do not fall below 1% of HP for populated planets
struct[STRUCT_IDX_HP] = max(struct[STRUCT_IDX_HP], maxHP / 100)
if struct[STRUCT_IDX_HP] <= 0:
# destroy building only if there is no population
destroyed.append(struct)
# do shield self generation
obj.prevShield = obj.shield #for planet display of shield growth
if obj.maxShield < obj.shield:
obj.shield = obj.maxShield
if obj.maxShield > obj.shield and not isCombat:
            regenTemp = max(1, Rules.plShieldRegen * obj.maxShield) # always regen at least 1
            obj.shield = min(obj.shield + regenTemp, obj.maxShield) # don't let it regen over maxShield
# pass scanner/... to the system
#@log.debug(obj.oid, "IPlanet scanner", obj.scannerPwr)
system.scannerPwrs[obj.owner] = max(obj.scannerPwr, system.scannerPwrs.get(obj.owner, 0))
# destroy destroyed buildings
for struct in destroyed:
obj.slots.remove(struct)
# process population
if obj.storPop > 0:
# the reserve is needed
#obj.maxPop = int((obj.maxPop + getattr(owner, "techLevel", 1) * Rules.tlPopReserve) * Rules.maxPopReserve)
obj.maxPop = int(obj.maxPop * Rules.maxPopReserve)
obj.maxPop += int((obj.plSlots - len(obj.slots)) * getattr(owner, "techLevel", 1) * Rules.tlPopReserve)
# max pop
maxPop = obj.maxPop
if obj.popEatBio: maxPop = min(maxPop, 1000.0 * obj.storBio / obj.popEatBio)
if obj.popEatEn: maxPop = min(maxPop, 1000.0 * obj.storEn / obj.popEatEn)
maxPop = int(maxPop)
# eat
pop = obj.storPop / 1000.0
wantBio = int(math.ceil(pop * obj.popEatBio))
wantEn = int(math.ceil(pop * obj.popEatEn))
# auto regulation of min resources
if obj.autoMinStor:
obj.minBio += wantBio * Rules.autoMinStorTurns
obj.minEn += wantEn * Rules.autoMinStorTurns
# consume resources
obj.storBio -= min(obj.storBio, wantBio)
obj.storEn -= min(obj.storEn, wantEn)
# modify pop
if obj.storPop > maxPop:
# die
obj.storPop -= max(int((obj.storPop - maxPop) * Rules.popDieRate), Rules.popMinDieRate)
#if obj.storPop < maxPop: obj.storPop = maxPop
# do not generate this message when construction has been destroyed
# and do not lower morale too
if obj.storPop < obj.maxPop:
#@Utils.sendMessage(tran, obj, MSG_NOSUPPORT_POP, obj.oid, None)
obj.morale = max(obj.morale - Rules.moraleLostNoFood,0)
elif obj.storPop < maxPop:
# born
obj.storPop += max(min(int(obj.storPop * Rules.popGrowthRate), maxPop - obj.storPop), Rules.popMinGrowthRate)
# produce items in construction queue
if owner:
moraleBonus = Rules.moraleProdBonus[int(obj.morale / Rules.moraleProdStep)]
prod = obj.effProdProd = max(0, int(obj.prodProd * (owner.prodEff + moraleBonus)))
            if (obj.morale > 15 and prod == 0 and obj.prodProd > 0 and owner.prodEff > 0): # added for super-low morale bonus issues
prod = obj.effProdProd = 1
else:
prod = obj.prodProd
index = 0
missing = [0, 0, 0, 0, 0]
idleProd = 0.0
while prod > 0 and index < len(obj.prodQueue):
item = obj.prodQueue[index]
# check if owner has this tech
            if not item.isShip and item.techID not in owner.techs:
                # bad tech -- drop the task and re-check the same index
                del obj.prodQueue[index]
                # TODO send message
                continue
# set target
target = tran.db[item.targetID]
# set tech and build conditions
if item.isShip:
tech = tran.db[obj.owner].shipDesigns[item.techID]
mod = Rules.buildOnSamePlanetMod
else:
tech = Rules.techs[item.techID]
# check validity of the project
if not tech.validateConstrHandler(tran, obj, target, tech):
index += 1
# message to player
Utils.sendMessage(tran, obj, MSG_INVALID_TASK, obj.oid, item.techID)
continue
            # building on another planet is more expensive
if item.targetID == obj.oid:
mod = Rules.buildOnSamePlanetMod
else:
mod = Rules.buildOnAnotherPlanetMod
# compute needs (do not consume resources under minimal storage)
wantProd = min(int(tech.buildProd * mod / tech.buildTurns - item.currProd), prod)
# production
item.changePerc = wantProd * 10000 / (tech.buildProd * mod)
# consume / produce
if item.techID == Rules.Tech.IDLETASK and item.isShip == 0:
idleProd += wantProd
prod -= wantProd
item.currProd += wantProd
# check, if production is complete
if item.currProd >= tech.buildProd * mod:
# item is complete
if item.isShip:
# find commander's fleet
fleet = None
# check if current system has any redirection
hasRedirection = obj.compOf in owner.shipRedirections
for fleetID in system.fleets:
tmpFleet = tran.db[fleetID]
if tmpFleet.owner == obj.owner and Utils.isIdleFleet(tmpFleet):
fleet = tmpFleet
break
if not fleet or hasRedirection:
fleet = self.new(T_FLEET)
tran.db.create(fleet)
self.cmd(fleet).create(tran, fleet, system, obj.owner)
self.cmd(fleet).addAction(tran, fleet, 0, FLACTION_REDIRECT, OID_NONE, None)
# add ships to the fleet
self.cmd(fleet).addNewShip(tran, fleet, item.techID)
if item.reportFin and item.quantity == 1:
Utils.sendMessage(tran, obj, MSG_COMPLETED_SHIP, obj.oid, item.techID)
elif tech.isStructure:
# if there is struct to demolish, find it and delete it
if item.demolishStruct != OID_NONE:
structToDemolish = None
for struct in target.slots:
if struct[STRUCT_IDX_TECHID] == item.demolishStruct:
structToDemolish = struct
break
if structToDemolish:
# struct found -- delete it
target.slots.remove(structToDemolish)
else:
# well, this can be a problem?
# shall we report it? (TODO: decide)
pass
if len(target.slots) < target.plSlots:
target.slots.append(Utils.newStructure(tran, item.techID, obj.owner))
try:
tech.finishConstrHandler(tran, obj, target, tech)
except Exception:
log.warning("Cannot execute finish constr handler")
if item.reportFin and item.quantity == 1:
Utils.sendMessage(tran, obj, MSG_COMPLETED_STRUCTURE, target.oid, item.techID)
else:
# no free slot!
Utils.sendMessage(tran, obj, MSG_CANNOTBUILD_NOSLOT, target.oid, None)
elif tech.isProject:
tech.finishConstrHandler(tran, obj, target, tech)
if item.reportFin and item.quantity == 1:
Utils.sendMessage(tran, obj, MSG_COMPLETED_PROJECT, target.oid, item.techID)
else:
raise GameException('Unsupported type of technology %d ' % item.techID)
# remove item from prod queue
item.quantity -= 1
if item.quantity == 0:
# remove item from the queue
del obj.prodQueue[index]
else:
# try to produce another item
item.currProd = 0
else:
                # item is not complete, stop production
index += 1
break
# decay items not currently produced
while index < len(obj.prodQueue):
item = obj.prodQueue[index]
item.currProd = max(0, int(item.currProd - max(item.currProd * Rules.decayRatio, 1)))
index += 1
# use excess raw CP to increase production elsewhere
prod += idleProd
#if obj.effProdProd > 0 and owner:
#owner.prodIncreasePool += float(prod) / obj.effProdProd * obj.prodProd
if prod > 0.0:
owner.prodIncreasePool += prod
#if prod > 1: # ignore rounding error
# # report wasting production points
# Utils.sendMessage(tran, obj, MSG_WASTED_PRODPTS, obj.oid, (prod,))
# auto environment changes
downgradeTo = Rules.planetSpec[obj.plType].downgradeTo
solarminus = 0
solarplus = 0
if obj.solarmod > 0:
solarplus = obj.solarmod
if obj.solarmod < 0:
solarminus = obj.solarmod
        if downgradeTo is not None:
if (Rules.planetSpec[downgradeTo].upgradeEnReqs[0] > obj.plEn + solarplus) or (Rules.planetSpec[downgradeTo].upgradeEnReqs[1] < obj.plEn + solarminus):
# auto damage on plEn outside downgrade's upgrade range
obj.plEnv -= Rules.envAutoMod
if obj.plBio > Rules.planetSpec[obj.plType].maxBio:
            # auto damage on plBio > maxBio of class
            #@log.debug('IPlanet', obj.oid, 'Env auto damage', obj.plType, obj.plBio, Rules.planetSpec[obj.plType].maxBio)
dEnv = int((obj.plBio - Rules.planetSpec[obj.plType].maxBio) * Rules.envAutoMod)
if obj.plEnv > 0:
obj.plEnv -= min(obj.plEnv, dEnv)
else:
obj.plEnv -= dEnv
# small chance of self-upgrading
spec = Rules.planetSpec[obj.plType]
if owner:
chance = int((obj.plBio - spec.maxBio) * Rules.envSelfUpgradeChance[owner.race])
else:
chance = int((obj.plBio - spec.maxBio) * Rules.envSelfUpgradeChance["H"])
if Utils.rand(0, 10001) < chance and spec.upgradeTo and \
obj.plEn + solarplus >= spec.upgradeEnReqs[0] and obj.plEn - solarminus <= spec.upgradeEnReqs[1]:
log.debug('IPlanet', obj.oid, 'Upgraded to', spec.upgradeTo)
obj.plType = spec.upgradeTo
Utils.sendMessage(tran, obj, MSG_UPGRADED_PLANET_ECO, obj.oid, spec.upgradeTo)
while obj.plEnv >= Rules.envInterval:
#@log.debug('IPlanet', obj.oid, 'Env improved')
obj.plEnv -= Rules.envInterval
obj.changeEnv += Rules.envInterval
if obj.plBio < 200: obj.plBio += 1
while obj.plEnv < 0:
if obj.plBio > 0:
obj.plBio -= 1
obj.plEnv += Rules.envInterval
obj.changeEnv -= Rules.envInterval
else:
obj.changeEnv += obj.plEnv
obj.plEnv = 0
# downgrade planet if necessary
if obj.plBio < Rules.planetSpec[obj.plType].minBio:
downgradeTo = Rules.planetSpec[obj.plType].downgradeTo
if downgradeTo:
log.debug('IPlanet', obj.oid, 'Downgraded to', downgradeTo)
obj.plType = downgradeTo
Utils.sendMessage(tran, obj, MSG_DOWNGRADED_PLANET_ECO, obj.oid, downgradeTo)
# record changes
obj.changeBio += obj.storBio
obj.changeEn += obj.storEn
obj.changePop += obj.storPop
obj.changeEnv += obj.plEnv
# auto regulation of min resources
if obj.autoMinStor:
obj.minBio = min(obj.minBio, obj.maxBio / 2)
obj.minEn = min(obj.minEn, obj.maxEn / 2)
# science
if owner:
moraleBonus = Rules.moraleProdBonus[int(obj.morale / Rules.moraleProdStep)]
obj.effProdSci = max(0, int(obj.prodSci * (owner.sciEff + moraleBonus)))
owner.sciPoints += obj.effProdSci
# planet with no population cannot have an owner
# and planet with no owner cannot have population
if (obj.storPop <= 0 and obj.owner != OID_NONE) or obj.owner == OID_NONE:
# TODO: remove
#if obj.owner != OID_NONE:
# # send message
# Utils.sendMessage(tran, obj, MSG_LOST_PLANET, obj.oid, None)
# remove this planet from owner's planets
self.cmd(obj).changeOwner(tran, obj, OID_NONE, force = 1)
obj.storPop = 0
processPRODPhase.public = 1
processPRODPhase.accLevel = AL_ADMIN
def processACTIONPhase(self, tran, obj, data):
return
processACTIONPhase.public = 1
processACTIONPhase.accLevel = AL_ADMIN
def processFINALPhase(self, tran, obj, data):
if obj.storPop <= 0 and not obj.slots and obj.owner == OID_NONE:
# do not process this planet
return
# morale
system = tran.db[obj.compOf]
galaxy = tran.db[system.compOf]
if galaxy.timeEnabled and not galaxy.timeStopped:
# too much population affects morale (if there is more than base population)
if obj.storPop > Rules.moraleBasePop:
obj.moraleTrgt -= Rules.moraleHighPopPenalty * obj.storPop / Rules.moraleBasePop
elif obj.storPop <= Rules.moraleLowPop:
obj.moraleTrgt += Rules.moraleLowPopBonus
obj.moraleTrgt = max(0.0, min(obj.moraleTrgt, Rules.maxMorale))
if obj.morale > int(obj.moraleTrgt):
obj.morale -= max(1.0, (obj.morale - obj.moraleTrgt) * Rules.moraleChngPerc)
elif obj.morale < int(obj.moraleTrgt) and system.combatCounter == 0:
obj.morale += max(1.0, (obj.moraleTrgt - obj.morale) * Rules.moraleChngPerc)
#@log.debug('IPlanet', 'Mor Mor trgt/reb thr', obj.morale, obj.moraleTrgt)
# revolt?
if obj.revoltLen > 0:
obj.revoltLen += 1
if obj.morale < Rules.revoltThr and obj.owner != OID_NONE and obj.revoltLen == 0:
chance = (Rules.revoltThr - obj.morale) * Rules.moralePerPointChance
#@log.debug('IPlanet', 'Start revolt? mor, mor trgt, reb thr, chance', obj.morale, obj.moraleTrgt, chance)
if Utils.rand(0, 101) <= chance:
                # rebellion starts
#@log.debug('IPlanet', 'Revolt on', obj.oid)
obj.revoltLen = 1
Utils.sendMessage(tran, obj, MSG_REVOLT_STARTED, obj.oid, None)
elif obj.revoltLen > 0 and obj.morale > Rules.revoltThr:
chance = (obj.morale - Rules.revoltThr) * Rules.moralePerPointChance
#@log.debug('IPlanet', 'Stop revolt? mor, mor trgt, reb thr, chance', obj.morale, obj.moraleTrgt, chance)
if Utils.rand(0, 101) <= chance:
# revolt ends
obj.revoltLen = 0
Utils.sendMessage(tran, obj, MSG_REVOLT_ENDED, obj.oid, None)
obj.morale = max(0.0, min(Rules.maxMorale, obj.morale))
obj.changeMorale += obj.morale
# when rebelling destroy some resources
if obj.revoltLen > 0:
obj.storBio -= int(obj.storBio * Rules.revoltDestrBio)
obj.storEn -= int(obj.storEn * Rules.revoltDestrEn)
# storage
obj.storBio = min(obj.storBio, obj.maxBio)
obj.storEn = min(obj.storEn, obj.maxEn)
#obj.storPop = min(obj.storPop, obj.maxPop) TODO remove
# collect stats
if obj.owner != OID_NONE:
player = tran.db[obj.owner]
player.stats.storPop += obj.storPop
player.stats.prodProd += obj.prodProd
player.stats.effProdProd += obj.effProdProd
player.stats.prodSci += obj.prodSci
player.stats.effProdSci += obj.effProdSci
player.stats.structs += len(obj.slots)
player.stats.slots += obj.plSlots
galaxyID = tran.db[obj.compOf].compOf
if galaxyID not in player.galaxies:
player.galaxies.append(galaxyID)
# morale computation
homePlanet = tran.db[player.planets[0]]
dist = int(math.sqrt((homePlanet.x - obj.x) ** 2 + (homePlanet.y - obj.y) ** 2))
player.tmpPopDistr[dist] = player.tmpPopDistr.get(dist, 0) + obj.storPop
processFINALPhase.public = 1
processFINALPhase.accLevel = AL_ADMIN
def getScanInfos(self, tran, obj, scanPwr, player):
if scanPwr >= Rules.level1InfoScanPwr:
result = IDataHolder()
result._type = T_SCAN
result.scanPwr = scanPwr
result.oid = obj.oid
result.signature = obj.signature
result.type = obj.type
result.orbit = obj.orbit
result.compOf = obj.compOf
result.x = obj.x
result.y = obj.y
result.plType = obj.plType
if scanPwr >= Rules.level2InfoScanPwr:
result.plDiameter = obj.plDiameter
if getattr(obj, "plType", 'X') != 'G':
result.plMin = obj.plMin
result.plBio = obj.plBio
result.plEn = obj.plEn
result.plSlots = obj.plSlots
result.plStratRes = obj.plStratRes
result.plMaxSlots = obj.plMaxSlots
if scanPwr >= Rules.level3InfoScanPwr:
result.name = obj.name
result.storPop = obj.storPop
result.owner = obj.owner
if scanPwr >= Rules.level4InfoScanPwr:
# TODO provide less information
                result.hasRefuel = (obj.refuelInc > 0) # simple detection of whether docks exist, for the problems dialog
result.slots = obj.slots
result.shield = obj.shield
result.prevShield = -1
result.maxShield = -1
if scanPwr >= Rules.partnerScanPwr:
result.maxShield = obj.maxShield
result.prevShield = obj.prevShield
result.refuelMax = obj.refuelMax
result.refuelInc = obj.refuelInc
result.scannerPwr = obj.scannerPwr
result.trainShipInc = obj.trainShipInc
result.trainShipMax = obj.trainShipMax
result.upgradeShip = obj.upgradeShip
result.repairShip = obj.repairShip
result.fleetSpeedBoost = obj.fleetSpeedBoost
return [result]
def loadDOMNode(self, tran, obj, xoff, yoff, orbit, node):
obj.x = xoff
obj.y = yoff
obj.orbit = orbit
for elem in node.childNodes:
if elem.nodeType == Node.ELEMENT_NODE:
name = elem.tagName
if name == 'properties':
self.loadDOMAttrs(obj, elem)
elif name == 'startingpoint':
galaxy = tran.db[tran.db[obj.compOf].compOf]
galaxy.startingPos.append(obj.oid)
galaxy.numOfStartPos += 1
else:
raise GameException('Unknown element %s' % name)
return SUCC
def update(self, tran, obj):
# clean up negative build queues and fix missing demolishStruct keys
loopAgain = True
while loopAgain:
deletedKey = False
for key in range(0,len(obj.prodQueue)):
item = obj.prodQueue[key]
if not hasattr(item, "demolishStruct"):
item.demolishStruct = OID_NONE
if item.quantity < 0:
log.warning("Deleting negative item queue on", obj.oid,"for player",obj.owner)
if item.isShip:
tech = player.shipDesigns[item.techID]
else:
tech = Rules.techs[item.techID]
player = tran.db[obj.owner]
for sr in tech.buildSRes:
player.stratRes[sr] = player.stratRes.get(sr, 0) + item.quantity #quantity negative, so subtracting strat resources
                    # del the bad item. Since this changes indices, start the check over again on remaining items
deletedKey = True
del obj.prodQueue[key]
break
# no more bad entries found; break the while loop
if not deletedKey:
loopAgain = False
# remove in 0.5.34
for struct in obj.slots:
if len(struct) < 4:
# add oper status
struct.append(100)
# change owner to OID_NONE when owner is invalid
if obj.owner != OID_NONE:
player = tran.db.get(obj.owner, None)
if not player or player.type not in PLAYER_TYPES or obj.oid not in player.planets:
                # TODO this can be a problem - this planet cannot be attacked!
log.warning("Changing owner to OID_NONE - invalid owner", obj)
self.cmd(obj).changeOwner(tran, obj, OID_NONE, force = 1)
# kill all population
obj.storPop = 0
return
# check compOf
if not tran.db.has_key(obj.compOf) or tran.db[obj.compOf].type != T_SYSTEM:
log.debug("CONSISTENCY invalid compOf for planet", obj.oid)
# fix signature
obj.signature = 75
update.public = 0
def deleteDesign(self, tran, obj, designID, keepWIP = 0):
        # TODO: handle strategic resources
for task in obj.prodQueue[:]:
if task.isShip and task.techID == designID:
if task.currProd > 0 and keepWIP:
                    self.cmd(obj).changeConstruction(tran, obj, obj.prodQueue.index(task), 1)
else:
self.cmd(obj).abortConstruction(tran, obj, obj.prodQueue.index(task))
deleteDesign.public = 0
def changeShipDesign(self, tran, obj, oldDesignID, newDesignID):
# TODO: handle strategic resources
for task in obj.prodQueue[:]:
if task.isShip and task.techID == oldDesignID:
task.techID = newDesignID
task.currProd = int(task.currProd / Rules.shipUpgradeMod)
changeShipDesign.public = 0
##
## Asteroids
##
def generateAsteroid(self, tran, obj):
        # asteroid generation is currently disabled -- this early return makes
        # everything below unreachable
        return
assert obj.plType == "A"
#
modifier = pow(
max(Rules.asteroidMinPlMinAbund / 100.0, obj.plMin / 100.0),
Rules.asteroidModPwr,
)
# get probability
prob = Rules.asteroidGenerPerc * modifier
#@log.debug("Asteroids ?", prob, modifier, int(Rules.asteroidMinHP * modifier), int(Rules.asteroidMaxHP * modifier))
if prob < random.random():
# bad luck
return
# new asteroid - gener hit points and speed
hp = random.randrange(
int(Rules.asteroidMinHP * modifier),
int(Rules.asteroidMaxHP * modifier)
)
speed = Rules.asteroidMinSpeed + random.random() * \
(Rules.asteroidMaxSpeed - Rules.asteroidMinSpeed)
# position
system = tran.db[obj.compOf]
# select target
if Rules.asteroidTargetInSystem < random.random():
# TODO: target nearby system
objIDs = []
# pick one target (except this system)
while True:
systemID = random.choice(objIDs)
tmpSystem = tran.db[systemID]
if tmpSystem.type == T_SYSTEM and systemID != system.oid:
break
# select planet
targetID = random.choice(tmpSystem.planets)
else:
# select planet in this system
while True:
targetID = random.choice(system.planets)
if targetID != obj.oid:
# don't target yourself
break
# create asteroid
asteroid = self.new(T_ASTEROID)
tran.db.create(asteroid)
self.cmd(asteroid).create(tran, asteroid, system.x, system.y, targetID, speed, hp)
##
## Combat related functions
##
def getPreCombatData(self, tran, obj):
# scan buildings and fire their weapons
shots = {0: [], 1: [], 2: [], 3: []}
if obj.owner == OID_NONE:
return shots, [0, 0, 0, 8], False
player = tran.db[obj.owner]
system = tran.db[obj.compOf]
desCount = {}
firing = False
        systemAtt = 0
        systemDef = 0
for struct in obj.slots:
structTechID = struct[STRUCT_IDX_TECHID]
opStatus = struct[STRUCT_IDX_OPSTATUS] / 100.0
tech = Rules.techs[structTechID]
desCount[structTechID] = desCount.get(structTechID, 0) + 1
wpnCount = {}
if not tech.structWeapons:
continue
firing = True
for cClass in range(0, 4):
weaponID = player.planetWeapons[cClass]
if weaponID is None:
continue
weapon = Rules.techs[weaponID]
maxWeaponCount = int(tech.structWeapons[cClass] * opStatus)
for weaponIdx in range(0, maxWeaponCount):
#@log.debug(obj.oid, "FIRING PLANET WEAPON", weapon.name)
wpnCount[weaponID] = wpnCount.get(weaponID, 0) + 1
#
weaponEff = Rules.techImprEff[player.techs.get(weaponID, Rules.techBaseImprovement)]
# base attack
attack = tech.combatAtt + int(weapon.weaponAtt * weaponEff)
                    # because ALL counters start at 1, subtract 2 so count starts at 1
count = system.combatCounter + desCount[structTechID] + wpnCount[weaponID] - 2
# add to attacks
#@log.debug('IPlanet', obj.oid, structTechID, "Count", count, 'Shots', weapon.name, ShipUtils.getRounds(weapon.weaponROF, count))
for round in xrange(0, ShipUtils.getRounds(weapon.weaponROF, count)):
shots[weapon.weaponClass].append((attack, weaponID))
# hit limit
obj.maxHits = len(obj.slots)
obj.hitCounter = 0
obj.lastHitClass = 3
obj.hitMod = 1.0
log.debug(obj.oid, "Combat settings", obj.maxHits)
# +1 means population only hit
return shots, [0, 0, 0, 8], firing
getPreCombatData.public = 0
def applyShot(self, tran, obj, defense, attack, weaponID, cClass, count):
#@log.debug('IPlanet', 'Apply shot', weaponID, attack, cClass, count)
# compute chance to hit
weapon = Rules.techs[weaponID]
        # the system defense bonus is dropped for planets (structures can't move); just use the structure defense
defense = Rules.combatStructDefense
destroyed = 0
dmg = 0
# limit number of shots
if weapon.weaponClass < obj.lastHitClass:
#@log.debug(obj.oid, "Different class", obj.lastHitClass, weapon.weaponClass, obj.maxHits)
obj.maxHits = int(Rules.combatHitXferMod * obj.maxHits * (obj.lastHitClass - weapon.weaponClass))
obj.hitCounter = int(Rules.combatHitXferMod * obj.hitCounter * (obj.lastHitClass - weapon.weaponClass))
obj.lastHitClass = weapon.weaponClass
if weapon.weaponROF > 1:
#@log.debug(obj.oid, "Increasing counter PL", 1.0 / weapon.weaponROF)
obj.hitCounter += 1.0 / weapon.weaponROF
else:
#@log.debug(obj.oid, "Increasing counter PL", 1)
obj.hitCounter += 1
if obj.hitCounter > obj.maxHits:
obj.hitCounter = 0
obj.hitMod *= Rules.combatStructureHitMod
#@log.debug(obj.oid, "Increasing hit penalty", obj.hitMod, obj.maxHits)
attackChance = obj.hitMod * attack / (attack + defense)
#@log.debug(obj.oid, "Chance to attack", attackChance, obj.hitMod, obj.hitCounter, obj.maxHits,
#@ "without penalty:", float(attack) / (attack + defense))
#@log.debug('IPlanet', obj.oid, 'HIT?', attack + defense + 1, defense)
absorb = 0 #for when it doesn't hit
if random.random() <= attackChance:
# hit
player = tran.db[obj.owner]
weaponEff = Rules.techImprEff[player.techs.get(weaponID, Rules.techBaseImprovement)]
dmg = ShipUtils.computeDamage(weapon.weaponClass, 3, weapon.weaponDmgMin, weapon.weaponDmgMax, weaponEff)
#@log.debug(obj.oid, 'HIT! att=%d vs def=%d, dmg=%d '% (attack, defense, dmg))
#shield strike
if obj.shield > 0:
absorb = min(dmg,obj.shield)
obj.shield -= absorb
dmg -= absorb
if dmg == 0:
return 0+absorb, 0, 3
# select slot
if count == 7 or not obj.slots:
#@log.debug('IPlanet', 'Population hit')
# population hit
if obj.storPop == 0:
dmg = 0
else:
# free slot hit -> dmg population
# OLD dmgPop = int(Rules.popPerSlot * float(dmg) / Rules.popSlotHP * Rules.popKillMod)
dmgPop = int(dmg * Rules.popSlotKillMod)
obj.storPop = max(obj.storPop - dmgPop, 0)
obj.changePop -= dmgPop
if obj.storPop > 0:
obj.morale -= Rules.moraleModPlHit * float(dmgPop) / float(obj.storPop)
#@log.debug('IPlanet', obj.oid, 'Morale penalty', dmg, maxHP, Rules.moraleModPlHit * float(dmg) / float(maxHP))
elif count < 0:
                # TODO: can count be negative?
log.warning('IPlanet', 'applyShot: count is negative')
else:
if count == 6:
# random structure hit
#@log.debug('IPlanet', 'Random structure hit')
struct = obj.slots[Utils.rand(0, len(obj.slots))]
else:
# most damaged structure hit
#@log.debug('IPlanet', 'Most damaged structure hit')
struct = obj.slots[-1]
for tmpStruct in obj.slots:
if tmpStruct[STRUCT_IDX_HP] <= struct[STRUCT_IDX_HP]:
struct = tmpStruct
# compute sum hp of all buildings
sumHP = 0
for tmpStruct in obj.slots:
sumHP += tmpStruct[STRUCT_IDX_HP]
# damage building
struct[STRUCT_IDX_HP] -= dmg
# "damage" population
tech = Rules.techs[struct[STRUCT_IDX_TECHID]]
# compute struct effectivity
techEff = Utils.getTechEff(tran, struct[STRUCT_IDX_TECHID], obj.owner)
maxHP = int(tech.maxHP * techEff)
dmgPop = int(tech.operWorkers * float(dmg) / maxHP * Rules.popKillMod)
obj.storPop = max(obj.storPop - dmgPop, 0)
obj.changePop -= dmgPop
# destroy building
if struct[STRUCT_IDX_HP] <= 0:
destroyed = 1
dmg += struct[STRUCT_IDX_HP]
obj.slots.remove(struct)
# compute morale penalty
if dmg:
obj.morale -= Rules.moraleModPlHit * float(dmg) / float(sumHP)
#@log.debug('IPlanet', obj.oid, 'Morale penalty', dmg, sumHP, Rules.moraleModPlHit * float(dmg) / float(sumHP))
#@log.debug('IPlanet', 'Shot applied', dmg, destroyed)
        # when destroyed, only class 3 (structure) is valid
return dmg+absorb, destroyed, 3
applyShot.public = 0
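    # Worked example of the shield-absorb arithmetic above (illustration only):
    # with obj.shield == 4 and a raw hit of dmg == 10, absorb == 4, the shield
    # drops to 0, and the remaining 6 damage carries on to the slot/population logic.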
def distributeExp(self, tran, obj):
# TODO - will buildings have exp? Answ: NO
if hasattr(obj, "maxHits"):
del obj.maxHits
del obj.hitCounter
del obj.lastHitClass
del obj.hitMod
distributeExp.public = 0
def surrenderTo(self, tran, obj, newOwnerID):
# morale is lost when this is called
obj.morale -= Rules.moraleLostWhenSurrender
if obj.morale >= Rules.revoltThr:
#@log.debug('IPlanet', 'Surrender - revolt thr not reached', obj.morale)
return 0
chance = (Rules.revoltThr - obj.morale) * Rules.moralePerPointChance
#@log.debug('IPlanet', 'Surrender? mor, mor trgt, reb thr, chance', obj.morale, obj.moraleTrgt, chance)
if Utils.rand(0, 101) > chance:
# do not surrender!
#@log.debug('IPlanet', 'Surrender - pure luck', obj.morale, obj.revoltLen)
return 0
# we've lost the battle - we have a new owner
#@log.debug('IPlanet', 'Surrender - surrending to', newOwnerID)
newOwner = tran.db[newOwnerID]
if newOwner.type == T_PIRPLAYER:
# special handling for pirates
currentTurn = tran.db[OID_UNIVERSE].turn
            # prevent abuse -- require 8 turns between captures of the same planet, and
            # require the owner to control the planet at least 2 turns to gain fame & tech
            # (two turns prevents an orbiting pirate fleet from immediately bombing)
if (currentTurn - obj.lastPirCapture) > 8 and (currentTurn - obj.ownerSince) > 2:
# gain/lose fame
self.cmd(newOwner).capturePlanet(tran, newOwner, obj)
# steal ship techs
self.cmd(newOwner).stealTechs(tran, newOwner, obj.owner, obj.oid)
else:
log.debug(obj.oid, "Pirate captured planet too soon after previous capture or colonization to gain bonuses", obj.oid)
obj.storPop = 0
obj.lastPirCapture = currentTurn
self.cmd(obj).changeOwner(tran, obj, OID_NONE, force = 1)
else:
# change owner
self.cmd(obj).changeOwner(tran, obj, newOwnerID, force = 1)
# blow up all military buildings
for struct in obj.slots[:]:
tech = Rules.techs[struct[STRUCT_IDX_TECHID]]
if tech.isMilitary:
obj.slots.remove(struct)
return 1
surrenderTo.public = 0
|
mozts2005/OuterSpace
|
server/lib/ige/ospace/IPlanet.py
|
Python
|
gpl-2.0
| 45,601
|
[
"Galaxy"
] |
506020f7e27b07348091614f5ee4f6831e902dfff8b525aa3bd69be4fafd8a69
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import mmap
import os
import re
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
parser.add_argument("-e", "--skip-exceptions", help="ignore hack/verify-flags/exceptions.txt and print all output", action="store_true")
args = parser.parse_args()
# Cargo culted from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
def is_binary(pathname):
"""Return true if the given filename is binary.
@raise EnvironmentError: if the file does not exist or cannot be accessed.
@attention: found @ http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text on 6/08/2010
@author: Trent Mick <TrentM@ActiveState.com>
@author: Jorge Orpinel <jorge@orpinel.com>"""
try:
f = open(pathname, 'r')
CHUNKSIZE = 1024
while 1:
chunk = f.read(CHUNKSIZE)
if '\0' in chunk: # found null byte
return True
if len(chunk) < CHUNKSIZE:
break # done
except:
return True
finally:
f.close()
return False
def get_all_files(rootdir):
all_files = []
for root, dirs, files in os.walk(rootdir):
# don't visit certain dirs
if 'Godeps' in dirs:
dirs.remove('Godeps')
if 'third_party' in dirs:
dirs.remove('third_party')
if '.git' in dirs:
dirs.remove('.git')
if 'exceptions.txt' in files:
files.remove('exceptions.txt')
if 'known-flags.txt' in files:
files.remove('known-flags.txt')
for name in files:
if name.endswith(".svg"):
continue
if name.endswith(".gliffy"):
continue
pathname = os.path.join(root, name)
if is_binary(pathname):
continue
all_files.append(pathname)
return all_files
def normalize_files(rootdir, files):
newfiles = []
a = ['Godeps', 'third_party', 'exceptions.txt', 'known-flags.txt']
for f in files:
if any(x in f for x in a):
continue
if f.endswith(".svg"):
continue
if f.endswith(".gliffy"):
continue
newfiles.append(f)
for i, f in enumerate(newfiles):
if not os.path.isabs(f):
newfiles[i] = os.path.join(rootdir, f)
return newfiles
def line_has_bad_flag(line, flagre):
results = flagre.findall(line)
for result in results:
if "_" in result:
return True
return False
# The list of files might not be the whole repo. If someone only changed a
# couple of files we don't want to scan all of the golang files looking for
# flags. Instead load the list of flags from hack/verify-flags/known-flags.txt
# If running the golang files finds a new flag not in that file, return an
# error and tell the user to add the flag to the flag list.
def get_flags(rootdir, files):
# preload the 'known' flags
pathname = os.path.join(rootdir, "hack/verify-flags/known-flags.txt")
f = open(pathname, 'r')
flags = set(f.read().splitlines())
f.close()
# preload the 'known' flags which don't follow the - standard
pathname = os.path.join(rootdir, "hack/verify-flags/excluded-flags.txt")
f = open(pathname, 'r')
excluded_flags = set(f.read().splitlines())
f.close()
regexs = [ re.compile('Var[P]?\([^,]*, "([^"]*)"'),
re.compile('.String[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Int[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Bool[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Duration[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.StringSlice[P]?\("([^"]*)",[^,]+,[^)]+\)') ]
new_flags = set()
new_excluded_flags = set()
# walk all the files looking for any flags being declared
for pathname in files:
if not pathname.endswith(".go"):
continue
f = open(pathname, 'r')
data = f.read()
f.close()
matches = []
for regex in regexs:
matches = matches + regex.findall(data)
for flag in matches:
if any(x in flag for x in excluded_flags):
continue
if "_" in flag:
new_excluded_flags.add(flag)
if not "-" in flag:
continue
if flag not in flags:
new_flags.add(flag)
if len(new_excluded_flags) != 0:
print("Found a flag declared with an _ but which is not explicitly listed as a valid flag name in hack/verify-flags/excluded-flags.txt")
print("Are you certain this flag should not have been declared with an - instead?")
print("%s" % "\n".join(new_excluded_flags))
sys.exit(1)
if len(new_flags) != 0:
print("Found flags in golang files not in the list of known flags. Please add these to hack/verify-flags/known-flags.txt")
print("%s" % "\n".join(new_flags))
sys.exit(1)
return list(flags)
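# Editor's illustrative note (not part of the original script; the declaration
# below is a hypothetical example): a Go flag registration such as
#     fs.StringVar(&opts.CertDir, "cert-dir", "", "directory holding certs")
# is picked up by the Var[P]? regex above, capturing the flag name "cert-dir",
# which is then checked against hack/verify-flags/known-flags.txt.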
def flags_to_re(flags):
"""turn the list of all flags we found into a regex find both - and _ versions"""
dashRE = re.compile('[-_]')
flagREs = []
for flag in flags:
# turn all flag names into regexs which will find both types
newre = dashRE.sub('[-_]', flag)
# only match if there is not a leading or trailing alphanumeric character
flagREs.append("[^\w]" + newre + "[^\w]")
# turn that list of regex strings into a single large RE
flagRE = "|".join(flagREs)
flagRE = re.compile(flagRE)
return flagRE
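# Editor's illustrative note (assumed flag name, not from the original): for a
# known flag "dry-run", flags_to_re() builds the pattern "[^\w]dry[-_]run[^\w]",
# so a line such as "kubectl --dry_run=true" makes line_has_bad_flag() return
# True, while the canonical "--dry-run=true" spelling does not.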
def load_exceptions(rootdir):
exceptions = set()
if args.skip_exceptions:
return exceptions
exception_filename = os.path.join(rootdir, "hack/verify-flags/exceptions.txt")
exception_file = open(exception_filename, 'r')
for exception in exception_file.read().splitlines():
out = exception.split(":", 1)
if len(out) != 2:
printf("Invalid line in exceptions file: %s" % exception)
continue
filename = out[0]
line = out[1]
exceptions.add((filename, line))
return exceptions
def main():
rootdir = os.path.dirname(__file__) + "/../"
rootdir = os.path.abspath(rootdir)
exceptions = load_exceptions(rootdir)
if len(args.filenames) > 0:
files = args.filenames
else:
files = get_all_files(rootdir)
files = normalize_files(rootdir, files)
flags = get_flags(rootdir, files)
flagRE = flags_to_re(flags)
bad_lines = []
    # walk all the files looking for any flag that was declared and now has an _
for pathname in files:
relname = os.path.relpath(pathname, rootdir)
f = open(pathname, 'r')
for line in f.read().splitlines():
if line_has_bad_flag(line, flagRE):
if (relname, line) not in exceptions:
bad_lines.append((relname, line))
f.close()
if len(bad_lines) != 0:
if not args.skip_exceptions:
print("Found illegal 'flag' usage. If this is a false positive add the following line(s) to hack/verify-flags/exceptions.txt:")
for (relname, line) in bad_lines:
print("%s:%s" % (relname, line))
if __name__ == "__main__":
sys.exit(main())
|
lvlv/kubernetes
|
hack/verify-flags-underscore.py
|
Python
|
apache-2.0
| 8,102
|
[
"VisIt"
] |
8600a5e33e24c430b6bbf8e13da1471eb1f68d4c5edd4f403f022678cbc7cebf
|
import Avogadro
import unittest
from numpy import *
#
# Rings are Fragments...
#
class TestFragment(unittest.TestCase):
def setUp(self):
self.molecule = Avogadro.molecules.addMolecule()
def test_name(self):
fragment = self.molecule.addRing() # Ring = Fragment
fragment.name = "testing"
self.assertEqual(fragment.name, "testing")
def test_atom(self):
fragment = self.molecule.addRing() # Ring = Fragment
# create two atoms
atom1 = self.molecule.addAtom()
        atom2 = self.molecule.addAtom()
# add atom 2 to the fragment - test addAtom(id)
fragment.addAtom(atom2.id)
# test atoms()
self.assertEqual(len(fragment.atoms), 1)
self.assert_(atom2.id in fragment.atoms)
# test removeAtom(id)
fragment.removeAtom(atom2.id)
self.assertEqual(len(fragment.atoms), 0)
def test_bond(self):
fragment = self.molecule.addRing() # Ring = Fragment
# create two bonds
bond1 = self.molecule.addBond()
bond2 = self.molecule.addBond()
# add bond 2 to the fragment - test addBond(id)
fragment.addBond(bond2.id)
# test bonds()
self.assertEqual(len(fragment.bonds), 1)
self.assert_(bond2.id in fragment.bonds)
# test removeBond(id)
fragment.removeBond(bond2.id)
self.assertEqual(len(fragment.bonds), 0)
if __name__ == "__main__":
unittest.main()
|
rcplane/periodicdisplay
|
reference/avogadro/libavogadro/src/python/unittest/fragment.py
|
Python
|
gpl-2.0
| 1,357
|
[
"Avogadro"
] |
48b433017ae130ab540bf9abe42245f58318d7507782c309c33b3efe8b38da1f
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
from __future__ import print_function
from abc import abstractproperty
import MDAnalysis
import importlib
import numpy as np
from .atoms_maps import atoms_maps
from difflib import get_close_matches
class Layers(MDAnalysis.core.topologyattrs.AtomAttr):
"""Layers for each atom"""
attrname = 'layers'
singular = 'layer'
per_object = 'atom'
class Clusters(MDAnalysis.core.topologyattrs.AtomAttr):
"""Clusters for each atom"""
attrname = 'clusters'
singular = 'cluster'
per_object = 'atom'
class Sides(MDAnalysis.core.topologyattrs.AtomAttr):
"""Sides for each atom"""
attrname = 'sides'
singular = 'side'
per_object = 'atom'
def _create_property(property_name,
docstring=None,
readonly=False,
required=False):
def getter(self):
return self.__getattribute__('_' + property_name)
def setter(self, value):
self.__setattr__('_' + property_name, value)
if readonly is True:
setter = None
if required is False:
absprop = None
else:
absprop = abstractproperty(None)
return property(fget=getter, fset=setter, doc=docstring), absprop
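# Editor's illustrative sketch (hypothetical attribute name, not part of pytim):
# a class could use the factory above as
#     alpha, _alpha_abstract = _create_property('alpha', "probe sphere radius")
# which yields a read/write `alpha` property backed by the private `_alpha`
# attribute, plus an abstractproperty placeholder when required=True.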
def _missing_attributes(interface, universe):
interface._topologyattrs = importlib.import_module(
'MDAnalysis.core.topologyattrs')
_check_missing_attribute(interface, 'names', 'Atomnames', universe.atoms,
universe.atoms.ids.astype(str))
# NOTE _check_missing_attribute() relies on radii being set to np.nan
# if the attribute radii is not present
_check_missing_attribute(interface, 'radii', 'Radii', universe.atoms,
np.nan)
_check_missing_attribute(interface, 'tempfactors', 'Tempfactors',
universe.atoms, 0.0)
_check_missing_attribute(interface, 'bfactors', 'Bfactors', universe.atoms,
0.0)
_check_missing_attribute(interface, 'altLocs', 'AltLocs', universe.atoms,
' ')
_check_missing_attribute(interface, 'icodes', 'ICodes', universe.residues,
' ')
_check_missing_attribute(interface, 'occupancies', 'Occupancies',
universe.atoms, 1)
_check_missing_attribute(interface, 'elements', 'Elements', universe.atoms,
1)
_extra_attributes(interface, universe)
def _extra_attributes(interface, universe):
# we add here the new layer, cluster and side information
# they are not part of MDAnalysis.core.topologyattrs
attr = {'layers': Layers, 'clusters': Clusters, 'sides': Sides}
for key in attr.keys():
if key not in dir(universe.atoms):
vals = np.zeros(len(universe.atoms), dtype=np.int) - 1
universe.add_TopologyAttr(attr[key](vals))
def _check_missing_attribute(interface, name, classname, group, value):
""" Add an attribute, which is necessary for pytim but
missing from the present topology.
An example of how the code below would expand is:
if 'radii' not in dir(universe.atoms):
from MDAnalysis.core.topologyattrs import Radii
radii = np.zeros(len(universe.atoms)) * np.nan
universe.add_TopologyAttr(Radii(radii))
* MDAnalysis.core.topologyattrs -> self.topologyattrs
* Radii -> missing_class
* radii -> values
"""
universe = interface.universe
if name not in dir(universe.atoms):
missing_class = getattr(interface._topologyattrs, classname)
if isinstance(value, np.ndarray) or isinstance(value, list):
if len(value) == len(group):
values = np.array(value)
else:
raise RuntimeError("improper array/list length")
else:
values = np.array([value] * len(group))
universe.add_TopologyAttr(missing_class(values))
if name == 'elements':
types = MDAnalysis.topology.guessers.guess_types(group.names)
            # is there an inconsistency in the way 'element' is defined
            # across different modules in MDA?
n0 = {'number': 0}
# Note: the second arg in .get() is the default.
group.elements = np.array(
[atoms_maps.get(t, n0)['number'] for t in types])
if name == 'radii':
guess_radii(interface)
def weighted_close_match(string, dictionary):
# increase weight of the first letter
# this fixes problems with atom names like CH12
_wdict = {}
_dict = dictionary
_str = string[0] + string[0] + string
for key in _dict.keys():
_wdict[key[0] + key[0] + key] = _dict[key]
m = get_close_matches(_str, _wdict.keys(), n=1, cutoff=0.1)[0]
return m[2:]
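# Editor's illustrative note (assumed dictionary contents, not from the original):
# for the atom name "CH12" the weighted lookup string becomes "CCCH12", and a
# radii_dict key "C" is compared as "CCC"; doubling the first letter therefore
# biases the fuzzy match towards carbon rather than hydrogen for such names.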
def _guess_radii_from_masses(interface, group, guessed):
radii = np.copy(group.radii)
masses = group.masses
types = group.types
unique_masses = np.unique(masses)
# Let's not consider atoms with zero mass.
unique_masses = unique_masses[unique_masses > 0]
d = atoms_maps
for target_mass in unique_masses:
atype, _ = min(
d.items(),
key=lambda __entry: abs(__entry[1]['mass'] - target_mass))
try:
match_type = get_close_matches(
atype, interface.radii_dict.keys(), n=1, cutoff=0.1)
rd = interface.radii_dict
radii[masses == target_mass] = rd[match_type[0]]
for t in types[masses == target_mass]:
guessed.update({t: rd[match_type[0]]})
except BaseException:
pass
group.radii = radii
def _guess_radii_from_types(interface, group, guessed):
radii = np.copy(group.radii)
_dict = interface.radii_dict
for aname in np.unique(group.names):
try:
matching_type = weighted_close_match(aname, _dict)
radii[group.names == aname] = _dict[matching_type]
guessed.update({aname: _dict[matching_type]})
except (KeyError, IndexError):
try:
atype = group.types[group.names == aname][0]
matching_type = weighted_close_match(atype, _dict)
radii[group.types == atype] = _dict[matching_type]
guessed.update({atype: _dict[matching_type]})
except (KeyError, IndexError):
pass
group.radii = np.copy(radii)
def guess_radii(interface, group=None):
    # NOTE: this code depends on the assumption that not-set radii
    # have the value np.nan (see _missing_attributes()), so don't change it
# let's test first which information is available
guessed = {}
try:
interface.guessed_radii.update({})
except AttributeError:
interface.guessed_radii = {}
if group is None:
group = interface.universe.atoms
nans = np.isnan(group.radii)
# if no radius is nan, no need to guess anything
if not (np.any(np.equal(group.radii, None)) or np.any(nans)):
return
nones = np.equal(group.radii, None)
group.radii[nones] = np.array([np.nan] * len(group.radii[nones]))
group = group[np.isnan(group.radii)]
# We give precedence to atom names, then to types
try:
        # this test fails either if no 'type' property
# is available, or if it is, but the values are
# integers (like in lammps) and thus cannot be
# used to guess the type (in this code)
group.types.astype(int)
except AttributeError: # no types at all
pass # will try with masses
except ValueError: # types are there, and are not integers
_guess_radii_from_types(interface, group, guessed)
# We fill in the remaining ones using masses information
group = group[np.isnan(group.radii)]
if ('masses' in dir(group)):
_guess_radii_from_masses(interface, group, guessed)
interface.guessed_radii.update(guessed)
|
balazsfabian/pytim
|
pytim/properties.py
|
Python
|
gpl-3.0
| 8,110
|
[
"LAMMPS",
"MDAnalysis"
] |
f4e252862e013868a3d4a26baa1508892afbef1e5f92a12bf3793d44830f3fc8
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from erpnext.utilities.transaction_base import TransactionBase
class MaintenanceVisit(TransactionBase):
def get_feed(self):
return _("To {0}").format(self.customer_name)
def get_item_details(self, item_code):
return frappe.db.get_value("Item", item_code, ["item_name", "description"], as_dict=1)
def validate_serial_no(self):
for d in self.get('purposes'):
if d.serial_no and not frappe.db.exists("Serial No", d.serial_no):
frappe.throw(_("Serial No {0} does not exist").format(d.serial_no))
def validate(self):
self.validate_serial_no()
def update_customer_issue(self, flag):
for d in self.get('purposes'):
if d.prevdoc_docname and d.prevdoc_doctype == 'Warranty Claim' :
if flag==1:
mntc_date = self.mntc_date
service_person = d.service_person
work_done = d.work_done
status = "Open"
if self.completion_status == 'Fully Completed':
status = 'Closed'
elif self.completion_status == 'Partially Completed':
status = 'Work In Progress'
else:
nm = frappe.db.sql("select t1.name, t1.mntc_date, t2.service_person, t2.work_done from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2 where t2.parent = t1.name and t1.completion_status = 'Partially Completed' and t2.prevdoc_docname = %s and t1.name!=%s and t1.docstatus = 1 order by t1.name desc limit 1", (d.prevdoc_docname, self.name))
if nm:
status = 'Work In Progress'
mntc_date = nm and nm[0][1] or ''
service_person = nm and nm[0][2] or ''
work_done = nm and nm[0][3] or ''
else:
status = 'Open'
mntc_date = ''
service_person = ''
work_done = ''
frappe.db.sql("update `tabWarranty Claim` set resolution_date=%s, resolved_by=%s, resolution_details=%s, status=%s where name =%s",(mntc_date,service_person,work_done,status,d.prevdoc_docname))
def check_if_last_visit(self):
"""check if last maintenance visit against same sales order/ Warranty Claim"""
check_for_docname = None
for d in self.get('purposes'):
if d.prevdoc_docname:
check_for_docname = d.prevdoc_docname
#check_for_doctype = d.prevdoc_doctype
if check_for_docname:
check = frappe.db.sql("select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2 where t2.parent = t1.name and t1.name!=%s and t2.prevdoc_docname=%s and t1.docstatus = 1 and (t1.mntc_date > %s or (t1.mntc_date = %s and t1.mntc_time > %s))", (self.name, check_for_docname, self.mntc_date, self.mntc_date, self.mntc_time))
if check:
check_lst = [x[0] for x in check]
check_lst =','.join(check_lst)
frappe.throw(_("Cancel Material Visits {0} before cancelling this Maintenance Visit").format(check_lst))
raise Exception
else:
self.update_customer_issue(0)
def on_submit(self):
self.update_customer_issue(1)
frappe.db.set(self, 'status', 'Submitted')
def on_cancel(self):
self.check_if_last_visit()
frappe.db.set(self, 'status', 'Cancelled')
def on_update(self):
pass
|
treejames/erpnext
|
erpnext/support/doctype/maintenance_visit/maintenance_visit.py
|
Python
|
agpl-3.0
| 3,196
|
[
"VisIt"
] |
90b3e8d71ba6776937d8100a16beddb63de18d51d7ab73bf48732a23f90e3f9f
|
#
# Honeybee: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Honeybee.
#
# Copyright (c) 2013-2020, Mostapha Sadeghipour Roudsari <mostapha@ladybug.tools>
# Honeybee is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Honeybee is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Honeybee; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
This component applies an OpenStudio measure to an OpenStudio file. The component will eventually be integrated into the Export to OpenStudio component.
Read more about OpenStudio measures here: http://nrel.github.io/OpenStudio-user-documentation/reference/measure_writing_guide/
You can download several measures from here: https://bcl.nrel.gov/nrel/types/measure
Many thanks to the NREL team for their support during the process. See (https://github.com/mostaphaRoudsari/Honeybee/issues/214) and (https://github.com/mostaphaRoudsari/Honeybee/issues/290) for just two examples!
-
Provided by Honeybee 0.0.66
Args:
    _osmFilePath: A file path of an OpenStudio file
_epwWeatherFile: An .epw file path on your system as a text string.
    _OSMeasures: Any number of OpenStudio measures that you want to apply to your OpenStudio model. Use the "Honeybee_Load OpenStudio Measure" component to load a measure into Grasshopper
    _runIt: Set to "True" to have the component generate an IDF file from the OSM file and run the IDF through EnergyPlus. Set to "False" to not run the file (this is the default). You can also connect an integer for the following options:
        0 = Do Not Run OSM and IDF through EnergyPlus
1 = Run the OSM and IDF through EnergyPlus with a command prompt window that displays the progress of the simulation
2 = Run the OSM and IDF through EnergyPlus in the background (without the command line popup window).
3 = Generate an IDF from the OSM file but do not run it through EnergyPlus
4 = Run the OSM and IDF through EnergyPlus using only OpenStudio CLI (note that there will be no resultFileAddress produced in this case).
Returns:
readMe!: ...
osmFileAddress: The file path of the OSM file that has been generated on your machine.
idfFileAddress: The file path of the IDF file that has been generated on your machine. This file is only generated when you set "runSimulation_" to "True."
resultFileAddress: The file path of the CSV result file that has been generated on your machine.
sqlFileAddress: The file path to the SQL result file that has been generated on your machine. This file contains all results from the energy model run.
eioFileAddress: The file path of the EIO file that has been generated on your machine. This file contains information about the sizes of all HVAC equipment from the simulation. This file is only generated when you set "runSimulation_" to "True."
rddFileAddress: The file path of the Result Data Dictionary (.rdd) file that is generated after running the file through EnergyPlus. This file contains all possible outputs that can be requested from the EnergyPlus model. Use the "Honeybee_Read Result Dictionary" to see what outputs can be requested.
    htmlReport: The file path to the HTML report that was generated after running the file through EnergyPlus. Open this in a web browser for an overview of the energy model results.
studyFolder: The directory in which the simulation has been run. Connect this to the 'Honeybee_Lookup EnergyPlus' folder to bring many of the files in this directory into Grasshopper.
"""
ghenv.Component.Name = "Honeybee_Apply OpenStudio Measure"
ghenv.Component.NickName = 'applyOSMeasure'
ghenv.Component.Message = 'VER 0.0.66\nJUL_07_2020'
ghenv.Component.IconDisplayMode = ghenv.Component.IconDisplayMode.application
ghenv.Component.Category = "HB-Legacy"
ghenv.Component.SubCategory = "10 | Energy | Energy"
#compatibleHBVersion = VER 0.0.56\nJUL_25_2017
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "0"
except: pass
import os
import shutil
import scriptcontext as sc
import Grasshopper.Kernel as gh
import subprocess
if sc.sticky.has_key('honeybee_release'):
if sc.sticky["honeybee_folders"]["OSLibPath"] != None:
# openstudio is there
openStudioLibFolder = sc.sticky["honeybee_folders"]["OSLibPath"]
openStudioIsReady = True
# check to see that it's version 2.0 or above.
rightVersion = False
try:
osVersion = openStudioLibFolder.split('-')[-1]
if osVersion.startswith('2'):
rightVersion = True
except:
pass
if rightVersion == False:
openStudioIsReady = False
msg = "Your version of OpenStudio must be 2.0 or above to use the measures components."
print msg
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
import clr
clr.AddReferenceToFileAndPath(openStudioLibFolder+"\\openStudio.dll")
import sys
if openStudioLibFolder not in sys.path:
sys.path.append(openStudioLibFolder)
import OpenStudio
else:
openStudioIsReady = False
# let the user know that they need to download OpenStudio libraries
msg1 = "You do not have OpenStudio installed on Your System.\n" + \
"You wont be able to use this component until you install it.\n" + \
"Download the latest OpenStudio for Windows from:\n"
msg2 = "https://www.openstudio.net/downloads"
print msg1
print msg2
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg1)
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg2)
else:
openStudioIsReady = False
def writeBatchFile(workingDir, idfFileName, epwFileAddress, runInBackground = False):
EPDirectory = sc.sticky["honeybee_folders"]["EPPath"]
workingDrive = workingDir[:2]
if idfFileName.EndsWith('.idf'): shIdfFileName = idfFileName.replace('.idf', '')
else: shIdfFileName = idfFileName
if not workingDir.EndsWith('\\'): workingDir = workingDir + '\\'
fullPath = workingDir + shIdfFileName
folderName = workingDir.replace( (workingDrive + '\\'), '')
batchStr = workingDrive + '\ncd\\' + folderName + '\n"' + EPDirectory + \
'Epl-run" ' + fullPath + ' ' + fullPath + ' idf ' + epwFileAddress + ' EP N nolimit N N 0 Y'
batchFileAddress = fullPath +'.bat'
batchfile = open(batchFileAddress, 'w')
batchfile.write(batchStr)
batchfile.close()
#execute the batch file
if runInBackground:
runCmd(batchFileAddress)
else:
os.system(batchFileAddress)
return fullPath + "Zsz.csv",fullPath+".sql",fullPath+".csv", fullPath+".rdd", fullPath+".eio", fullPath+"Table.html"
def runCmd(batchFileAddress, shellKey = True):
batchFileAddress.replace("\\", "/")
p = subprocess.Popen(["cmd /c ", batchFileAddress], shell=shellKey, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
def tryGetOSPath(path):
"""Try to convert a string path to OpenStudio Path."""
try:
return OpenStudio.Path(path)
except TypeError:
# OpenStudio 2.6.1
ospath = OpenStudio.OpenStudioUtilitiesCore.toPath(path)
return OpenStudio.Path(ospath)
def main(runIt, epwFile, OSMeasures, osmFile, hb_OpenStudioMeasure):
# check inputs
if not os.path.isfile(epwFile) or not epwFile.lower().endswith(".epw"):
raise Exception("Can't find epw file")
if not os.path.isfile(osmFile) or not osmFile.lower().endswith(".osm"):
raise Exception("Can't find OpenStudio file")
for OSMeasure in OSMeasures:
try:
measureArgs = OSMeasure.args
measurePath = OSMeasure.path
except:
raise Exception("Not a valid Honeybee measure. \nUse the Honeybee_Load OpenStudio Measure component to create one!")
# Set up the paths to the files
osmName = os.path.split(osmFile)[-1].split('.osm')[0]
workingDir = os.path.split(osmFile)[0]
oswAddress = workingDir + '\\' + 'workflow.osw'
osmPath = tryGetOSPath(osmFile)
epwPath = tryGetOSPath(epwFile)
oswPath = tryGetOSPath(oswAddress)
# Create the workflow JSON.
wf = OpenStudio.WorkflowJSON()
wf.setOswPath(oswPath)
wf.setSeedFile(osmPath)
if runIt == 4:
wf.setWeatherFile(epwPath)
# Sort the measures so that the OpenStudio ones come first, then E+, then reporting.
measureOrder = {"OpenStudio":[], "EnergyPlus":[], "Reporting":[]}
for measure in OSMeasures:
measureOrder[measure.type].append(measure)
sortedMeasures = measureOrder["OpenStudio"]
sortedMeasures.extend(measureOrder["EnergyPlus"])
sortedMeasures.extend(measureOrder["Reporting"])
# Add the measures to the workflow.
workflowSteps = []
for OSMeasure in sortedMeasures:
# Copy measure files to a folder next to the OSM.
measureName = OSMeasure.path.split('\\')[-1]
destDir = workingDir + '\\measures\\' + measureName + '\\'
if os.path.isdir(destDir):
shutil.rmtree(destDir)
shutil.copytree(OSMeasure.path, destDir)
# Create the measure step
measure = OpenStudio.MeasureStep(measureName)
for arg in OSMeasure.args.values():
if str(arg.userInput) != str(arg.default_value):
measure.setArgument(arg.name, str(arg.userInput))
workflowSteps.append(measure)
# Set the workflow steps and save the JSON.
stepVector = OpenStudio.WorkflowStepVector(workflowSteps)
wf.setWorkflowSteps(stepVector)
wf.save()
# Write the batch file.
workingDrive = workingDir[:2].upper()
osExePath = '/'.join(openStudioLibFolder.split('/')[:-2]) +'/bin/'
osExePath = osExePath.replace('/', '\\')
osExePath = osExePath.replace((workingDrive + '\\'), '')
# Write the batch file to apply the measures.
batchStr = workingDrive + '\ncd\\' + osExePath + '\n"' + 'openstudio.exe"' + ' run -w ' + oswAddress
batchFileAddress = workingDir + '\\' + osmName.replace(" ", "_") +'.bat'
batchfile = open(batchFileAddress, 'w')
batchfile.write(batchStr)
batchfile.close()
# Apply the measures.
if runIt == 2:
runCmd(batchFileAddress)
else:
os.system(batchFileAddress)
# Run the resulting IDF through EnergyPlus using EPl-Run.
runDir = workingDir + '\\' + 'run\\'
epRunDir = workingDir + '\\' + osmName + '\\'
idfFolder = os.path.join(epRunDir)
idfFolder = os.path.join(idfFolder, "ModelToIdf")
idfFilePath = os.path.join(idfFolder, "in.idf")
if not os.path.isfile(runDir+"in.idf"):
# The simulation has not run correctly and we must parse the error log.
logfile = runDir + 'run.log'
if os.path.isfile(logfile):
errorFound = False
            errorMsg = 'The measures did not run correctly as a result of the following error:\n'
with open(logfile, "r") as log:
for line in log:
if 'ERROR]' in line and errorFound == False:
errorFound = True
msg = line.split('ERROR]')[-1]
errorMsg = errorMsg + msg
print errorMsg
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, errorMsg)
return -1
if runIt < 3:
try:
os.mkdir(epRunDir)
except:
pass
try:
os.mkdir(idfFolder)
except:
pass
shutil.copy(runDir+"pre-preprocess.idf", idfFilePath)
resultFile = writeBatchFile(epRunDir, "ModelToIdf\\in.idf", epwFile, runIt > 1)
else:
idfFilePath = None
resultFile = [None, None, None, None, None, None]
osmFileAddress = runDir + 'in.osm'
return osmFileAddress, idfFilePath, resultFile[2], resultFile[1], resultFile[4], resultFile[3], resultFile[5], workingDir
#Honeybee check.
initCheck = True
w = gh.GH_RuntimeMessageLevel.Warning
if not sc.sticky.has_key('honeybee_release') == True:
    initCheck = False
    print "You should first let Honeybee fly..."
    ghenv.Component.AddRuntimeMessage(w, "You should first let Honeybee fly...")
else:
try:
if not sc.sticky['honeybee_release'].isCompatible(ghenv.Component): initCheck = False
if sc.sticky['honeybee_release'].isInputMissing(ghenv.Component): initCheck = False
hb_OpenStudioMeasure = sc.sticky["honeybee_Measure"]
hb_OPSMeasureArg = sc.sticky["honeybee_MeasureArg"]
except:
initCheck = False
warning = "You need a newer version of Honeybee to use this compoent." + \
"Use updateHoneybee component to update userObjects.\n" + \
"If you have already updated userObjects drag Honeybee_Honeybee component " + \
"into canvas and try again."
ghenv.Component.AddRuntimeMessage(w, warning)
if openStudioIsReady and initCheck == True and _runIt > 0 and _osmFilePath != None:
result = main(_runIt, _epwWeatherFile, _OSMeasures, _osmFilePath, hb_OpenStudioMeasure)
if result != -1:
osmFileAddress, idfFileAddress, resultFileAddress, sqlFileAddress, eioFileAddress, rddFileAddress, htmlReport, studyFolder = result
|
mostaphaRoudsari/Honeybee
|
src/Honeybee_Apply OpenStudio Measure.py
|
Python
|
gpl-3.0
| 14,079
|
[
"EPW"
] |
994c4eb0a627cf132ae71ec05f70fc724c64550f3191b163617dbc0b0366956b
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import warnings
import numpy as np
from pymatgen.io.cif import CifParser, CifWriter, CifBlock
from pymatgen.io.vasp.inputs import Poscar
from pymatgen import Element, Specie, Lattice, Structure, Composition, DummySpecie
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.util.testing import PymatgenTest
from pymatgen.electronic_structure.core import Magmom
try:
import pybtex
except ImportError:
pybtex = None
class CifBlockTest(PymatgenTest):
def test_to_string(self):
with open(self.TEST_FILES_DIR / 'Graphite.cif') as f:
s = f.read()
c = CifBlock.from_string(s)
cif_str_2 = str(CifBlock.from_string(str(c)))
cif_str = """data_53781-ICSD
_database_code_ICSD 53781
_audit_creation_date 2003-04-01
_audit_update_record 2013-02-01
_chemical_name_systematic Carbon
_chemical_formula_structural C
_chemical_formula_sum C1
_chemical_name_structure_type Graphite(2H)
_chemical_name_mineral 'Graphite 2H'
_exptl_crystal_density_diffrn 2.22
_publ_section_title 'Structure of graphite'
loop_
_citation_id
_citation_journal_full
_citation_year
_citation_journal_volume
_citation_page_first
_citation_page_last
_citation_journal_id_ASTM
primary 'Physical Review (1,1893-132,1963/141,1966-188,1969)'
1917 10 661 696 PHRVAO
loop_
_publ_author_name
'Hull, A.W.'
_cell_length_a 2.47
_cell_length_b 2.47
_cell_length_c 6.8
_cell_angle_alpha 90.
_cell_angle_beta 90.
_cell_angle_gamma 120.
_cell_volume 35.93
_cell_formula_units_Z 4
_symmetry_space_group_name_H-M 'P 63/m m c'
_symmetry_Int_Tables_number 194
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, x-y, -z+1/2'
2 '-x+y, y, -z+1/2'
3 '-y, -x, -z+1/2'
4 '-x+y, -x, -z+1/2'
5 '-y, x-y, -z+1/2'
6 'x, y, -z+1/2'
7 '-x, -x+y, z+1/2'
8 'x-y, -y, z+1/2'
9 'y, x, z+1/2'
10 'x-y, x, z+1/2'
11 'y, -x+y, z+1/2'
12 '-x, -y, z+1/2'
13 '-x, -x+y, -z'
14 'x-y, -y, -z'
15 'y, x, -z'
16 'x-y, x, -z'
17 'y, -x+y, -z'
18 '-x, -y, -z'
19 'x, x-y, z'
20 '-x+y, y, z'
21 '-y, -x, z'
22 '-x+y, -x, z'
23 '-y, x-y, z'
24 'x, y, z'
loop_
_atom_type_symbol
_atom_type_oxidation_number
C0+ 0
loop_
_atom_site_label
_atom_site_type_symbol
_atom_site_symmetry_multiplicity
_atom_site_Wyckoff_symbol
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_B_iso_or_equiv
_atom_site_occupancy
_atom_site_attached_hydrogens
C1 C0+ 2 b 0 0 0.25 . 1. 0
C2 C0+ 2 c 0.3333 0.6667 0.25 . 1. 0"""
for l1, l2, l3 in zip(str(c).split("\n"), cif_str.split("\n"),
cif_str_2.split("\n")):
self.assertEqual(l1.strip(), l2.strip())
self.assertEqual(l2.strip(), l3.strip())
def test_double_quotes_and_underscore_data(self):
cif_str = """data_test
_symmetry_space_group_name_H-M "P -3 m 1"
_thing '_annoying_data'"""
cb = CifBlock.from_string(cif_str)
self.assertEqual(cb["_symmetry_space_group_name_H-M"], "P -3 m 1")
self.assertEqual(cb["_thing"], "_annoying_data")
self.assertEqual(str(cb), cif_str.replace('"', "'"))
def test_double_quoted_data(self):
cif_str = """data_test
_thing ' '_annoying_data''
_other " "_more_annoying_data""
_more ' "even more" ' """
cb = CifBlock.from_string(cif_str)
self.assertEqual(cb["_thing"], " '_annoying_data'")
self.assertEqual(cb["_other"], ' "_more_annoying_data"')
self.assertEqual(cb["_more"], ' "even more" ')
def test_nested_fake_multiline_quotes(self):
cif_str = """data_test
_thing
;
long quotes
;
still in the quote
;
actually going to end now
;"""
cb = CifBlock.from_string(cif_str)
self.assertEqual(cb["_thing"], " long quotes ; still in the quote"
" ; actually going to end now")
def test_long_loop(self):
data = {'_stuff1': ['A' * 30] * 2,
'_stuff2': ['B' * 30] * 2,
'_stuff3': ['C' * 30] * 2}
loops = [['_stuff1', '_stuff2', '_stuff3']]
cif_str = """data_test
loop_
_stuff1
_stuff2
_stuff3
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"""
self.assertEqual(str(CifBlock(data, loops, 'test')), cif_str)
class CifIOTest(PymatgenTest):
def test_CifParser(self):
parser = CifParser(self.TEST_FILES_DIR / 'LiFePO4.cif')
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Li4 Fe4 P4 O16",
"Incorrectly parsed cif.")
parser = CifParser(self.TEST_FILES_DIR / 'V2O3.cif')
for s in parser.get_structures(True):
self.assertEqual(s.formula, "V4 O6")
bibtex_str = """
@article{cif-reference-0,
author = "Andersson, G.",
title = "Studies on vanadium oxides. I. Phase analysis",
journal = "Acta Chemica Scandinavica (1-27,1973-42,1988)",
volume = "8",
year = "1954",
pages = "1599--1606"
}
"""
self.assertEqual(parser.get_bibtex_string().strip(), bibtex_str.strip())
parser = CifParser(self.TEST_FILES_DIR / 'Li2O.cif')
prim = parser.get_structures(True)[0]
self.assertEqual(prim.formula, "Li2 O1")
conv = parser.get_structures(False)[0]
self.assertEqual(conv.formula, "Li8 O4")
# test for disordered structures
parser = CifParser(self.TEST_FILES_DIR / 'Li10GeP2S12.cif')
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Li20.2 Ge2.06 P3.94 S24",
"Incorrectly parsed cif.")
cif_str = """#\#CIF1.1
##########################################################################
# Crystallographic Information Format file
# Produced by PyCifRW module
#
# This is a CIF file. CIF has been adopted by the International
# Union of Crystallography as the standard for data archiving and
# transmission.
#
# For information on this file format, follow the CIF links at
# http://www.iucr.org
##########################################################################
data_FePO4
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 10.4117668699
_cell_length_b 6.06717187997
_cell_length_c 4.75948953998
loop_ # sometimes this is in a loop (incorrectly)
_cell_angle_alpha
91.0
_cell_angle_beta 92.0
_cell_angle_gamma 93.0
_chemical_name_systematic 'Generated by pymatgen'
_symmetry_Int_Tables_number 1
_chemical_formula_structural FePO4
_chemical_formula_sum 'Fe4 P4 O16'
_cell_volume 300.65685512
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_attached_hydrogens
_atom_site_B_iso_or_equiv
_atom_site_occupancy
Fe Fe1 1 0.218728 0.750000 0.474867 0 . 1
Fe JJ2 1 0.281272 0.250000 0.974867 0 . 1
# there's a typo here, parser should read the symbol from the
# _atom_site_type_symbol
Fe Fe3 1 0.718728 0.750000 0.025133 0 . 1
Fe Fe4 1 0.781272 0.250000 0.525133 0 . 1
P P5 1 0.094613 0.250000 0.418243 0 . 1
P P6 1 0.405387 0.750000 0.918243 0 . 1
P P7 1 0.594613 0.250000 0.081757 0 . 1
P P8 1 0.905387 0.750000 0.581757 0 . 1
O O9 1 0.043372 0.750000 0.707138 0 . 1
O O10 1 0.096642 0.250000 0.741320 0 . 1
O O11 1 0.165710 0.046072 0.285384 0 . 1
O O12 1 0.165710 0.453928 0.285384 0 . 1
O O13 1 0.334290 0.546072 0.785384 0 . 1
O O14 1 0.334290 0.953928 0.785384 0 . 1
O O15 1 0.403358 0.750000 0.241320 0 . 1
O O16 1 0.456628 0.250000 0.207138 0 . 1
O O17 1 0.543372 0.750000 0.792862 0 . 1
O O18 1 0.596642 0.250000 0.758680 0 . 1
O O19 1 0.665710 0.046072 0.214616 0 . 1
O O20 1 0.665710 0.453928 0.214616 0 . 1
O O21 1 0.834290 0.546072 0.714616 0 . 1
O O22 1 0.834290 0.953928 0.714616 0 . 1
O O23 1 0.903358 0.750000 0.258680 0 . 1
O O24 1 0.956628 0.250000 0.292862 0 . 1
"""
parser = CifParser.from_string(cif_str)
struct = parser.get_structures(primitive=False)[0]
self.assertEqual(struct.formula, "Fe4 P4 O16")
self.assertAlmostEqual(struct.lattice.a, 10.4117668699)
self.assertAlmostEqual(struct.lattice.b, 6.06717187997)
self.assertAlmostEqual(struct.lattice.c, 4.75948953998)
self.assertAlmostEqual(struct.lattice.alpha, 91)
self.assertAlmostEqual(struct.lattice.beta, 92)
self.assertAlmostEqual(struct.lattice.gamma, 93)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
parser = CifParser(self.TEST_FILES_DIR / 'srycoo.cif')
self.assertEqual(parser.get_structures()[0].formula,
"Sr5.6 Y2.4 Co8 O21")
# Test with a decimal Xyz. This should parse as two atoms in
# conventional cell if it is correct, one if not.
parser = CifParser(self.TEST_FILES_DIR / "Fe.cif")
self.assertEqual(len(parser.get_structures(primitive=False)[0]), 2)
self.assertFalse(parser.has_errors)
def test_site_symbol_preference(self):
parser = CifParser(self.TEST_FILES_DIR / 'site_type_symbol_test.cif')
self.assertEqual(parser.get_structures()[0].formula, "Ge0.4 Sb0.4 Te1")
def test_implicit_hydrogen(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
parser = CifParser(self.TEST_FILES_DIR / 'Senegalite_implicit_hydrogen.cif')
for s in parser.get_structures():
self.assertEqual(s.formula, "Al8 P4 O32")
self.assertEqual(sum(s.site_properties['implicit_hydrogens']), 20)
self.assertIn("Structure has implicit hydrogens defined, "
"parsed structure unlikely to be suitable for use "
"in calculations unless hydrogens added.", parser.errors)
parser = CifParser(self.TEST_FILES_DIR / 'cif_implicit_hydrogens_cod_1011130.cif')
s = parser.get_structures()[0]
self.assertIn("Structure has implicit hydrogens defined, "
"parsed structure unlikely to be suitable for use "
"in calculations unless hydrogens added.", parser.errors)
def test_CifParserSpringerPauling(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Below are 10 tests for CIFs from the Springer Materials/Pauling file DBs.
# Partial occupancy on sites, incorrect label, previously unparsable
parser = CifParser(self.TEST_FILES_DIR / 'PF_sd_1928405.cif')
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Er1 Mn3.888 Fe2.112 Sn6")
self.assertTrue(parser.has_errors)
# Partial occupancy on sites, previously parsed as an ordered structure
parser = CifParser(self.TEST_FILES_DIR / 'PF_sd_1011081.cif')
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Zr0.2 Nb0.8")
self.assertTrue(parser.has_errors)
# Partial occupancy on sites, incorrect label, previously unparsable
parser = CifParser(self.TEST_FILES_DIR / 'PF_sd_1615854.cif')
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Na2 Al2 Si6 O16")
self.assertTrue(parser.has_errors)
# Partial occupancy on sites, incorrect label, previously unparsable
parser = CifParser(self.TEST_FILES_DIR / 'PF_sd_1622133.cif')
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Ca0.184 Mg13.016 Fe2.8 Si16 O48")
self.assertTrue(parser.has_errors)
# Partial occupancy on sites, previously parsed as an ordered structure
parser = CifParser(self.TEST_FILES_DIR / 'PF_sd_1908491.cif')
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Mn0.48 Zn0.52 Ga2 Se4")
self.assertTrue(parser.has_errors)
# Partial occupancy on sites, incorrect label, previously unparsable
parser = CifParser(self.TEST_FILES_DIR / 'PF_sd_1811457.cif')
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Ba2 Mg0.6 Zr0.2 Ta1.2 O6")
self.assertTrue(parser.has_errors)
# Incomplete powder diffraction data, previously unparsable
# This CIF file contains the molecular species "NH3" which is
# parsed as "N" because the label is "N{x}" (x = 1,2,..) and the
# corresponding symbol is "NH3". Since, the label and symbol are switched
# in CIFs from Springer Materials/Pauling file DBs, CifParser parses the
# element as "N".
parser = CifParser(self.TEST_FILES_DIR / 'PF_sd_1002871.cif')
self.assertEqual(parser.get_structures(True)[0].formula, "Cu1 Br2 N6")
self.assertEqual(parser.get_structures(True)[1].formula, "Cu1 Br4 N6")
self.assertTrue(parser.has_errors)
# Incomplete powder diffraction data, previously unparsable
parser = CifParser(self.TEST_FILES_DIR / 'PF_sd_1704003.cif')
for s in parser.get_structures():
self.assertEqual(s.formula, "Rb4 Mn2 F12")
self.assertTrue(parser.has_errors)
# Unparsable species 'OH/OH2', previously parsed as "O"
parser = CifParser(self.TEST_FILES_DIR / 'PF_sd_1500382.cif')
for s in parser.get_structures():
self.assertEqual(s.formula, "Mg6 B2 O6 F1.764")
self.assertTrue(parser.has_errors)
# Unparsable species 'OH/OH2', previously parsed as "O"
parser = CifParser(self.TEST_FILES_DIR / 'PF_sd_1601634.cif')
for s in parser.get_structures():
self.assertEqual(s.formula, "Zn1.29 Fe0.69 As2 Pb1.02 O8")
def test_CifParserCod(self):
"""
Parsing problematic cif files from the COD database
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Symbol in capital letters
parser = CifParser(self.TEST_FILES_DIR / 'Cod_2100513.cif')
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Ca4 Nb2.0 Al2 O12")
# Label in capital letters
parser = CifParser(self.TEST_FILES_DIR / 'Cod_4115344.cif')
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Mo4 P2 H60 C60 I4 O4")
def test_parse_symbol(self):
"""
Test the _parse_symbol function with several potentially
problematic examples of symbols and labels.
"""
test_cases = {
"MgT": "Mg",
"MgT1": "Mg",
"H(46A)": "H",
"O(M)": "O",
"N(Am)": "N",
"H1N2a": "H",
"CO(1)": "Co",
"Wat1": "O",
"MgM2A": "Mg",
"CaX": "Ca",
"X1": "X",
"X": "X",
"OA1": "O",
"NaA2": "Na",
"O-H2": "O",
"OD2": "O",
"OW": "O",
"SiT": "Si",
"SiTet": "Si",
"Na-Int": "Na",
"CaD1": "Ca",
"KAm": "K",
"D+1": "D",
"D": "D",
"D1-": "D",
"D4": "D",
"D0": "D",
"NH": "N",
"NH2": "N",
"NH3": "N",
"SH": "S"
}
for e in Element:
name = e.name
test_cases[name] = name
if len(name) == 2:
test_cases[name.upper()] = name
test_cases[name.upper() + str(1)] = name
test_cases[name.upper() + "A"] = name
test_cases[name + str(1)] = name
test_cases[name + str(2)] = name
test_cases[name + str(3)] = name
test_cases[name + str(1) + "A"] = name
special = {"Hw": "H", "Ow": "O", "Wat": "O",
"wat": "O", "OH": "", "OH2": ""}
test_cases.update(special)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
parser = CifParser(self.TEST_FILES_DIR / 'LiFePO4.cif')
for sym, expected_symbol in test_cases.items():
self.assertEqual(parser._parse_symbol(sym), expected_symbol)
def test_CifWriter(self):
filepath = self.TEST_FILES_DIR / 'POSCAR'
poscar = Poscar.from_file(filepath)
writer = CifWriter(poscar.structure, symprec=0.01)
ans = """# generated using pymatgen
data_FePO4
_symmetry_space_group_name_H-M Pnma
_cell_length_a 10.41176687
_cell_length_b 6.06717188
_cell_length_c 4.75948954
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 62
_chemical_formula_structural FePO4
_chemical_formula_sum 'Fe4 P4 O16'
_cell_volume 300.65685512
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
2 '-x, -y, -z'
3 '-x+1/2, -y, z+1/2'
4 'x+1/2, y, -z+1/2'
5 'x+1/2, -y+1/2, -z+1/2'
6 '-x+1/2, y+1/2, z+1/2'
7 '-x, y+1/2, -z'
8 'x, -y+1/2, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Fe Fe1 4 0.218728 0.250000 0.525133 1
P P2 4 0.094613 0.750000 0.581757 1
O O3 8 0.165710 0.546072 0.714616 1
O O4 4 0.043372 0.250000 0.292862 1
O O5 4 0.096642 0.750000 0.258680 1"""
for l1, l2 in zip(str(writer).split("\n"), ans.split("\n")):
self.assertEqual(l1.strip(), l2.strip())
def test_symmetrized(self):
filepath = self.TEST_FILES_DIR / 'POSCAR'
poscar = Poscar.from_file(filepath, check_for_POTCAR=False)
writer = CifWriter(poscar.structure, symprec=0.1)
ans = """# generated using pymatgen
data_FePO4
_symmetry_space_group_name_H-M Pnma
_cell_length_a 10.41176687
_cell_length_b 6.06717188
_cell_length_c 4.75948954
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 62
_chemical_formula_structural FePO4
_chemical_formula_sum 'Fe4 P4 O16'
_cell_volume 300.65685512
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
2 '-x, -y, -z'
3 '-x+1/2, -y, z+1/2'
4 'x+1/2, y, -z+1/2'
5 'x+1/2, -y+1/2, -z+1/2'
6 '-x+1/2, y+1/2, z+1/2'
7 '-x, y+1/2, -z'
8 'x, -y+1/2, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Fe Fe1 4 0.218728 0.250000 0.525133 1
P P2 4 0.094613 0.750000 0.581757 1
O O3 8 0.165710 0.546072 0.714616 1
O O4 4 0.043372 0.250000 0.292862 1
O O5 4 0.096642 0.750000 0.258680 1"""
cif = CifParser.from_string(str(writer))
m = StructureMatcher()
self.assertTrue(m.fit(cif.get_structures()[0], poscar.structure))
# for l1, l2 in zip(str(writer).split("\n"), ans.split("\n")):
# self.assertEqual(l1.strip(), l2.strip())
ans = """# generated using pymatgen
data_LiFePO4
_symmetry_space_group_name_H-M Pnma
_cell_length_a 10.41037000
_cell_length_b 6.06577000
_cell_length_c 4.74480000
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 62
_chemical_formula_structural LiFePO4
_chemical_formula_sum 'Li4 Fe4 P4 O16'
_cell_volume 299.619458734
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
2 '-x, -y, -z'
3 '-x+1/2, -y, z+1/2'
4 'x+1/2, y, -z+1/2'
5 'x+1/2, -y+1/2, -z+1/2'
6 '-x+1/2, y+1/2, z+1/2'
7 '-x, y+1/2, -z'
8 'x, -y+1/2, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Li Li1 4 0.000000 0.000000 0.000000 1.0
Fe Fe2 4 0.218845 0.750000 0.474910 1.0
P P3 4 0.094445 0.250000 0.417920 1.0
O O4 8 0.165815 0.044060 0.286540 1.0
O O5 4 0.043155 0.750000 0.708460 1.0
O O6 4 0.096215 0.250000 0.741480 1.0
"""
s = Structure.from_file(self.TEST_FILES_DIR / 'LiFePO4.cif')
writer = CifWriter(s, symprec=0.1)
s2 = CifParser.from_string(str(writer)).get_structures()[0]
self.assertTrue(m.fit(s, s2))
s = self.get_structure("Li2O")
writer = CifWriter(s, symprec=0.1)
s2 = CifParser.from_string(str(writer)).get_structures()[0]
self.assertTrue(m.fit(s, s2))
def test_disordered(self):
si = Element("Si")
n = Element("N")
coords = list()
coords.append(np.array([0, 0, 0]))
coords.append(np.array([0.75, 0.5, 0.75]))
lattice = Lattice(np.array([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]]))
struct = Structure(lattice, [si, {si:0.5, n:0.5}], coords)
writer = CifWriter(struct)
ans = """# generated using pymatgen
data_Si1.5N0.5
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 3.84019793
_cell_length_b 3.84019899
_cell_length_c 3.84019793
_cell_angle_alpha 119.99999086
_cell_angle_beta 90.00000000
_cell_angle_gamma 60.00000914
_symmetry_Int_Tables_number 1
_chemical_formula_structural Si1.5N0.5
_chemical_formula_sum 'Si1.5 N0.5'
_cell_volume 40.04479464
_cell_formula_units_Z 1
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Si Si1 1 0.000000 0.000000 0.000000 1
Si Si2 1 0.750000 0.500000 0.750000 0.5
N N3 1 0.750000 0.500000 0.750000 0.5
"""
for l1, l2 in zip(str(writer).split("\n"), ans.split("\n")):
self.assertEqual(l1.strip(), l2.strip())
def test_specie_cifwriter(self):
si4 = Specie("Si", 4)
si3 = Specie("Si", 3)
n = DummySpecie("X", -3)
coords = list()
coords.append(np.array([0.5, 0.5, 0.5]))
coords.append(np.array([0.75, 0.5, 0.75]))
coords.append(np.array([0, 0, 0]))
lattice = Lattice(np.array([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]]))
struct = Structure(lattice, [n, {si3:0.5, n:0.5}, si4], coords)
writer = CifWriter(struct)
ans = """# generated using pymatgen
data_X1.5Si1.5
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 3.84019793
_cell_length_b 3.84019899
_cell_length_c 3.84019793
_cell_angle_alpha 119.99999086
_cell_angle_beta 90.00000000
_cell_angle_gamma 60.00000914
_symmetry_Int_Tables_number 1
_chemical_formula_structural X1.5Si1.5
_chemical_formula_sum 'X1.5 Si1.5'
_cell_volume 40.04479464
_cell_formula_units_Z 1
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_type_symbol
_atom_type_oxidation_number
X3- -3.0
Si3+ 3.0
Si4+ 4.0
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
X3- X1 1 0.500000 0.500000 0.500000 1
X3- X2 1 0.750000 0.500000 0.750000 0.5
Si3+ Si3 1 0.750000 0.500000 0.750000 0.5
Si4+ Si4 1 0.000000 0.000000 0.000000 1
"""
for l1, l2 in zip(str(writer).split("\n"), ans.split("\n")):
self.assertEqual(l1.strip(), l2.strip())
# test that mixed valence works properly
s2 = Structure.from_str(ans, "cif")
self.assertEqual(struct.composition, s2.composition)
def test_primes(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
parser = CifParser(self.TEST_FILES_DIR / 'C26H16BeN2O2S2.cif')
for s in parser.get_structures(False):
self.assertEqual(s.composition, 8 * Composition('C26H16BeN2O2S2'))
def test_missing_atom_site_type_with_oxistates(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
parser = CifParser(self.TEST_FILES_DIR / 'P24Ru4H252C296S24N16.cif')
c = Composition({'S0+': 24, 'Ru0+': 4, 'H0+': 252, 'C0+': 296,
'N0+': 16, 'P0+': 24})
for s in parser.get_structures(False):
self.assertEqual(s.composition, c)
def test_no_coords_or_species(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
string= """#generated using pymatgen
data_Si1.5N1.5
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 3.84019793
_cell_length_b 3.84019899
_cell_length_c 3.84019793
_cell_angle_alpha 119.99999086
_cell_angle_beta 90.00000000
_cell_angle_gamma 60.00000914
_symmetry_Int_Tables_number 1
_chemical_formula_structural Si1.5N1.5
_chemical_formula_sum 'Si1.5 N1.5'
_cell_volume 40.0447946443
_cell_formula_units_Z 0
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_type_symbol
_atom_type_oxidation_number
Si3+ 3.0
Si4+ 4.0
N3- -3.0
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
? ? ? ? ? ? ?
"""
parser = CifParser.from_string(string)
self.assertRaises(ValueError, parser.get_structures)
def test_get_lattice_from_lattice_type(self):
cif_structure = """#generated using pymatgen
data_FePO4
_symmetry_space_group_name_H-M Pnma
_cell_length_a 10.41176687
_cell_length_b 6.06717188
_cell_length_c 4.75948954
_chemical_formula_structural FePO4
_chemical_formula_sum 'Fe4 P4 O16'
_cell_volume 300.65685512
_cell_formula_units_Z 4
_symmetry_cell_setting Orthorhombic
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Fe Fe1 1 0.218728 0.750000 0.474867 1
Fe Fe2 1 0.281272 0.250000 0.974867 1
Fe Fe3 1 0.718728 0.750000 0.025133 1
Fe Fe4 1 0.781272 0.250000 0.525133 1
P P5 1 0.094613 0.250000 0.418243 1
P P6 1 0.405387 0.750000 0.918243 1
P P7 1 0.594613 0.250000 0.081757 1
P P8 1 0.905387 0.750000 0.581757 1
O O9 1 0.043372 0.750000 0.707138 1
O O10 1 0.096642 0.250000 0.741320 1
O O11 1 0.165710 0.046072 0.285384 1
O O12 1 0.165710 0.453928 0.285384 1
O O13 1 0.334290 0.546072 0.785384 1
O O14 1 0.334290 0.953928 0.785384 1
O O15 1 0.403358 0.750000 0.241320 1
O O16 1 0.456628 0.250000 0.207138 1
O O17 1 0.543372 0.750000 0.792862 1
O O18 1 0.596642 0.250000 0.758680 1
O O19 1 0.665710 0.046072 0.214616 1
O O20 1 0.665710 0.453928 0.214616 1
O O21 1 0.834290 0.546072 0.714616 1
O O22 1 0.834290 0.953928 0.714616 1
O O23 1 0.903358 0.750000 0.258680 1
O O24 1 0.956628 0.250000 0.292862 1
"""
cp = CifParser.from_string(cif_structure)
s_test = cp.get_structures(False)[0]
filepath = self.TEST_FILES_DIR / 'POSCAR'
poscar = Poscar.from_file(filepath)
s_ref = poscar.structure
sm = StructureMatcher(stol=0.05, ltol=0.01, angle_tol=0.1)
self.assertTrue(sm.fit(s_ref, s_test))
def test_empty(self):
# single line
cb = CifBlock.from_string("data_mwe\nloop_\n_tag\n ''")
self.assertEqual(cb.data['_tag'][0], '')
# multi line
cb = CifBlock.from_string("data_mwe\nloop_\n_tag\n;\n;")
self.assertEqual(cb.data['_tag'][0], '')
cb2 = CifBlock.from_string(str(cb))
self.assertEqual(cb, cb2)
def test_bad_cif(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
f = self.TEST_FILES_DIR / "bad_occu.cif"
p = CifParser(f)
self.assertRaises(ValueError, p.get_structures)
p = CifParser(f, occupancy_tolerance=2)
s = p.get_structures()[0]
self.assertAlmostEqual(s[0].species["Al3+"], 0.5)
def test_one_line_symm(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
f = self.TEST_FILES_DIR / "OneLineSymmP1.cif"
p = CifParser(f)
s = p.get_structures()[0]
self.assertEqual(s.formula, "Ga4 Pb2 O8")
def test_no_symmops(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
f = self.TEST_FILES_DIR / "nosymm.cif"
p = CifParser(f)
s = p.get_structures()[0]
self.assertEqual(s.formula, "H96 C60 O8")
def test_dot_positions(self):
f = self.TEST_FILES_DIR / "ICSD59959.cif"
p = CifParser(f)
s = p.get_structures()[0]
self.assertEqual(s.formula, "K1 Mn1 F3")
def test_replacing_finite_precision_frac_coords(self):
f = self.TEST_FILES_DIR / "cif_finite_precision_frac_coord_error.cif"
with warnings.catch_warnings():
p = CifParser(f)
s = p.get_structures()[0]
self.assertEqual(str(s.composition), "N5+24")
self.assertIn("Some fractional co-ordinates rounded to ideal values to "
"avoid finite precision errors.", p.errors)
def test_empty_deque(self):
s = """data_1526655
_journal_name_full
_space_group_IT_number 227
_symmetry_space_group_name_Hall 'F 4d 2 3 -1d'
_symmetry_space_group_name_H-M 'F d -3 m :1'
_cell_angle_alpha 90
_cell_angle_beta 90
_cell_angle_gamma 90
_cell_formula_units_Z 8
_cell_length_a 5.381
_cell_length_b 5.381
_cell_length_c 5.381
_cell_volume 155.808
loop_
_atom_site_label
_atom_site_type_symbol
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
_atom_site_U_iso_or_equiv
Si1 Si 0 0 0 1 0.0
_iucr_refine_fcf_details
;
data_symmetries
loop_
_space_group_symop_id
_space_group_symop_operation_xyz
1 x,y,z
2 -x+1/2,y+1/2,-z+1/2
3 -x,-y,-z
4 x-1/2,-y-1/2,z-1/2
;"""
p = CifParser.from_string(s)
self.assertEqual(p.get_structures()[0].formula, "Si1")
cif = """
data_1526655
_journal_name_full
_space_group_IT_number 227
_symmetry_space_group_name_Hall 'F 4d 2 3 -1d'
_symmetry_space_group_name_H-M 'F d -3 m :1'
_cell_angle_alpha 90
_cell_angle_beta 90
_cell_angle_gamma 90
_cell_formula_units_Z 8
_cell_length_a 5.381
_cell_length_b 5.381
_cell_length_c 5.381
_cell_volume 155.808
_iucr_refine_fcf_details
;
data_symmetries
Some arbitrary multiline string
;
loop_
_atom_site_label
_atom_site_type_symbol
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
_atom_site_U_iso_or_equiv
Si1 Si 0 0 0 1 0.0
"""
parser = CifParser.from_string(cif)
self.assertEqual(p.get_structures()[0].formula, "Si1")
class MagCifTest(PymatgenTest):
def setUp(self):
warnings.filterwarnings("ignore")
self.mcif = CifParser(self.TEST_FILES_DIR / "magnetic.example.NiO.mcif")
self.mcif_ncl = CifParser(self.TEST_FILES_DIR / "magnetic.ncl.example.GdB4.mcif")
self.mcif_incom = CifParser(self.TEST_FILES_DIR / "magnetic.incommensurate.example.Cr.mcif")
self.mcif_disord = CifParser(self.TEST_FILES_DIR / "magnetic.disordered.example.CuMnO2.mcif")
self.mcif_ncl2 = CifParser(self.TEST_FILES_DIR / "Mn3Ge_IR2.mcif")
def tearDown(self):
warnings.simplefilter("default")
def test_mcif_detection(self):
self.assertTrue(self.mcif.feature_flags["magcif"])
self.assertTrue(self.mcif_ncl.feature_flags["magcif"])
self.assertTrue(self.mcif_incom.feature_flags["magcif"])
self.assertTrue(self.mcif_disord.feature_flags["magcif"])
self.assertFalse(self.mcif.feature_flags["magcif_incommensurate"])
self.assertFalse(self.mcif_ncl.feature_flags["magcif_incommensurate"])
self.assertTrue(self.mcif_incom.feature_flags["magcif_incommensurate"])
self.assertFalse(self.mcif_disord.feature_flags["magcif_incommensurate"])
def test_get_structures(self):
# incommensurate structures not currently supported
self.assertRaises(NotImplementedError, self.mcif_incom.get_structures)
# disordered magnetic structures not currently supported
self.assertRaises(NotImplementedError, self.mcif_disord.get_structures)
# taken from self.mcif_ncl, removing explicit magnetic symmops
# so that MagneticSymmetryGroup() has to be invoked
magcifstr = """
data_5yOhtAoR
_space_group.magn_name_BNS "P 4/m' b' m' "
_cell_length_a 7.1316
_cell_length_b 7.1316
_cell_length_c 4.0505
_cell_angle_alpha 90.00
_cell_angle_beta 90.00
_cell_angle_gamma 90.00
loop_
_atom_site_label
_atom_site_type_symbol
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Gd1 Gd 0.31746 0.81746 0.00000 1
B1 B 0.00000 0.00000 0.20290 1
B2 B 0.17590 0.03800 0.50000 1
B3 B 0.08670 0.58670 0.50000 1
loop_
_atom_site_moment_label
_atom_site_moment_crystalaxis_x
_atom_site_moment_crystalaxis_y
_atom_site_moment_crystalaxis_z
Gd1 5.05 5.05 0.0"""
s = self.mcif.get_structures(primitive=False)[0]
self.assertEqual(s.formula, "Ni32 O32")
self.assertTrue(Magmom.are_collinear(s.site_properties['magmom']))
# example with non-collinear spin
s_ncl = self.mcif_ncl.get_structures(primitive=False)[0]
s_ncl_from_msg = CifParser.from_string(magcifstr).get_structures(primitive=False)[0]
self.assertEqual(s_ncl.formula, "Gd4 B16")
self.assertFalse(Magmom.are_collinear(s_ncl.site_properties['magmom']))
self.assertTrue(s_ncl.matches(s_ncl_from_msg))
def test_write(self):
cw_ref_string = """# generated using pymatgen
data_GdB4
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 7.13160000
_cell_length_b 7.13160000
_cell_length_c 4.05050000
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 1
_chemical_formula_structural GdB4
_chemical_formula_sum 'Gd4 B16'
_cell_volume 206.00729003
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Gd Gd1 1 0.317460 0.817460 0.000000 1.0
Gd Gd2 1 0.182540 0.317460 0.000000 1.0
Gd Gd3 1 0.817460 0.682540 0.000000 1.0
Gd Gd4 1 0.682540 0.182540 0.000000 1.0
B B5 1 0.000000 0.000000 0.202900 1.0
B B6 1 0.500000 0.500000 0.797100 1.0
B B7 1 0.000000 0.000000 0.797100 1.0
B B8 1 0.500000 0.500000 0.202900 1.0
B B9 1 0.175900 0.038000 0.500000 1.0
B B10 1 0.962000 0.175900 0.500000 1.0
B B11 1 0.038000 0.824100 0.500000 1.0
B B12 1 0.675900 0.462000 0.500000 1.0
B B13 1 0.324100 0.538000 0.500000 1.0
B B14 1 0.824100 0.962000 0.500000 1.0
B B15 1 0.538000 0.675900 0.500000 1.0
B B16 1 0.462000 0.324100 0.500000 1.0
B B17 1 0.086700 0.586700 0.500000 1.0
B B18 1 0.413300 0.086700 0.500000 1.0
B B19 1 0.586700 0.913300 0.500000 1.0
B B20 1 0.913300 0.413300 0.500000 1.0
loop_
_atom_site_moment_label
_atom_site_moment_crystalaxis_x
_atom_site_moment_crystalaxis_y
_atom_site_moment_crystalaxis_z
Gd1 5.05000 5.05000 0.00000
Gd2 -5.05000 5.05000 0.00000
Gd3 5.05000 -5.05000 0.00000
Gd4 -5.05000 -5.05000 0.00000
"""
s_ncl = self.mcif_ncl.get_structures(primitive=False)[0]
cw = CifWriter(s_ncl, write_magmoms=True)
self.assertEqual(cw.__str__(), cw_ref_string)
# from list-type magmoms
list_magmoms = [list(m) for m in s_ncl.site_properties['magmom']]
# float magmoms (magnitude only)
float_magmoms = [float(m) for m in s_ncl.site_properties['magmom']]
s_ncl.add_site_property('magmom', list_magmoms)
cw = CifWriter(s_ncl, write_magmoms=True)
self.assertEqual(cw.__str__(), cw_ref_string)
s_ncl.add_site_property('magmom', float_magmoms)
cw = CifWriter(s_ncl, write_magmoms=True)
cw_ref_string_magnitudes = """# generated using pymatgen
data_GdB4
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 7.13160000
_cell_length_b 7.13160000
_cell_length_c 4.05050000
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 1
_chemical_formula_structural GdB4
_chemical_formula_sum 'Gd4 B16'
_cell_volume 206.00729003
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Gd Gd1 1 0.317460 0.817460 0.000000 1.0
Gd Gd2 1 0.182540 0.317460 0.000000 1.0
Gd Gd3 1 0.817460 0.682540 0.000000 1.0
Gd Gd4 1 0.682540 0.182540 0.000000 1.0
B B5 1 0.000000 0.000000 0.202900 1.0
B B6 1 0.500000 0.500000 0.797100 1.0
B B7 1 0.000000 0.000000 0.797100 1.0
B B8 1 0.500000 0.500000 0.202900 1.0
B B9 1 0.175900 0.038000 0.500000 1.0
B B10 1 0.962000 0.175900 0.500000 1.0
B B11 1 0.038000 0.824100 0.500000 1.0
B B12 1 0.675900 0.462000 0.500000 1.0
B B13 1 0.324100 0.538000 0.500000 1.0
B B14 1 0.824100 0.962000 0.500000 1.0
B B15 1 0.538000 0.675900 0.500000 1.0
B B16 1 0.462000 0.324100 0.500000 1.0
B B17 1 0.086700 0.586700 0.500000 1.0
B B18 1 0.413300 0.086700 0.500000 1.0
B B19 1 0.586700 0.913300 0.500000 1.0
B B20 1 0.913300 0.413300 0.500000 1.0
loop_
_atom_site_moment_label
_atom_site_moment_crystalaxis_x
_atom_site_moment_crystalaxis_y
_atom_site_moment_crystalaxis_z
Gd1 0.00000 0.00000 7.14178
Gd2 0.00000 0.00000 7.14178
Gd3 0.00000 0.00000 -7.14178
Gd4 0.00000 0.00000 -7.14178
"""
self.assertEqual(cw.__str__(), cw_ref_string_magnitudes)
# test we're getting correct magmoms in ncl case
s_ncl2 = self.mcif_ncl2.get_structures()[0]
list_magmoms = [list(m) for m in s_ncl2.site_properties['magmom']]
self.assertEqual(list_magmoms[0][0], 0.0)
self.assertAlmostEqual(list_magmoms[0][1], 5.9160793408726366)
self.assertAlmostEqual(list_magmoms[1][0], -5.1234749999999991)
self.assertAlmostEqual(list_magmoms[1][1], 2.9580396704363183)
        # test creating a structure without oxidation state doesn't raise errors
        s_manual = Structure(Lattice.cubic(4.2), ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
s_manual.add_spin_by_site([1, -1])
cw = CifWriter(s_manual, write_magmoms=True)
# check oxidation state
cw_manual_oxi_string = """# generated using pymatgen
data_CsCl
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 4.20000000
_cell_length_b 4.20000000
_cell_length_c 4.20000000
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 1
_chemical_formula_structural CsCl
_chemical_formula_sum 'Cs1 Cl1'
_cell_volume 74.08800000
_cell_formula_units_Z 1
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_type_symbol
_atom_type_oxidation_number
Cs+ 1.0
Cl+ 1.0
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Cs+ Cs1 1 0.000000 0.000000 0.000000 1
Cl+ Cl2 1 0.500000 0.500000 0.500000 1
loop_
_atom_site_moment_label
_atom_site_moment_crystalaxis_x
_atom_site_moment_crystalaxis_y
_atom_site_moment_crystalaxis_z
"""
s_manual.add_oxidation_state_by_site([1,1])
cw = CifWriter(s_manual, write_magmoms=True)
self.assertEqual(cw.__str__(), cw_manual_oxi_string)
@unittest.skipIf(pybtex is None, "pybtex not present")
def test_bibtex(self):
ref_bibtex_string = """@article{cif-reference-0,
author = "Blanco, J.A.",
journal = "PHYSICAL REVIEW B",
volume = "73",
year = "2006",
pages = "?--?"
}
"""
self.assertEqual(self.mcif_ncl.get_bibtex_string(), ref_bibtex_string)
if __name__ == '__main__':
unittest.main()
|
montoyjh/pymatgen
|
pymatgen/io/tests/test_cif.py
|
Python
|
mit
| 42,896
|
[
"VASP",
"pymatgen"
] |
7bbefb60d82e1d69519dfb29de43a698a6c141e53e9c98a78c45eebdc1f27b77
|
from splinter import Browser
# with Browser() as br:
# br.visit('http://www.baidu.com/')
br = Browser()
br.visit('http://www.baidu.com/')
br.fill("wd", "haha")
bt = br.find_by_id(u"su")
bt.click()
br.quit()
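# Illustrative sketch, not part of the original script: the same flow using the
# context-manager form hinted at in the commented lines above, so the browser is
# closed automatically. Defined only; call it yourself if wanted.
def baidu_search_with_context_manager(query="haha"):
    with Browser() as b:
        b.visit('http://www.baidu.com/')
        b.fill("wd", query)
        b.find_by_id(u"su").click()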
|
jypeitao/pythontools
|
splinterlearn.py
|
Python
|
apache-2.0
| 212
|
[
"VisIt"
] |
736b7d42f39a0fb60671d2e48267b732516a88223dc3101731dc43480b68a164
|
class Module(object):
def __init__(self, declarations):
self._declarations = declarations
def visit(self, visitor, arg):
return visitor.visit_module(self, arg)
class Class(object):
def __init__(self, name, declarations):
self._name = name
self._declarations = declarations
def visit(self, visitor, arg):
return visitor.visit_class(self, arg)
class Enum(object):
def __init__(self, name, cases):
self._name = name
self._cases = cases
def visit(self, visitor, arg):
return visitor.visit_enum(self, arg)
class EnumCase(object):
def __init__(self, name, members=None):
self._name = name
self._members = members
def visit(self, visitor, arg):
return visitor.visit_enum_case(self, arg)
class Attribute(object):
def __init__(self, name, tp):
self._name = name
self._tp = tp
def visit(self, visitor, arg):
return visitor.visit_attribute(self, arg)
class Function(object):
def __init__(self, name, arguments, return_type, body):
self._name = name
self._arguments = arguments
self._return_type = return_type
self._body = body
def visit(self, visitor, arg):
return visitor.visit_function(self, arg)
class Suite(object):
def __init__(self, body):
self._body = body
def visit(self, visitor, arg):
return visitor.visit_suite(self, arg)
class Return(object):
def __init__(self, value):
self._value = value
def visit(self, visitor, arg):
return visitor.visit_return(self, arg)
class Assignment(object):
def __init__(self, target, value):
self._target = target
self._value = value
def visit(self, visitor, arg):
return visitor.visit_assignment(self, arg)
class Match(object):
def __init__(self, condition, cases):
self._condition = condition
self._cases = cases
def visit(self, visitor, arg):
return visitor.visit_match(self, arg)
class MatchCase(object):
def __init__(self, matcher, body):
self._matcher = matcher
self._body = body
def visit(self, visitor, arg):
return visitor.visit_match_case(self, arg)
class If(object):
def __init__(self, condition, if_body, else_body):
self._condition = condition
self._if_body = if_body
self._else_body = else_body
def visit(self, visitor, arg):
return visitor.visit_if(self, arg)
class BinOp(object):
def __init__(self, op, lhs, rhs):
self._op = op
self._lhs = lhs
self._rhs = rhs
def visit(self, visitor, arg):
return visitor.visit_binop(self, arg)
class Name(object):
def __init__(self, value):
self._value = value
def visit(self, visitor, arg):
return visitor.visit_name(self, arg)
class Integer(object):
def __init__(self, value):
self._value = value
def visit(self, visitor, arg):
return visitor.visit_integer(self, arg)
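# Illustrative sketch, not part of the original module: a minimal visitor that
# walks these nodes and collects declared names. The visit_* method names
# mirror the dispatch calls above; the collector class itself is hypothetical.
class _NameCollector(object):
    def visit_module(self, node, names):
        for decl in node._declarations:
            decl.visit(self, names)
        return names
    def visit_class(self, node, names):
        names.append(node._name)
        for decl in node._declarations:
            decl.visit(self, names)
        return names
    def visit_function(self, node, names):
        names.append(node._name)
        return names
    def visit_attribute(self, node, names):
        names.append(node._name)
        return names
# Example: Module([Class("Point", [Attribute("x", "Int")])]).visit(_NameCollector(), [])
# would return ["Point", "x"].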
|
alex/bagel
|
bagel/ast.py
|
Python
|
bsd-3-clause
| 3,066
|
[
"VisIt"
] |
0cfbe7de70d0261763566d3df1026286fabc2ba42b3a5b07941eb3ef40929c96
|
"""
This module contains all the functions needed for smoothing or filtering a
time series.
"""
import numpy as np
from scipy import signal
########################## Wrapper to all functions ###########################
###############################################################################
def general_filtering(Y, method, parameters={}):
"""Wrapper function to contain all the possible smoothing functions in
order to be easy and quick usable for other parts of this package.
"""
assert(len(Y.shape) == 2)
# if method == 'order_filter':
# Ys = signal.order_filter(Y, **parameters)
# elif method == 'medfilt':
# Ys = signal.medfilt(Y, **parameters)
# elif method == 'wiener':
# Ys = signal.wiener(Y, **parameters)
# elif method == 'lfilter':
# Ys = signal.lfilter(Y, **parameters)
# elif method == 'filtfilt':
# Ys = signal.filtfilt(Y, **parameters)
# if method == 'savgol_filter':
# Ys = signal.savgol_filter(Y, **parameters)
if method == 'savitzky_golay':
Ys = savitzky_golay_matrix(Y, **parameters)
elif method == 'weighted_MA':
Ys = smooth_weighted_MA_matrix(Y, **parameters)
elif method == 'fft_passband':
Ys = fft_passband_filter(Y, **parameters)
elif method == 'reweighting':
Ys = general_reweighting(Y, **parameters)
## DISCRETE TS
elif method == 'collapse':
Ys = collapser(Y, **parameters)
elif method == 'substitution':
Ys = substitution(Y, **parameters)
return Ys
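# Illustrative usage sketch, not part of the original module; the parameter
# values below are arbitrary assumptions chosen only for demonstration.
def _example_general_filtering():
    """Smooth three noisy copies of a Gaussian bump through the wrapper."""
    t = np.linspace(-4, 4, 500)
    Y = np.vstack([np.exp(-t ** 2) + np.random.normal(0, 0.05, t.shape)
                   for _ in range(3)]).T
    # Savitzky-Golay smoothing applied column-wise by the wrapper.
    Ys = general_filtering(Y, 'savitzky_golay',
                           {'window_size': 31, 'order': 4})
    assert Ys.shape == Y.shape
    return Ys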
################################## functions ##################################
###############################################################################
def savitzky_golay_matrix(Y, window_size, order):
"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
This function acts as a wrapper of the savitzky_golay function.
Parameters
----------
Y : array_like, shape (N,M)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
        Must be less than `window_size` - 1.
Returns
-------
Ys : ndarray, shape (N,M)
        the smoothed signal (or its n-th derivative).
Examples
--------
import numpy as np
t = np.linspace(-4, 4, 500)
n = 10
X = [np.exp(-t**2) + np.random.normal(0, 0.05, t.shape) for i in range(n)]
X = np.vstack(X).T
Xsg = savitzky_golay_matrix(X, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, X[:,0], label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, Xsg[:,0], 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
See also
--------
savitzky_golay
More information
----------------
TODO: vectorization. np.vectorize?
"""
Ys = np.zeros(Y.shape)
for i in range(Y.shape[1]):
Ys[:, i] = savitzky_golay(Y[:, i], window_size, order)
return Ys
def smooth_weighted_MA_matrix(Y, window_len=11, window='hanning', args=[]):
"""Smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
    in the beginning and end part of the output signal.
This is the wrapper to accept a multidimensional array.
Parameters
----------
Y : numpy.array, shape (N,M)
the input signal
window_len : int
the dimension of the smoothing window; should be an odd integer
window : str
the type of window {'flat','hanning','hamming','bartlett','blackman'}
flat window will produce a moving average smoothing.
Returns
-------
Ys : array_like
the smoothed signal
Examples
--------
import numpy as np
t = np.linspace(-4, 4, 500)
n = 10
X = [np.exp(-t**2) + np.random.normal(0, 0.05, t.shape) for i in range(n)]
X = np.vstack(X).T
Xsg = smooth_weighted_MA_matrix(X, window_len=31)
import matplotlib.pyplot as plt
plt.plot(t, X[:,0], label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, Xsg[:,0], 'r', label='Filtered signal')
plt.legend()
plt.show()
See also
--------
smooth_weighted_MA_matrix
More information
----------------
TODO: vectorization. np.vectorize?
"""
Ys = np.zeros(Y.shape)
for i in range(Y.shape[1]):
Ys[:, i] = smooth_weighted_MA(Y[:, i], window_len, window, args)
return Ys
def fft_passband_filter(y, f_low=0, f_high=1, axis=0):
"""Pass band filter using fft for real 1D signal.
Parameters
----------
y : array_like shape (N,M)
the values of the time history of the signal.
    f_low : float
        low cut as a fraction of the Nyquist frequency (1 = sampling_rate/2)
    f_high : float
        high cut as a fraction of the Nyquist frequency (1 = sampling_rate/2)
axis : int
axis along the which each individual signal is represented.
Returns
-------
ys : ndarray, shape (N,M)
        the smoothed signal (or its n-th derivative).
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp(-t**2) + np.random.normal(0, 0.05, t.shape)
ysg = fft_passband_filter(y, f_low=0, f_high=0.05)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
See also
--------
    smooth_weighted_MA_matrix, savitzky_golay_matrix, np.fft.fft, np.fft.ifft
References
----------
.. [1] Nasir. Ahmed, Discrete-Time Signals and Systems
(Reston, Virginia: Reston Publication Company, 1983), pp. 243-258.
.. [2] Raphael C. Gonzalez and Richard E. Woods. Digital Image Processing
(Boston: Addison-Wesley, 1992), pp. 201-213, 244.
.. [3] Belle A. Shenoi, Introduction to digital signal processing and
filter design. John Wiley and Sons(2006) p.120. ISBN 978-0-471-46482-2
"""
# Length of the transformed signal
n = y.shape[axis]
N = int(2**(np.ceil(np.log(n)/np.log(2))))
# Signal to filter expressed in the frequency domain.
SIG = np.fft.fft(y, n=N, axis=axis)
# Transform the cuts in units of array elements.
n_low = int(np.floor((N-1)*f_low/2)+1)
fract_low = 1-((N-1)*f_low/2-np.floor((N-1)*f_low/2))
n_high = int(np.floor((N-1)*f_high/2)+1)
fract_high = 1-((N-1)*f_high/2-np.floor((N-1)*f_high/2))
# Creation of the slide
s = [slice(None) for i in range(y.ndim)]
# High-pass filter
if f_low > 0:
# Defining the signal regarding the cuts
s[axis] = 0
SIG[s] = 0
s[axis] = slice(1, n_low)
SIG[s] = 0
s[axis] = n_low
SIG[s] *= fract_low
s[axis] = -n_low
SIG[s] *= fract_low
if n_low != 1:
s[axis] = slice(-n_low+1, None)
SIG[s] = 0
# Low-pass filter
if f_high < 1:
# Defining the signal regarding the cuts
s[axis] = n_high
SIG[s] *= fract_high
s[axis] = slice(n_high+1, -n_high)
SIG[s] = 0
s[axis] = -n_high
SIG[s] *= fract_high
s[axis] = slice(0, n)
# Inverse transformation in order to recover the signal smoothed.
ys = np.real(np.fft.ifft(SIG, axis=axis)[s])
return ys
################## 1-array smoothing
#############################################
def smooth_weighted_MA(x, window_len=11, window='hanning', args=[]):
"""Smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
    in the beginning and end part of the output signal.
    This function acts over continuous-valued signals.
Parameters
----------
x : numpy.array, shape (N,)
the input signal
window_len : int
the dimension of the smoothing window; should be an odd integer
window : str
the type of window {'flat','hanning','hamming','bartlett','blackman'}
flat window will produce a moving average smoothing.
Returns
-------
y : array_like
the smoothed signal
Examples
--------
import numpy as np
t=np.linspace(-2,2,500)
x=np.sin(t)+np.random.randn(len(t))*0.1
y=smooth_weighted_MA(x,27)
import matplotlib.pyplot as plt
plt.plot(t, x, label='Noisy signal')
plt.plot(t, np.sin(t), 'k', lw=1.5, label='Original signal')
plt.plot(t, y, 'r', label='Filtered signal')
plt.legend()
plt.show()
See also
--------
savitzky_golay, np.hamming, np.hanning, np.bartlett, np.blackman,
scipy.signal.get_window
Code
----
http://wiki.scipy.org/Cookbook/SignalSmooth
More information
----------------
TODO: the window parameter could be the window itself if an array instead
of a string
NOTE: length(output) != length(input), to correct this:
"""
## 0. Check inputs
type0 = ['flat']
type1 = ['hamming', 'hanning', 'bartlett', 'blackman']
type2 = ['triang', 'flattop', 'parzen', 'bohman',
'blackmanharris', 'nuttall', 'barthann']
type3 = ['kaiser', 'gaussian', 'slepian', 'chebwin']
type4 = ['general_gaussian']
type5 = ['alpha_trim_window']
type6 = ['median_window', 'snn_1d']
if x.ndim != 1:
raise ValueError("Smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len < 3:
return x
    if window not in type0 + type1 + type2 + type3 + type4 + type5 + type6:
        raise ValueError("Window is not one of the possible values.")
if window in type3 and len(args) <= 0:
raise ValueError("Window selected needs an extra parameter.")
## 1. Creation of the window
if window in type0: # moving average
w = np.ones(window_len, 'd')
elif window in type1:
w = eval('np.'+window+'(window_len)')
elif window in type2:
inputs = "'"+window+"'"
w = eval('signal.get_window('+inputs+',window_len)')
elif window in type3:
inputs = "('"+window+"',"+str(args[0])+")"
w = eval('signal.get_window('+inputs+',window_len)')
elif window in type4:
inputs = "('"+window+"',"+str(args[0])+','+str(args[1])+")"
w = eval('signal.get_window('+inputs+',window_len)')
## 2. Convolution
s = np.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]]
if window in type5:
y = eval(window+'(s, args[0])')
        y = y[window_len//2:len(y)-window_len//2]
elif window in type6:
y = eval(window+'(s)')
        y = y[window_len//2:len(y)-window_len//2]
else:
y = np.convolve(w/w.sum(), s, mode='valid')
## 3. Format output: Same shape as input
    if window_len % 2:
        y = y[(window_len//2):-(window_len//2)]
    else:
        y = y[(window_len//2 - 1):-(window_len//2)]
return y
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
        Must be less than `window_size` - 1.
deriv : int
the order of the derivative to compute
(default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
        the smoothed signal (or its n-th derivative).
See also
--------
    smooth_weighted_MA
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-square fit with a
    polynomial of high order over an odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
Code
----
http://nbviewer.ipython.org/github/pv/
SciPy-CookBook/blob/master/ipython/SavitzkyGolay.ipynb
"""
from math import factorial
## 0. Control of input and setting needed variables
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError: # , msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size - 1) // 2
## 1. Precompute coefficients
b = np.mat([[k**i for i in order_range]
for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
## 2. Pad the signal at the extremes with values taken from the signal
firstvals = y[0] - np.abs(y[1:half_window+1][::-1] - y[0])
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
## 3. Convolve
ys = np.convolve(m[::-1], y, mode='valid')
return ys
############################# auxiliar functions ##############################
###############################################################################
from scipy.stats import tmean, scoreatpercentile
def trim_mean(arr, proportion):
"""
"""
#TODO: windowing (window len) and avoid error try:
# except: np.sort(p)[window_len/2]
percent = proportion*100.
lower_lim = scoreatpercentile(arr, percent/2)
upper_lim = scoreatpercentile(arr, 100-percent/2)
tm = tmean(arr, limits=(lower_lim, upper_lim), inclusive=(False, False))
return tm
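# Illustrative usage sketch, not part of the original module; the sample data
# below are arbitrary.
def _example_trim_mean():
    """Trimmed mean of a sample with one large outlier: trimming 40% of the
    values (20% from each tail) discards the outlier before averaging."""
    arr = np.array([0.9, 0.95, 1.0, 1.05, 1.1, 50.0])
    return trim_mean(arr, 0.4)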
def alpha_trim_window(window, alpha):
"""This function built a window in which weight each time in order to do
the moving average for each window. It prepares the window with the
weights in order to perform the trimmed average of the window.
When the alpha is too big we have the median filter.
Parameters
----------
window : array_like, shape (N,)
the values of the signal in a given window.
    alpha : float, in the interval [0,1]
proportion of values to be trimmed.
Returns
-------
ys : array_like, shape (N)
a {0,1}-array with the weights for the mean.
See also
--------
np.hamming, np.hanning, np.bartlett, np.blackman
Notes
-----
    The trimmed average consists in excluding from the average the alpha
    proportion of extreme values. The trimmed mean is a non-linear smoothing
    filter which avoids the disadvantage of weighting all the points equally
    by truncating, or 'trimming', the extremes before averaging. This
    filter is not edge-preserving.
Examples
--------
>>> w = np.random.normal(0, 0.05, 10)
>>> alpha_trim_window(w, alpha=0.1)
array([1, 1, 1, 1, 0, 1, 1, 1, 1, 0])
References
----------
.. [1] Hall, M. Smooth operator: smoothing seismic horizons and attributes.
The Leading Edge 26 (1), January 2007, p16-20. doi:10.1190/1.2431821
.. [2] http://subsurfwiki.org/wiki/Smoothing_filter
"""
# calculate upper and lower limits
percent = alpha * 100.
lower_limit = scoreatpercentile(window, percent/2)
upper_limit = scoreatpercentile(window, 100-percent/2)
# Extract logical vector
w = np.logical_and(window >= lower_limit, window <= upper_limit)
    w = w.astype(int)
if np.sum(w) == 0:
        w[window.argsort()[window.shape[0]//2]] = 1
window = w
return window
def median_window(window):
    """Weight window that selects the median element: a {0,1}-array with a
    single 1 at the position of the median value of the window."""
    w = np.zeros(window.shape)
    w[window.argsort()[window.shape[0]//2]] = 1
return w
def snn_1d(window):
    """Symmetric nearest neighbour (SNN) weight window: for each symmetric
    pair of samples around the centre, keep the one whose value is closer to
    the centre value, giving an edge-preserving {0,1} weight mask."""
    window_len = window.shape[0]
    w = np.zeros(window_len)
    center = window_len//2
w[center] = 1
w[window_len - center - 1] = 1
value_center = np.mean((w*window)[w*window != 0])
    for i in range(window_len//2):
res_a = abs(window[i] - value_center)
res_o = abs(window[window_len-1-i] - value_center)
if res_a <= res_o:
w[i] = 1
elif res_o <= res_a:
w[window_len-1-i] = 1
return w
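# Illustrative usage sketch, not part of the original module; the toy window
# below is arbitrary.
def _example_weight_windows():
    """Compare the median and symmetric-nearest-neighbour weight masks on a
    small window containing one outlying value."""
    window = np.array([0.2, 0.1, 5.0, 0.3, 0.25])
    w_median = median_window(window)  # single 1 at the median element
    w_snn = snn_1d(window)            # keeps, from each pair, the sample closest to the centre value
    return w_median, w_snn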
########################### discrete functions ################################
###############################################################################
def collapser(regimes, reference, collapse_info):
"""General functions which performs filtering/smoothing in discrete
time-series functions collapsing values to a concrete time points.
Parameters
----------
regimes: array_like, shape (N, M)
        signals in which some regimes are represented. These regimes are
        usually assigned to integer values.
reference: int or float
the value considered reference regime.
    collapse_info: dict, str, function or list
        information on how to collapse each possible regime of each time
        series of each element of the system.
Returns
-------
event_ts: array_like, shape (N, M)
        the signals, with the same shape as regimes, but with the elements
        that are not in the reference regime collapsed.
"""
## 0. Creation of needed variables
values = np.unique(regimes)
M = regimes.shape[1]
# A collapsing for each value
if type(collapse_info) == dict:
assert np.all([c in values for c in collapse_info.keys()])
# dictionary creation
aux = {}
for c in collapse_info.keys():
aux[c] = lambda x: general_collapser_func(x, collapse_info[c])
# list of dicts creation
collapse = [aux for i in range(M)]
# A precoded transformation for all elements and values
elif type(collapse_info) == str:
aux_0 = lambda x: general_collapser_func(x, method=collapse_info)
# dictionary creation
aux = {}
for v in values:
aux[v] = aux_0
# list of dicts creation
collapse = [aux for i in range(M)]
# A personal transformation for all elements and values
elif type(collapse_info).__name__ == 'function':
aux_0 = collapse_info
# dictionary creation
aux = {}
for v in values:
aux[v] = aux_0
# list of dicts creation
collapse = [aux for i in range(M)]
# A transformation for each element and possibly each value
elif type(collapse_info) == list:
# list of dicts creation
aux = []
for coll in collapse_info:
if type(coll) == dict:
assert np.all([c in values for c in coll.keys()])
# dictionary creation
aux_d = {}
for c in coll.keys():
aux_d[c] = lambda x: general_collapser_func(x, coll[c])
# list of dicts appending
aux.append(aux_d)
elif type(coll) == str:
aux_d = {}
for v in values:
aux_d[v] = lambda x: general_collapser_func(x, coll)
aux.append(aux_d)
elif type(coll).__name__ == 'function':
aux_d = {}
for v in values:
aux_d[v] = coll
aux.append(aux_d)
collapse = aux
## 1. Collapsing process
event_ts = reference*np.ones(regimes.shape)
for i in range(M):
for val in values:
# Compute vector of changes in regime.
APbool = (regimes[:, i] == val).astype(int)
# Collapsing to
APindices = collapse[i][val](APbool)
            # Imputation of the result
event_ts[APindices-1, i] = val
return event_ts
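# Illustrative usage sketch, not part of the original module; the regime
# series below is arbitrary. Note that the dict form of collapse_info needs an
# entry for every regime value present, including the reference one.
def _example_collapser():
    """Collapse each run of regime 1 to its central time point, keeping the
    reference regime 0 everywhere else."""
    regimes = np.array([[0, 0, 1, 1, 1, 0, 0, 1, 1, 0]]).T
    return collapser(regimes, reference=0,
                     collapse_info={0: 'center', 1: 'center'})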
def general_collapser_func(APbool, method):
"""Specific function which performs the collapsing. It is called by the
collapser. It is used over boolean masks.
Parameters
----------
APbool: array_like boolean, shape(N,)
        boolean mask over the regime we are interested in collapsing.
method: str or function
method to collapse regarding the available information.
Returns
-------
APindices: array_like
        the integer indices of this 1d array to which the values are
        collapsed.
"""
## 1. Preparing for collapsing
# Obtaining ups and downs
diffe = np.diff(APbool.astype(int), axis=0)
ups = np.where(diffe == 1)[0] + 1
downs = np.where(diffe == -1)[0] + 1
# Correcting the borders
if diffe[np.where(diffe)[0][-1]] == 1:
downs = np.hstack([downs, np.array([APbool.shape[0]])])
if diffe[np.where(diffe)[0][0]] == -1:
ups = np.hstack([np.array([0]), ups])
# Ranges in which there are changes
ranges = np.vstack([ups, downs]).T
assert(len(ranges.shape) == 2)
    ## 2. Collapse process
# Preparing for collapsing
if type(method).__name__ == 'function':
f = method
method = 'personal'
# Select the function and apply
if method == 'center':
APindices = ranges.mean(axis=1).round().astype(int)
elif method == 'initial':
APindices = ranges.min(axis=1).round().astype(int)
elif method == 'final':
APindices = ranges.max(axis=1).round().astype(int)
elif method == 'personal':
APindices = f(ranges)
return APindices
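# Illustrative usage sketch, not part of the original module; the activity
# mask below is arbitrary.
def _example_general_collapser_func():
    """Collapse a boolean activity mask directly with the three precoded
    methods, obtaining one index per run of active samples."""
    mask = np.array([0, 1, 1, 1, 0, 0, 1, 1, 0])
    centers = general_collapser_func(mask, 'center')   # one index per run (middle)
    starts = general_collapser_func(mask, 'initial')   # one index per run (start)
    ends = general_collapser_func(mask, 'final')       # one index per run (end)
    return centers, starts, ends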
def substitution(X, subs={}):
"""This function is used to substitute values of the signals for others.
Parameters
----------
X: array_like
signals of the system.
subs: dict
        dict whose keys are the values of the time series to be replaced and
        whose values are the replacements.
Returns
-------
X: array_like
        the initial array with the substituted values.
"""
# Substitution
for val in subs.keys():
indexs = np.where(X == val)
X[indexs] = subs[val]
return X
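# Illustrative usage sketch, not part of the original module; the codes below
# are arbitrary. Note that substitution modifies X in place.
def _example_substitution():
    """Relabel regime codes in a signal, merging regime 2 into regime 1."""
    X = np.array([0, 1, 2, 2, 0, 1])
    return substitution(X, subs={2: 1})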
def general_reweighting(Y, method, kwargs={}):
"""The general reweighting methods to change the values of the time series
depending of the global value of the system. It could be used in spiking
systems in which we want to weight more the activity of spiking alone than
the ones which spike alltogether.
Parameters
----------
Y: array_like, shape (N, M)
the signals of the system.
method: str, optional or function
        the method selected. If it is a str, a precoded method is chosen; if
        it is a function, that user-provided function is applied.
    kwargs: dict
        variables needed for the chosen method.
"""
if method == 'power_sutera':
Ys = power_sutera_reweighing(Y)
elif type(method).__name__ == 'function':
Ys = method(Y, **kwargs)
return Ys
def power_sutera_reweighing(Y, f_pow=lambda x: 1):
"""Re-weights the time series giving more value to the values of the time
serie when there are a low global activity.
References
---------
.. [1] Antonio Sutera et al. Simple connectome inference from partial
correlation statistics in calcium imaging
"""
## 0. Prepare variables needed
m = Y.shape[1]
global_y = np.sum(Y, axis=1)
## 1. Transformation
Yt = np.zeros(Y.shape)
for j in range(m):
Yt[:, j] = np.power((Y[:, j] + 1.),
np.power((1.+np.divide(1., global_y)),
f_pow(global_y)))
# Correct global 0
Yt[global_y == 0, :] = 1.
return Yt
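# Illustrative usage sketch, not part of the original module; the tiny spiking
# matrix below is arbitrary.
def _example_power_sutera_reweighing():
    """Reweight a two-cell spiking matrix: a cell spiking alone is boosted
    more than cells spiking together, and silent rows are set to 1."""
    Y = np.array([[1.0, 0.0],   # only cell 0 active -> strongest boost
                  [1.0, 1.0],   # both cells active  -> milder boost
                  [0.0, 0.0]])  # no activity        -> row forced to 1
    return power_sutera_reweighing(Y)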
|
tgquintela/TimeSeriesTools
|
TimeSeriesTools/Transformation/filtering.py
|
Python
|
mit
| 25,860
|
[
"Gaussian"
] |
ff412af14a93572812377d2a737e65eec4561ccfa9f66f46565f410817f8fff8
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Written by Alex Roitman
"Rebuild reference map tables"
#-------------------------------------------------------------------------
#
# python modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
log = logging.getLogger(".RebuildRefMap")
#-------------------------------------------------------------------------
#
# gtk modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gui.plug import tool
from gramps.gui.dialog import OkDialog
#-------------------------------------------------------------------------
#
# runTool
#
#-------------------------------------------------------------------------
class RebuildRefMap(tool.Tool):
def __init__(self, dbstate, user, options_class, name, callback=None):
uistate = user.uistate
tool.Tool.__init__(self, dbstate, options_class, name)
if self.db.readonly:
return
self.db.disable_signals()
if uistate:
self.callback = uistate.pulse_progressbar
uistate.set_busy_cursor(True)
uistate.progress.show()
uistate.push_message(dbstate, _("Rebuilding reference maps..."))
else:
self.callback = None
print(_("Rebuilding reference maps..."))
self.db.reindex_reference_map(self.callback)
if uistate:
uistate.set_busy_cursor(False)
uistate.progress.hide()
OkDialog(_("Reference maps rebuilt"),
_('All reference maps have been rebuilt.'),
parent=uistate.window)
else:
print(_("All reference maps have been rebuilt."))
self.db.enable_signals()
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class RebuildRefMapOptions(tool.ToolOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name,person_id=None):
tool.ToolOptions.__init__(self, name,person_id)
|
SNoiraud/gramps
|
gramps/plugins/tool/rebuildrefmap.py
|
Python
|
gpl-2.0
| 3,424
|
[
"Brian"
] |
c9ca6b486c13181ca089bcc8442c2e4b32abdb22b14c2efb48a3158b61a8872a
|
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, in_data, out_attributes, user_options, num_cores,
out_path):
from genomicode import config
from genomicode import filelib
from genomicode import parallel
from genomicode import alignlib
bwa = filelib.which_assert(config.bwa)
ref = alignlib.standardize_reference_genome(
in_data.identifier, out_path, use_symlinks=True)
# bwa index <out_stem.fa>
# Makes files:
# <out_stem>.fa.amb .ann .bwt .pac .sa
sq = parallel.quote
cmd = [
sq(bwa),
"index",
sq(ref.fasta_file_full),
]
parallel.sshell(cmd, path=out_path)
# Make sure the indexing worked properly.
EXTENSIONS = [".amb", ".ann", ".bwt", ".pac", ".sa"]
for ext in EXTENSIONS:
f = "%s%s" % (ref.fasta_file_full, ext)
assert filelib.exists_nz(f), "Missing: %s" % f
def name_outfile(self, antecedents, user_options):
return "reference.bwa"
|
jefftc/changlab
|
Betsy/Betsy/modules/index_reference_bwa.py
|
Python
|
mit
| 1,180
|
[
"BWA"
] |
567f77c10ca57ed927243bb297cc69bca3df785bf482fb69a528a2ef820c280c
|
#!/usr/bin/env python
import os
import sys
import pandas as pd
import numpy as np
import netCDF4 as nc
#import tables
# adjust the paths here, where your files are located
wfd_base_dir = "/data/external/global/Climate/WFD"
lpj_soil_data = "/data/LPJ/input/soils_lpj.dat"
# spatial resolution in degree
res = 0.5
# global longitudes/latitudes;
# lon from west to east
# lat from north to south
out_lat = np.arange(90 - res/2, -90 - res/2, -res)
out_lon = np.arange(-180 + res/2, 180 + res/2, res)
# initialize the input. Don't close the netCDF file,
# data is read, when it is used and not immediately!
# read the WFD land surface file
wfd_ncin = nc.Dataset(os.path.join(wfd_base_dir, 'WFD-land-lat-long-z.nc'), 'r')
wfd_land_id = wfd_ncin.variables["land"]
wfd_lat = wfd_ncin.variables["Latitude"]
wfd_lon = wfd_ncin.variables["Longitude"]
wfd_z = wfd_ncin.variables["Z"]
wfd_lat_id = (max(out_lat[:]) - wfd_lat[:]) * 2
wfd_lon_id = (wfd_lon[:] - min(out_lon[:])) * 2
wfd_lat_id = np.array(wfd_lat_id.round(0), 'i')
wfd_lon_id = np.array(wfd_lon_id.round(0), 'i')
# convert to 2D
wfd_z_2d = np.zeros((len(out_lat), len(out_lon)) , "f") - 999.9
wfd_z_2d[wfd_lat_id, wfd_lon_id] = wfd_z[:]
# read the LPJ soil data
lpj_soil = pd.io.parsers.read_csv(lpj_soil_data,
skipinitialspace=True, sep=" ",
header=None, names=["lon", "lat", "type"],
dtype={'lon':np.float32,'lat':np.float32,'type':np.int32})
# lpj coordinates are at the south-western edge of a gridcell
lpj_soil.lon += res/2.0
lpj_soil.lat += res/2.0
lpj_lat_id = (max(out_lat[:]) - lpj_soil.lat[:]) * 2
lpj_lon_id = (lpj_soil.lon[:] - min(out_lon[:])) * 2
lpj_lat_id = np.array(lpj_lat_id.round(0), 'i')
lpj_lon_id = np.array(lpj_lon_id.round(0), 'i')
# convert to 2D
lpj_soil_2d = np.zeros((len(out_lat), len(out_lon)) , "i")
lpj_soil_2d[lpj_lat_id, lpj_lon_id] = lpj_soil.type[:]
# WFD and WFDEI do not fully agree (figured out manually)
# set these 10 locations to 0
lon_exclude = np.array([-106.25, 49.75, 49.25, 46.25, 46.25, 45.75, 48.25, 44.75, -67.25, -67.25])
lat_exclude = np.array([ 77.25, 49.25, 48.75, 47.25, 46.75, 45.25, 40.25, 1.25, -49.25, -49.75])
for i in range(len(lon_exclude)):
lpj_soil_2d[(np.max(out_lat) - lat_exclude[i]) * 2, (lon_exclude[i] - np.min(out_lon)) * 2] = 0
# take the smallest overlap of WFD and LPJ soil data
lpj_soil_2d[wfd_z_2d == -999.9] = 0
wfd_z_2d[lpj_soil_2d == 0] = -999.9
# create a new index (count along longitude and from north to south)
index_2d = np.arange(1, len(out_lat) * len(out_lon) + 1).reshape(len(out_lat), len(out_lon))
index_2d = np.where(lpj_soil_2d == 0, -1, index_2d)
out_index = range( len(index_2d[index_2d != -1]) )
index_2d[index_2d != -1] = out_index[:]
# create 2D longitude and latitude field
lon_2d = [out_lon] * len(out_lat)
lon_2d = np.array(lon_2d)
lat_2d = [out_lat] * len(out_lon)
lat_2d = np.array(lat_2d)
lat_2d = lat_2d[:].transpose()
#########################
### write the 2D data
#########################
ncout = nc.Dataset(os.path.join(wfd_base_dir, 'LPJ', 'spatial_2D.nc'), 'w')
ncout_dim_lat = ncout.createDimension('lat', len(out_lat))
ncout_dim_lon = ncout.createDimension('lon', len(out_lon))
ncout_lat = ncout.createVariable('lat', 'f4', ('lat'))
ncout_lat.standard_name = "latitude"
ncout_lat.long_name = "latitude"
ncout_lat.units = "degrees_north"
ncout_lon = ncout.createVariable('lon', 'f4', ('lon'))
ncout_lon.standard_name = "longitude"
ncout_lon.long_name = "longitude"
ncout_lon.units = "degrees_east"
ncout_z = ncout.createVariable('altitude', 'f4', ('lat', 'lon'), fill_value=-999.9)
ncout_z.long_name = "altitude"
ncout_z.units = "m"
ncout_z.missing_value = -999.9
ncout_soil = ncout.createVariable('soiltype', 'i', ('lat', 'lon'), fill_value=0)
ncout_soil.long_name = "LPJ soilcode"
ncout_soil.units = "-"
ncout_soil.missing_value = 0
ncout_index_2d = ncout.createVariable('index', 'i', ('lat', 'lon'), fill_value=-1)
ncout_index_2d.long_name = "index of gridcell"
ncout_index_2d.units = "-"
ncout_index_2d.missing_value = -1
ncout_lat[:] = out_lat
ncout_lon[:] = out_lon
ncout_z[:] = wfd_z_2d[:]
ncout_soil[:] = lpj_soil_2d[:]
ncout_index_2d[:] = index_2d[:]
ncout_lon_2d = ncout.createVariable('lon_2d', 'f4', ('lat', 'lon'), fill_value=-999.9)
ncout_lat_2d = ncout.createVariable('lat_2d', 'f4', ('lat', 'lon'), fill_value=-999.9)
ncout_lon_2d[:] = lon_2d
ncout_lat_2d[:] = lat_2d
ncout.close()
out_lon_1d = lon_2d[index_2d != -1]
out_lat_1d = lat_2d[index_2d != -1]
out_soil = lpj_soil_2d[index_2d != -1]
out_index = index_2d[index_2d != -1]
# write the gridlist file for LPJ input
fout = open(os.path.join(wfd_base_dir, 'LPJ', "gridlist_cf.txt"), 'w')
for i in range(len(out_index)):
fout.write("%7.2f %7.2f\n" % (out_lon_1d[i], out_lat_1d[i]))
fout.close()
fout = open(os.path.join(wfd_base_dir, 'LPJ', "landid_cf.txt"), 'w')
for i in range(len(out_index)):
fout.write("%i\n" % (out_index[i]))
fout.close()
#######################################################
### create the NetCDF files for each input variable
#######################################################
for v in ['Rainf', 'Tair', 'SWdown']:
if (v == "Rainf"):
ncout = nc.Dataset(os.path.join(wfd_base_dir, 'LPJ', 'prec.nc'), 'w')
elif (v == "Tair"):
ncout = nc.Dataset(os.path.join(wfd_base_dir, 'LPJ', 'temp.nc'), 'w')
elif (v == "SWdown"):
ncout = nc.Dataset(os.path.join(wfd_base_dir, 'LPJ', 'insol.nc'), 'w')
ncout.Conventions = "CF-1.4"
ncout_dim_id = ncout.createDimension('land_id', len(out_index))
ncout_dim_time = ncout.createDimension('time', None)
ncout_id = ncout.createVariable('land_id', 'i', ('land_id',))
ncout_id[:] = out_index
ncout_time = ncout.createVariable('time', 'i', ('time',))
ncout_time.standard_name = "time"
ncout_time.long_name = "time"
ncout_time.units = "days since 1901-01-01 00:00:00"
ncout_time.calendar = "365_day"
ncout_lon = ncout.createVariable('lon', 'f', ('land_id',))
ncout_lon.standard_name = "longitude"
ncout_lon.long_name = "longitude"
ncout_lon.units = "degrees_east"
ncout_lat = ncout.createVariable('lat', 'f', ('land_id',))
ncout_lat.standard_name = "latitude"
ncout_lat.long_name = "latitude"
ncout_lat.units = "degrees_north"
ncout_soil = ncout.createVariable('soil', 'f', ('land_id',))
ncout_soil.long_name = "LPJ soilcode"
ncout_soil.units = "-"
ncout_lon[:] = out_lon_1d
ncout_lat[:] = out_lat_1d
ncout_soil[:] = out_soil
##############################################
### read and write the compressed WFD data
##############################################
if (v == "Rainf"):
ncout_clim = ncout.createVariable('prec', 'f', ('time', 'land_id',))
ncout_clim.standard_name = "precipitation_amount"
ncout_clim.long_name = "Daily precipitation amount"
ncout_clim.units = "kg m-2"
elif (v == "Tair"):
ncout_clim = ncout.createVariable('temp', 'f', ('time', 'land_id',))
ncout_clim.standard_name = "air_temperature"
ncout_clim.long_name = "Near surface air temperature at 2m"
ncout_clim.units = "K"
elif (v == "SWdown"):
ncout_clim = ncout.createVariable('insol', 'f', ('time', 'land_id',))
ncout_clim.standard_name = "surface_downwelling_shortwave_flux"
ncout_clim.long_name = "Mean daily surface incident shortwave radiation"
ncout_clim.units = "W m-2"
ncout_clim.coordinates = "lat lon"
days = 0
for y in range(1901, 1979):
print(v, y)
for m in range(1, 13):
if (v == "Rainf"):
in_file = "%s_daily_WFD_GPCC_%04i%02i.nc" % (v, y, m)
else:
in_file = "%s_daily_WFD_%04i%02i.nc" % (v, y, m)
in_file = os.path.join(wfd_base_dir, v, in_file)
clim_ncin = nc.Dataset(in_file, 'r')
clim_land_id = clim_ncin.variables["land"]
clim = clim_ncin.variables[v]
if (v=="Rainf"):
clim = clim[:] * 86400.0
else:
clim = clim[:]
if (clim.shape[0] == 29):
clim = clim[0:28,:]
if (sum(clim_land_id[:] == wfd_land_id[:]) != len(clim_land_id)):
print("shape of ID not matching in '%s'" % in_file)
sys.exit(99)
clim_2d = np.zeros((len(out_lat), len(out_lon)) , "f") - 999.9
for d in np.arange(clim.shape[0]):
clim_2d[wfd_lat_id, wfd_lon_id] = clim[d,:]
ncout_clim[days,:] = np.array(clim_2d[index_2d != -1], 'f')
days += 1
ncout.sync()
##############################################
### read and write the WFDEI data
##############################################
for y in range(1979, 2001):
print(v, y)
for m in range(1, 13):
if (v == "Rainf"):
in_file = "%s_daily_WFDEI_GPCC_%04i%02i.nc" % (v, y, m)
else:
in_file = "%s_daily_WFDEI_%04i%02i.nc" % (v, y, m)
in_file = os.path.join(wfd_base_dir, "WFDEI", v, in_file)
clim_ncin = nc.Dataset(in_file, 'r')
clim = clim_ncin.variables[v]
if (v=="Rainf"):
clim = clim[:] * 86400.0
else:
clim = clim[:]
if (clim.shape[0] == 29):
clim = clim[0:28,:,:]
for d in np.arange(clim.shape[0]):
clim_2d = clim[d,:,:]
clim_2d[clim_2d>1.e19] = -999.9
clim_2d = np.flipud(clim_2d)
clim_out = np.array(clim_2d[index_2d != -1], 'f')
ncout_clim[days,:] = clim_out
days += 1
ncout.sync()
##############################################
### write the time var and close
##############################################
ncout_time[:] = np.arange(days) + 1
ncout.close()
|
joergsteinkamp/wfd4lpj
|
convert4lpj.py
|
Python
|
gpl-3.0
| 10,152
|
[
"NetCDF"
] |
b3f4234774be9b363af7ac4776cfcb20a4eb87fb89f8fdcb78a42ac571986e75
|
import ocl
import camvtk
import time
import vtk
import datetime
import math
def waterline_time(zheights, diam, length,s,sampling):
t_total = time.time()
for zh in zheights:
cutter = ocl.BallCutter( diam , length )
wl = ocl.Waterline()
wl.setSTL(s)
wl.setCutter(cutter)
wl.setZ(zh)
wl.setSampling(sampling)
wl.setThreads(1)
wl.run()
cutter_loops = wl.getLoops()
for l in cutter_loops:
loops.append(l)
timeTotal = time.time()-t_total
print(" ALL Waterlines done in ", timeTotal ," s")
return timeTotal
if __name__ == "__main__":
print(ocl.version())
a = ocl.Point(0,1,0.3)
b = ocl.Point(1,0.5,0.3)
c = ocl.Point(0,0,0)
t = ocl.Triangle(b,c,a)
s = ocl.STLSurf()
s.addTriangle(t) # a one-triangle STLSurf
# alternatively, run on the tux model
stl = camvtk.STLSurf("../../stl/gnu_tux_mod.stl")
#myscreen.addActor(stl)
#stl.SetWireframe() # render tux as wireframe
#stl.SetSurface() # render tux as surface
#stl.SetColor(camvtk.cyan)
polydata = stl.src.GetOutput()
s = ocl.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
zheights=[-0.3, -0.2, -0.1, -0.05, 0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.26, 0.27, 0.28, 0.29 ] # the z-coordinates for the waterlines
zheights=[-0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, -0.05, 0.0, 0.05, 0.1, 0.15, 0.2, 0.28 ]
zheights=[ -0.05, 0.0, 0.05, 0.1, 0.15, 0.2, 0.28]
zheights=[ 1.75145 ]
    diam = 0.6 # cutter diameter used for the waterlines
length = 5
loops = []
cutter = ocl.CylCutter( 1 , 1 )
sampling=0.005
waterline_time(zheights, diam, length,s,sampling)
|
aewallin/opencamlib
|
examples/python/waterline/waterline_6_weave2.py
|
Python
|
lgpl-2.1
| 1,756
|
[
"VTK"
] |
b20b0bf08a1dd6313be49926293da8327137d840ac7e96f2eccefc9271b7dbf5
|
"""
WikiLinks Extension for Python-Markdown
======================================
Converts [[WikiLinks]] to relative links.
See <https://pythonhosted.org/Markdown/extensions/wikilinks.html>
for documentation.
Original code Copyright [Waylan Limberg](http://achinghead.com/).
All changes Copyright The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
from markdown import Extension
from markdown.inlinepatterns import Pattern
import re
from abimkdocs.website import Website
def build_url(label, base, end):
""" Build a url from the label, a base, and an end. """
clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
#print(clean_label)
variables = website.variables_code["abinit"]
if clean_label in variables:
var = variables[clean_label]
return "/input_variables/%s/#%s" % (var.varset, var.name)
if clean_label in website.bib_data.entries:
return "/bibliography/#%s" % clean_label
return '%s%s%s' % (base, clean_label, end)
class WikiLinkExtension(Extension):
def __init__(self, *args, **kwargs):
self.config = {
            'base_url': ['/', 'String to append to beginning of URL.'],
'end_url': ['/', 'String to append to end of URL.'],
'html_class': ['wikilink', 'CSS hook. Leave blank for none.'],
'build_url': [build_url, 'Callable formats URL from label.'],
}
super(WikiLinkExtension, self).__init__(*args, **kwargs)
def extendMarkdown(self, md, md_globals):
self.md = md
# append to end of inline patterns
wikilinkPattern = WikiLinks(Website.WIKILINK_RE, self.getConfigs())
wikilinkPattern.md = md
#md.inlinePatterns.add('wikilink', wikilinkPattern, "<not_strong")
        # This is needed to treat [[ngfft]](1:3) before the []() markdown syntax
md.inlinePatterns.add('wikilink', wikilinkPattern, "<link")
class WikiLinks(Pattern):
def __init__(self, pattern, config):
super(WikiLinks, self).__init__(pattern)
self.config = config
def handleMatch(self, m):
token = m.group(2)
#base_url, end_url, html_class = self._getMeta()
#url = self.config['build_url'](token, base_url, end_url)
#page_rpath = "??"
#if hasattr(self.md, 'Meta') and "rpath" in self.md.Meta:
# page_rpath = self.md.Meta["rpath"][0]
        # Remove quotes (needed in py2.7 because mkdocs does not use pyyaml to parse meta).
#if "authors" in self.md.Meta:
# print("authors", self.md.Meta["authors"])
website = Website.get()
page_rpath = None
try:
#print("meta_rpath:", self.md.Meta["rpath"][0])
page_rpath = self.md.Meta["rpath"][0].replace("'", "").replace('"', "")
return website.get_wikilink(token, page_rpath)
except Exception as exc:
#print("Meta", self.md.Meta)
website.warn("Exception `%s:%s`\nwhile treating wikilink token: `%s` in `%s`" %
(exc.__class__, str(exc), token, page_rpath))
return ""
def _getMeta(self):
""" Return meta data or config data. """
base_url = self.config['base_url']
end_url = self.config['end_url']
html_class = self.config['html_class']
if hasattr(self.md, 'Meta'):
if 'wiki_base_url' in self.md.Meta:
base_url = self.md.Meta['wiki_base_url'][0]
if 'wiki_end_url' in self.md.Meta:
end_url = self.md.Meta['wiki_end_url'][0]
if 'wiki_html_class' in self.md.Meta:
html_class = self.md.Meta['wiki_html_class'][0]
return base_url, end_url, html_class
def makeExtension(*args, **kwargs):
return WikiLinkExtension(*args, **kwargs)
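# Illustrative usage sketch, not part of the original module: registering the
# extension with python-markdown. It assumes the abimkdocs Website singleton
# has already been initialized elsewhere, since wikilink tokens are resolved
# through Website.get(); treat the whole call as a hypothetical setup.
def _example_usage(text="See [[ngfft]] for details."):
    import markdown
    return markdown.markdown(text, extensions=[makeExtension()])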
|
abinit/abinit
|
abimkdocs/wikilinks.py
|
Python
|
gpl-3.0
| 3,827
|
[
"ABINIT"
] |
63efa69b3808c10586054775017f88b1b84a5ac5b8a777465e7d28eb04623461
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.securitycenter_v1.services.security_center import (
SecurityCenterAsyncClient,
)
from google.cloud.securitycenter_v1.services.security_center import SecurityCenterClient
from google.cloud.securitycenter_v1.services.security_center import pagers
from google.cloud.securitycenter_v1.services.security_center import transports
from google.cloud.securitycenter_v1.types import access
from google.cloud.securitycenter_v1.types import bigquery_export
from google.cloud.securitycenter_v1.types import external_system
from google.cloud.securitycenter_v1.types import external_system as gcs_external_system
from google.cloud.securitycenter_v1.types import finding
from google.cloud.securitycenter_v1.types import finding as gcs_finding
from google.cloud.securitycenter_v1.types import indicator
from google.cloud.securitycenter_v1.types import mitre_attack
from google.cloud.securitycenter_v1.types import mute_config
from google.cloud.securitycenter_v1.types import mute_config as gcs_mute_config
from google.cloud.securitycenter_v1.types import notification_config
from google.cloud.securitycenter_v1.types import (
notification_config as gcs_notification_config,
)
from google.cloud.securitycenter_v1.types import organization_settings
from google.cloud.securitycenter_v1.types import (
organization_settings as gcs_organization_settings,
)
from google.cloud.securitycenter_v1.types import run_asset_discovery_response
from google.cloud.securitycenter_v1.types import security_marks
from google.cloud.securitycenter_v1.types import security_marks as gcs_security_marks
from google.cloud.securitycenter_v1.types import securitycenter_service
from google.cloud.securitycenter_v1.types import source
from google.cloud.securitycenter_v1.types import source as gcs_source
from google.cloud.securitycenter_v1.types import vulnerability
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import options_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.type import expr_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert SecurityCenterClient._get_default_mtls_endpoint(None) is None
assert (
SecurityCenterClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
SecurityCenterClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
SecurityCenterClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
SecurityCenterClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
SecurityCenterClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
)
@pytest.mark.parametrize(
"client_class", [SecurityCenterClient, SecurityCenterAsyncClient,]
)
def test_security_center_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "securitycenter.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.SecurityCenterGrpcTransport, "grpc"),
(transports.SecurityCenterGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_security_center_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class", [SecurityCenterClient, SecurityCenterAsyncClient,]
)
def test_security_center_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "securitycenter.googleapis.com:443"
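# A hedged usage sketch of the service-account constructors the two tests above
# exercise. "path/to/key.json" is a hypothetical path, not a real credential file.
def _example_clients_from_service_account_key():
    client_from_file = SecurityCenterClient.from_service_account_file("path/to/key.json")
    client_from_json = SecurityCenterClient.from_service_account_json("path/to/key.json")
    # Both constructors are expected to target the default endpoint.
    assert client_from_file.transport._host == "securitycenter.googleapis.com:443"
    return client_from_file, client_from_json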
def test_security_center_client_get_transport_class():
transport = SecurityCenterClient.get_transport_class()
available_transports = [
transports.SecurityCenterGrpcTransport,
]
assert transport in available_transports
transport = SecurityCenterClient.get_transport_class("grpc")
assert transport == transports.SecurityCenterGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(SecurityCenterClient, transports.SecurityCenterGrpcTransport, "grpc"),
(
SecurityCenterAsyncClient,
transports.SecurityCenterGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
SecurityCenterClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SecurityCenterClient),
)
@mock.patch.object(
SecurityCenterAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SecurityCenterAsyncClient),
)
def test_security_center_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(SecurityCenterClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(SecurityCenterClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
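# A hedged sketch of the client_options path the test above verifies: overriding the
# API endpoint and quota project at construction time. "squid.clam.whelk" and
# "octopus" are the same placeholder values the test uses.
def _example_client_with_options():
    options = client_options.ClientOptions(
        api_endpoint="squid.clam.whelk", quota_project_id="octopus",
    )
    return SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )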
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(SecurityCenterClient, transports.SecurityCenterGrpcTransport, "grpc", "true"),
(
SecurityCenterAsyncClient,
transports.SecurityCenterGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(SecurityCenterClient, transports.SecurityCenterGrpcTransport, "grpc", "false"),
(
SecurityCenterAsyncClient,
transports.SecurityCenterGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
SecurityCenterClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SecurityCenterClient),
)
@mock.patch.object(
SecurityCenterAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SecurityCenterAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_security_center_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
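# A hedged sketch of the environment-driven mTLS switch exercised above: with
# GOOGLE_API_USE_CLIENT_CERTIFICATE="true" and a client cert source supplied, the
# client is expected to target DEFAULT_MTLS_ENDPOINT. Only the configuration shape
# is shown here; no client is constructed.
def _example_mtls_env_configuration():
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        expected_host = SecurityCenterClient.DEFAULT_MTLS_ENDPOINT
    return options, expected_host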
@pytest.mark.parametrize(
"client_class", [SecurityCenterClient, SecurityCenterAsyncClient]
)
@mock.patch.object(
SecurityCenterClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SecurityCenterClient),
)
@mock.patch.object(
SecurityCenterAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SecurityCenterAsyncClient),
)
def test_security_center_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
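# A hedged sketch of calling the classmethod covered above directly; with
# GOOGLE_API_USE_MTLS_ENDPOINT="never" it is expected to return the plain endpoint
# and no certificate source.
def _example_resolve_endpoint_and_cert_source():
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        endpoint, cert_source = SecurityCenterClient.get_mtls_endpoint_and_cert_source()
    assert endpoint == SecurityCenterClient.DEFAULT_ENDPOINT
    assert cert_source is None
    return endpoint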
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(SecurityCenterClient, transports.SecurityCenterGrpcTransport, "grpc"),
(
SecurityCenterAsyncClient,
transports.SecurityCenterGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_security_center_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
SecurityCenterClient,
transports.SecurityCenterGrpcTransport,
"grpc",
grpc_helpers,
),
(
SecurityCenterAsyncClient,
transports.SecurityCenterGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_security_center_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
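# A hedged usage sketch of the credentials_file option exercised above;
# "credentials.json" is a placeholder path, not a real key file.
def _example_client_from_credentials_file_option():
    options = client_options.ClientOptions(credentials_file="credentials.json")
    return SecurityCenterClient(client_options=options)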
def test_security_center_client_client_options_from_dict():
with mock.patch(
"google.cloud.securitycenter_v1.services.security_center.transports.SecurityCenterGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = SecurityCenterClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
SecurityCenterClient,
transports.SecurityCenterGrpcTransport,
"grpc",
grpc_helpers,
),
(
SecurityCenterAsyncClient,
transports.SecurityCenterGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_security_center_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"securitycenter.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=None,
default_host="securitycenter.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"request_type", [securitycenter_service.BulkMuteFindingsRequest, dict,]
)
def test_bulk_mute_findings(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.bulk_mute_findings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.bulk_mute_findings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.BulkMuteFindingsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
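# A hedged usage sketch of the RPC exercised above. bulk_mute_findings returns a
# long-running operation future; "organizations/123" is a hypothetical parent.
def _example_bulk_mute_findings_usage():
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials())
    operation = client.bulk_mute_findings(parent="organizations/123")
    # Blocking on completion would look like: result = operation.result()
    return operation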
def test_bulk_mute_findings_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.bulk_mute_findings), "__call__"
) as call:
client.bulk_mute_findings()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.BulkMuteFindingsRequest()
@pytest.mark.asyncio
async def test_bulk_mute_findings_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.BulkMuteFindingsRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.bulk_mute_findings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.bulk_mute_findings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.BulkMuteFindingsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_bulk_mute_findings_async_from_dict():
await test_bulk_mute_findings_async(request_type=dict)
def test_bulk_mute_findings_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.BulkMuteFindingsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.bulk_mute_findings), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.bulk_mute_findings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_bulk_mute_findings_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.BulkMuteFindingsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.bulk_mute_findings), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.bulk_mute_findings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_bulk_mute_findings_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.bulk_mute_findings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.bulk_mute_findings(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_bulk_mute_findings_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.bulk_mute_findings(
securitycenter_service.BulkMuteFindingsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_bulk_mute_findings_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.bulk_mute_findings), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.bulk_mute_findings(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_bulk_mute_findings_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.bulk_mute_findings(
securitycenter_service.BulkMuteFindingsRequest(), parent="parent_value",
)
@pytest.mark.parametrize(
"request_type", [securitycenter_service.CreateSourceRequest, dict,]
)
def test_create_source(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_source), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_source.Source(
name="name_value",
display_name="display_name_value",
description="description_value",
canonical_name="canonical_name_value",
)
response = client.create_source(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.CreateSourceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_source.Source)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.canonical_name == "canonical_name_value"
def test_create_source_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_source), "__call__") as call:
client.create_source()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.CreateSourceRequest()
@pytest.mark.asyncio
async def test_create_source_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.CreateSourceRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_source), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcs_source.Source(
name="name_value",
display_name="display_name_value",
description="description_value",
canonical_name="canonical_name_value",
)
)
response = await client.create_source(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.CreateSourceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_source.Source)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.canonical_name == "canonical_name_value"
@pytest.mark.asyncio
async def test_create_source_async_from_dict():
await test_create_source_async(request_type=dict)
def test_create_source_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.CreateSourceRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_source), "__call__") as call:
call.return_value = gcs_source.Source()
client.create_source(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_source_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.CreateSourceRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_source), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_source.Source())
await client.create_source(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_source_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_source), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_source.Source()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_source(
parent="parent_value", source=gcs_source.Source(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].source
mock_val = gcs_source.Source(name="name_value")
assert arg == mock_val
def test_create_source_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_source(
securitycenter_service.CreateSourceRequest(),
parent="parent_value",
source=gcs_source.Source(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_source_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_source), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_source.Source())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_source(
parent="parent_value", source=gcs_source.Source(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].source
mock_val = gcs_source.Source(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_source_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_source(
securitycenter_service.CreateSourceRequest(),
parent="parent_value",
source=gcs_source.Source(name="name_value"),
)
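# A hedged usage sketch of create_source, mirroring the flattened arguments the
# tests above pass; "organizations/123" is a hypothetical parent.
def _example_create_source_usage():
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials())
    return client.create_source(
        parent="organizations/123",
        source=gcs_source.Source(
            display_name="display_name_value", description="description_value",
        ),
    )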
@pytest.mark.parametrize(
"request_type", [securitycenter_service.CreateFindingRequest, dict,]
)
def test_create_finding(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_finding), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_finding.Finding(
name="name_value",
parent="parent_value",
resource_name="resource_name_value",
state=gcs_finding.Finding.State.ACTIVE,
category="category_value",
external_uri="external_uri_value",
severity=gcs_finding.Finding.Severity.CRITICAL,
canonical_name="canonical_name_value",
mute=gcs_finding.Finding.Mute.MUTED,
finding_class=gcs_finding.Finding.FindingClass.THREAT,
mute_initiator="mute_initiator_value",
)
response = client.create_finding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.CreateFindingRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_finding.Finding)
assert response.name == "name_value"
assert response.parent == "parent_value"
assert response.resource_name == "resource_name_value"
assert response.state == gcs_finding.Finding.State.ACTIVE
assert response.category == "category_value"
assert response.external_uri == "external_uri_value"
assert response.severity == gcs_finding.Finding.Severity.CRITICAL
assert response.canonical_name == "canonical_name_value"
assert response.mute == gcs_finding.Finding.Mute.MUTED
assert response.finding_class == gcs_finding.Finding.FindingClass.THREAT
assert response.mute_initiator == "mute_initiator_value"
def test_create_finding_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_finding), "__call__") as call:
client.create_finding()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.CreateFindingRequest()
@pytest.mark.asyncio
async def test_create_finding_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.CreateFindingRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_finding), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcs_finding.Finding(
name="name_value",
parent="parent_value",
resource_name="resource_name_value",
state=gcs_finding.Finding.State.ACTIVE,
category="category_value",
external_uri="external_uri_value",
severity=gcs_finding.Finding.Severity.CRITICAL,
canonical_name="canonical_name_value",
mute=gcs_finding.Finding.Mute.MUTED,
finding_class=gcs_finding.Finding.FindingClass.THREAT,
mute_initiator="mute_initiator_value",
)
)
response = await client.create_finding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.CreateFindingRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_finding.Finding)
assert response.name == "name_value"
assert response.parent == "parent_value"
assert response.resource_name == "resource_name_value"
assert response.state == gcs_finding.Finding.State.ACTIVE
assert response.category == "category_value"
assert response.external_uri == "external_uri_value"
assert response.severity == gcs_finding.Finding.Severity.CRITICAL
assert response.canonical_name == "canonical_name_value"
assert response.mute == gcs_finding.Finding.Mute.MUTED
assert response.finding_class == gcs_finding.Finding.FindingClass.THREAT
assert response.mute_initiator == "mute_initiator_value"
@pytest.mark.asyncio
async def test_create_finding_async_from_dict():
await test_create_finding_async(request_type=dict)
def test_create_finding_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.CreateFindingRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_finding), "__call__") as call:
call.return_value = gcs_finding.Finding()
client.create_finding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_finding_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.CreateFindingRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_finding), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_finding.Finding())
await client.create_finding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_finding_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_finding), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_finding.Finding()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_finding(
parent="parent_value",
finding_id="finding_id_value",
finding=gcs_finding.Finding(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].finding_id
mock_val = "finding_id_value"
assert arg == mock_val
arg = args[0].finding
mock_val = gcs_finding.Finding(name="name_value")
assert arg == mock_val
def test_create_finding_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_finding(
securitycenter_service.CreateFindingRequest(),
parent="parent_value",
finding_id="finding_id_value",
finding=gcs_finding.Finding(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_finding_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_finding), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_finding.Finding())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_finding(
parent="parent_value",
finding_id="finding_id_value",
finding=gcs_finding.Finding(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].finding_id
mock_val = "finding_id_value"
assert arg == mock_val
arg = args[0].finding
mock_val = gcs_finding.Finding(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_finding_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_finding(
securitycenter_service.CreateFindingRequest(),
parent="parent_value",
finding_id="finding_id_value",
finding=gcs_finding.Finding(name="name_value"),
)
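# A hedged usage sketch of create_finding, mirroring the flattened arguments the
# tests above pass; "organizations/123/sources/456" is a hypothetical source path.
def _example_create_finding_usage():
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials())
    return client.create_finding(
        parent="organizations/123/sources/456",
        finding_id="finding_id_value",
        finding=gcs_finding.Finding(
            category="category_value",
            state=gcs_finding.Finding.State.ACTIVE,
            severity=gcs_finding.Finding.Severity.CRITICAL,
        ),
    )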
@pytest.mark.parametrize(
"request_type", [securitycenter_service.CreateMuteConfigRequest, dict,]
)
def test_create_mute_config(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_mute_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_mute_config.MuteConfig(
name="name_value",
display_name="display_name_value",
description="description_value",
filter="filter_value",
most_recent_editor="most_recent_editor_value",
)
response = client.create_mute_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.CreateMuteConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_mute_config.MuteConfig)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.filter == "filter_value"
assert response.most_recent_editor == "most_recent_editor_value"
def test_create_mute_config_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_mute_config), "__call__"
) as call:
client.create_mute_config()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.CreateMuteConfigRequest()
@pytest.mark.asyncio
async def test_create_mute_config_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.CreateMuteConfigRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_mute_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcs_mute_config.MuteConfig(
name="name_value",
display_name="display_name_value",
description="description_value",
filter="filter_value",
most_recent_editor="most_recent_editor_value",
)
)
response = await client.create_mute_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.CreateMuteConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_mute_config.MuteConfig)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.filter == "filter_value"
assert response.most_recent_editor == "most_recent_editor_value"
@pytest.mark.asyncio
async def test_create_mute_config_async_from_dict():
await test_create_mute_config_async(request_type=dict)
def test_create_mute_config_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.CreateMuteConfigRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_mute_config), "__call__"
) as call:
call.return_value = gcs_mute_config.MuteConfig()
client.create_mute_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_mute_config_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.CreateMuteConfigRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_mute_config), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcs_mute_config.MuteConfig()
)
await client.create_mute_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_mute_config_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_mute_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_mute_config.MuteConfig()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_mute_config(
parent="parent_value",
mute_config=gcs_mute_config.MuteConfig(name="name_value"),
mute_config_id="mute_config_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].mute_config
mock_val = gcs_mute_config.MuteConfig(name="name_value")
assert arg == mock_val
arg = args[0].mute_config_id
mock_val = "mute_config_id_value"
assert arg == mock_val
def test_create_mute_config_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_mute_config(
securitycenter_service.CreateMuteConfigRequest(),
parent="parent_value",
mute_config=gcs_mute_config.MuteConfig(name="name_value"),
mute_config_id="mute_config_id_value",
)
@pytest.mark.asyncio
async def test_create_mute_config_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_mute_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcs_mute_config.MuteConfig()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_mute_config(
parent="parent_value",
mute_config=gcs_mute_config.MuteConfig(name="name_value"),
mute_config_id="mute_config_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].mute_config
mock_val = gcs_mute_config.MuteConfig(name="name_value")
assert arg == mock_val
arg = args[0].mute_config_id
mock_val = "mute_config_id_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_mute_config_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_mute_config(
securitycenter_service.CreateMuteConfigRequest(),
parent="parent_value",
mute_config=gcs_mute_config.MuteConfig(name="name_value"),
mute_config_id="mute_config_id_value",
)
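# A hedged usage sketch of create_mute_config, mirroring the flattened arguments the
# tests above pass; "organizations/123" is a hypothetical parent.
def _example_create_mute_config_usage():
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials())
    return client.create_mute_config(
        parent="organizations/123",
        mute_config=gcs_mute_config.MuteConfig(
            display_name="display_name_value", filter="filter_value",
        ),
        mute_config_id="mute_config_id_value",
    )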
@pytest.mark.parametrize(
"request_type", [securitycenter_service.CreateNotificationConfigRequest, dict,]
)
def test_create_notification_config(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_notification_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_notification_config.NotificationConfig(
name="name_value",
description="description_value",
pubsub_topic="pubsub_topic_value",
service_account="service_account_value",
streaming_config=gcs_notification_config.NotificationConfig.StreamingConfig(
filter="filter_value"
),
)
response = client.create_notification_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.CreateNotificationConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_notification_config.NotificationConfig)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.pubsub_topic == "pubsub_topic_value"
assert response.service_account == "service_account_value"
def test_create_notification_config_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_notification_config), "__call__"
) as call:
client.create_notification_config()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.CreateNotificationConfigRequest()
@pytest.mark.asyncio
async def test_create_notification_config_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.CreateNotificationConfigRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_notification_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcs_notification_config.NotificationConfig(
name="name_value",
description="description_value",
pubsub_topic="pubsub_topic_value",
service_account="service_account_value",
)
)
response = await client.create_notification_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.CreateNotificationConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_notification_config.NotificationConfig)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.pubsub_topic == "pubsub_topic_value"
assert response.service_account == "service_account_value"
@pytest.mark.asyncio
async def test_create_notification_config_async_from_dict():
await test_create_notification_config_async(request_type=dict)
def test_create_notification_config_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.CreateNotificationConfigRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_notification_config), "__call__"
) as call:
call.return_value = gcs_notification_config.NotificationConfig()
client.create_notification_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_notification_config_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.CreateNotificationConfigRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_notification_config), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcs_notification_config.NotificationConfig()
)
await client.create_notification_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_notification_config_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_notification_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_notification_config.NotificationConfig()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_notification_config(
parent="parent_value",
config_id="config_id_value",
notification_config=gcs_notification_config.NotificationConfig(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].config_id
mock_val = "config_id_value"
assert arg == mock_val
arg = args[0].notification_config
mock_val = gcs_notification_config.NotificationConfig(name="name_value")
assert arg == mock_val
def test_create_notification_config_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_notification_config(
securitycenter_service.CreateNotificationConfigRequest(),
parent="parent_value",
config_id="config_id_value",
notification_config=gcs_notification_config.NotificationConfig(
name="name_value"
),
)
@pytest.mark.asyncio
async def test_create_notification_config_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_notification_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcs_notification_config.NotificationConfig()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_notification_config(
parent="parent_value",
config_id="config_id_value",
notification_config=gcs_notification_config.NotificationConfig(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].config_id
mock_val = "config_id_value"
assert arg == mock_val
arg = args[0].notification_config
mock_val = gcs_notification_config.NotificationConfig(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_notification_config_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_notification_config(
securitycenter_service.CreateNotificationConfigRequest(),
parent="parent_value",
config_id="config_id_value",
notification_config=gcs_notification_config.NotificationConfig(
name="name_value"
),
)
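# The flattened tests above exercise the convenience signature: keyword
# arguments are folded into a single request message, and supplying both a
# request object and flattened fields raises ValueError. A minimal sketch of
# the request the flattened call is expected to be equivalent to (derived from
# the assertions above, not from the client internals):
#
#     expected = securitycenter_service.CreateNotificationConfigRequest(
#         parent="parent_value",
#         config_id="config_id_value",
#         notification_config=gcs_notification_config.NotificationConfig(
#             name="name_value"
#         ),
#     )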
@pytest.mark.parametrize(
"request_type", [securitycenter_service.DeleteMuteConfigRequest, dict,]
)
def test_delete_mute_config(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_mute_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_mute_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.DeleteMuteConfigRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_mute_config_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_mute_config), "__call__"
) as call:
client.delete_mute_config()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.DeleteMuteConfigRequest()
@pytest.mark.asyncio
async def test_delete_mute_config_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.DeleteMuteConfigRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_mute_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_mute_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.DeleteMuteConfigRequest()
# Establish that the response is the type that we expect.
assert response is None
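# A note on the async variants in this file: the mocked stub's return value is
# wrapped in grpc_helpers_async.FakeUnaryUnaryCall so that awaiting the call
# inside the async client yields the designated message (or None for RPCs that
# return Empty), mirroring what a real grpc.aio unary-unary call would do.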
@pytest.mark.asyncio
async def test_delete_mute_config_async_from_dict():
await test_delete_mute_config_async(request_type=dict)
def test_delete_mute_config_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.DeleteMuteConfigRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_mute_config), "__call__"
) as call:
call.return_value = None
client.delete_mute_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_mute_config_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.DeleteMuteConfigRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_mute_config), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_mute_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_mute_config_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_mute_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_mute_config(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_mute_config_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_mute_config(
securitycenter_service.DeleteMuteConfigRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_mute_config_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_mute_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_mute_config(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_mute_config_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_mute_config(
securitycenter_service.DeleteMuteConfigRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [securitycenter_service.DeleteNotificationConfigRequest, dict,]
)
def test_delete_notification_config(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_notification_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_notification_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.DeleteNotificationConfigRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_notification_config_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_notification_config), "__call__"
) as call:
client.delete_notification_config()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.DeleteNotificationConfigRequest()
@pytest.mark.asyncio
async def test_delete_notification_config_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.DeleteNotificationConfigRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_notification_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_notification_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.DeleteNotificationConfigRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_notification_config_async_from_dict():
await test_delete_notification_config_async(request_type=dict)
def test_delete_notification_config_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.DeleteNotificationConfigRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_notification_config), "__call__"
) as call:
call.return_value = None
client.delete_notification_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_notification_config_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.DeleteNotificationConfigRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_notification_config), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_notification_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_notification_config_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_notification_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_notification_config(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_notification_config_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_notification_config(
securitycenter_service.DeleteNotificationConfigRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_notification_config_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_notification_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_notification_config(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_notification_config_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_notification_config(
securitycenter_service.DeleteNotificationConfigRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [securitycenter_service.GetBigQueryExportRequest, dict,]
)
def test_get_big_query_export(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_big_query_export), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = bigquery_export.BigQueryExport(
name="name_value",
description="description_value",
filter="filter_value",
dataset="dataset_value",
most_recent_editor="most_recent_editor_value",
principal="principal_value",
)
response = client.get_big_query_export(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.GetBigQueryExportRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, bigquery_export.BigQueryExport)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.filter == "filter_value"
assert response.dataset == "dataset_value"
assert response.most_recent_editor == "most_recent_editor_value"
assert response.principal == "principal_value"
def test_get_big_query_export_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_big_query_export), "__call__"
) as call:
client.get_big_query_export()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.GetBigQueryExportRequest()
@pytest.mark.asyncio
async def test_get_big_query_export_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.GetBigQueryExportRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_big_query_export), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
bigquery_export.BigQueryExport(
name="name_value",
description="description_value",
filter="filter_value",
dataset="dataset_value",
most_recent_editor="most_recent_editor_value",
principal="principal_value",
)
)
response = await client.get_big_query_export(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.GetBigQueryExportRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, bigquery_export.BigQueryExport)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.filter == "filter_value"
assert response.dataset == "dataset_value"
assert response.most_recent_editor == "most_recent_editor_value"
assert response.principal == "principal_value"
@pytest.mark.asyncio
async def test_get_big_query_export_async_from_dict():
await test_get_big_query_export_async(request_type=dict)
def test_get_big_query_export_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.GetBigQueryExportRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_big_query_export), "__call__"
) as call:
call.return_value = bigquery_export.BigQueryExport()
client.get_big_query_export(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_big_query_export_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.GetBigQueryExportRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_big_query_export), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
bigquery_export.BigQueryExport()
)
await client.get_big_query_export(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_big_query_export_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_big_query_export), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = bigquery_export.BigQueryExport()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_big_query_export(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_big_query_export_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_big_query_export(
securitycenter_service.GetBigQueryExportRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_big_query_export_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_big_query_export), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
bigquery_export.BigQueryExport()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_big_query_export(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_big_query_export_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_big_query_export(
securitycenter_service.GetBigQueryExportRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [iam_policy_pb2.GetIamPolicyRequest, dict,])
def test_get_iam_policy(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
response = client.get_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, policy_pb2.Policy)
assert response.version == 774
assert response.etag == b"etag_blob"
def test_get_iam_policy_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
client.get_iam_policy()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
@pytest.mark.asyncio
async def test_get_iam_policy_async(
transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
policy_pb2.Policy(version=774, etag=b"etag_blob",)
)
response = await client.get_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, policy_pb2.Policy)
assert response.version == 774
assert response.etag == b"etag_blob"
@pytest.mark.asyncio
async def test_get_iam_policy_async_from_dict():
await test_get_iam_policy_async(request_type=dict)
def test_get_iam_policy_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy_pb2.GetIamPolicyRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
call.return_value = policy_pb2.Policy()
client.get_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_iam_policy_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy_pb2.GetIamPolicyRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
await client.get_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_get_iam_policy_from_dict_foreign():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = policy_pb2.Policy()
response = client.get_iam_policy(
request={
"resource": "resource_value",
"options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
}
)
call.assert_called()
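# The "foreign" dict test above passes the request as a plain dict; the client
# is expected to coerce it into the corresponding protobuf message. A minimal
# sketch of the explicit equivalent, using only names already imported in this
# module:
#
#     request = iam_policy_pb2.GetIamPolicyRequest(
#         resource="resource_value",
#         options=options_pb2.GetPolicyOptions(requested_policy_version=2598),
#     )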
def test_get_iam_policy_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = policy_pb2.Policy()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_iam_policy(resource="resource_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].resource
mock_val = "resource_value"
assert arg == mock_val
def test_get_iam_policy_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_iam_policy(
iam_policy_pb2.GetIamPolicyRequest(), resource="resource_value",
)
@pytest.mark.asyncio
async def test_get_iam_policy_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_iam_policy(resource="resource_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].resource
mock_val = "resource_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_iam_policy_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_iam_policy(
iam_policy_pb2.GetIamPolicyRequest(), resource="resource_value",
)
@pytest.mark.parametrize(
"request_type", [securitycenter_service.GetMuteConfigRequest, dict,]
)
def test_get_mute_config(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_mute_config), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = mute_config.MuteConfig(
name="name_value",
display_name="display_name_value",
description="description_value",
filter="filter_value",
most_recent_editor="most_recent_editor_value",
)
response = client.get_mute_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.GetMuteConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, mute_config.MuteConfig)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.filter == "filter_value"
assert response.most_recent_editor == "most_recent_editor_value"
def test_get_mute_config_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_mute_config), "__call__") as call:
client.get_mute_config()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.GetMuteConfigRequest()
@pytest.mark.asyncio
async def test_get_mute_config_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.GetMuteConfigRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_mute_config), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
mute_config.MuteConfig(
name="name_value",
display_name="display_name_value",
description="description_value",
filter="filter_value",
most_recent_editor="most_recent_editor_value",
)
)
response = await client.get_mute_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.GetMuteConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, mute_config.MuteConfig)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.filter == "filter_value"
assert response.most_recent_editor == "most_recent_editor_value"
@pytest.mark.asyncio
async def test_get_mute_config_async_from_dict():
await test_get_mute_config_async(request_type=dict)
def test_get_mute_config_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.GetMuteConfigRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_mute_config), "__call__") as call:
call.return_value = mute_config.MuteConfig()
client.get_mute_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_mute_config_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.GetMuteConfigRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_mute_config), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
mute_config.MuteConfig()
)
await client.get_mute_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_mute_config_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_mute_config), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = mute_config.MuteConfig()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_mute_config(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_mute_config_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_mute_config(
securitycenter_service.GetMuteConfigRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_mute_config_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_mute_config), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
mute_config.MuteConfig()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_mute_config(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_mute_config_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_mute_config(
securitycenter_service.GetMuteConfigRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [securitycenter_service.GetNotificationConfigRequest, dict,]
)
def test_get_notification_config(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_notification_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = notification_config.NotificationConfig(
name="name_value",
description="description_value",
pubsub_topic="pubsub_topic_value",
service_account="service_account_value",
streaming_config=notification_config.NotificationConfig.StreamingConfig(
filter="filter_value"
),
)
response = client.get_notification_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.GetNotificationConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, notification_config.NotificationConfig)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.pubsub_topic == "pubsub_topic_value"
assert response.service_account == "service_account_value"
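# streaming_config is populated on the mocked response above but, being a
# message-typed field, it is not checked with a simple scalar equality
# assertion the way name, description, pubsub_topic and service_account are.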
def test_get_notification_config_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_notification_config), "__call__"
) as call:
client.get_notification_config()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.GetNotificationConfigRequest()
@pytest.mark.asyncio
async def test_get_notification_config_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.GetNotificationConfigRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_notification_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
notification_config.NotificationConfig(
name="name_value",
description="description_value",
pubsub_topic="pubsub_topic_value",
service_account="service_account_value",
)
)
response = await client.get_notification_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.GetNotificationConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, notification_config.NotificationConfig)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.pubsub_topic == "pubsub_topic_value"
assert response.service_account == "service_account_value"
@pytest.mark.asyncio
async def test_get_notification_config_async_from_dict():
await test_get_notification_config_async(request_type=dict)
def test_get_notification_config_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.GetNotificationConfigRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_notification_config), "__call__"
) as call:
call.return_value = notification_config.NotificationConfig()
client.get_notification_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_notification_config_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.GetNotificationConfigRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_notification_config), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
notification_config.NotificationConfig()
)
await client.get_notification_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_notification_config_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_notification_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = notification_config.NotificationConfig()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_notification_config(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_notification_config_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_notification_config(
securitycenter_service.GetNotificationConfigRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_notification_config_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_notification_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
notification_config.NotificationConfig()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_notification_config(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_notification_config_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_notification_config(
securitycenter_service.GetNotificationConfigRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [securitycenter_service.GetOrganizationSettingsRequest, dict,]
)
def test_get_organization_settings(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_organization_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = organization_settings.OrganizationSettings(
name="name_value", enable_asset_discovery=True,
)
response = client.get_organization_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.GetOrganizationSettingsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, organization_settings.OrganizationSettings)
assert response.name == "name_value"
assert response.enable_asset_discovery is True
def test_get_organization_settings_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_organization_settings), "__call__"
) as call:
client.get_organization_settings()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.GetOrganizationSettingsRequest()
@pytest.mark.asyncio
async def test_get_organization_settings_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.GetOrganizationSettingsRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_organization_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
organization_settings.OrganizationSettings(
name="name_value", enable_asset_discovery=True,
)
)
response = await client.get_organization_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.GetOrganizationSettingsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, organization_settings.OrganizationSettings)
assert response.name == "name_value"
assert response.enable_asset_discovery is True
@pytest.mark.asyncio
async def test_get_organization_settings_async_from_dict():
await test_get_organization_settings_async(request_type=dict)
def test_get_organization_settings_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.GetOrganizationSettingsRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_organization_settings), "__call__"
) as call:
call.return_value = organization_settings.OrganizationSettings()
client.get_organization_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_organization_settings_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.GetOrganizationSettingsRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_organization_settings), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
organization_settings.OrganizationSettings()
)
await client.get_organization_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_organization_settings_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_organization_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = organization_settings.OrganizationSettings()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_organization_settings(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_organization_settings_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_organization_settings(
securitycenter_service.GetOrganizationSettingsRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_organization_settings_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_organization_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
organization_settings.OrganizationSettings()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_organization_settings(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_organization_settings_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_organization_settings(
securitycenter_service.GetOrganizationSettingsRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [securitycenter_service.GetSourceRequest, dict,]
)
def test_get_source(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_source), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = source.Source(
name="name_value",
display_name="display_name_value",
description="description_value",
canonical_name="canonical_name_value",
)
response = client.get_source(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.GetSourceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, source.Source)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.canonical_name == "canonical_name_value"
def test_get_source_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_source), "__call__") as call:
client.get_source()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.GetSourceRequest()
@pytest.mark.asyncio
async def test_get_source_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.GetSourceRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_source), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
source.Source(
name="name_value",
display_name="display_name_value",
description="description_value",
canonical_name="canonical_name_value",
)
)
response = await client.get_source(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.GetSourceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, source.Source)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.canonical_name == "canonical_name_value"
@pytest.mark.asyncio
async def test_get_source_async_from_dict():
await test_get_source_async(request_type=dict)
def test_get_source_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.GetSourceRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_source), "__call__") as call:
call.return_value = source.Source()
client.get_source(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_source_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.GetSourceRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_source), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(source.Source())
await client.get_source(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_source_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_source), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = source.Source()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_source(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_source_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_source(
securitycenter_service.GetSourceRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_source_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_source), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(source.Source())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_source(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_source_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_source(
securitycenter_service.GetSourceRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [securitycenter_service.GroupAssetsRequest, dict,]
)
def test_group_assets(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.group_assets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = securitycenter_service.GroupAssetsResponse(
next_page_token="next_page_token_value", total_size=1086,
)
response = client.group_assets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.GroupAssetsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.GroupAssetsPager)
assert response.next_page_token == "next_page_token_value"
assert response.total_size == 1086
def test_group_assets_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.group_assets), "__call__") as call:
client.group_assets()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.GroupAssetsRequest()
@pytest.mark.asyncio
async def test_group_assets_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.GroupAssetsRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.group_assets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
securitycenter_service.GroupAssetsResponse(
next_page_token="next_page_token_value", total_size=1086,
)
)
response = await client.group_assets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.GroupAssetsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.GroupAssetsAsyncPager)
assert response.next_page_token == "next_page_token_value"
assert response.total_size == 1086
@pytest.mark.asyncio
async def test_group_assets_async_from_dict():
await test_group_assets_async(request_type=dict)
def test_group_assets_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.GroupAssetsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.group_assets), "__call__") as call:
call.return_value = securitycenter_service.GroupAssetsResponse()
client.group_assets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_group_assets_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.GroupAssetsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.group_assets), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
securitycenter_service.GroupAssetsResponse()
)
await client.group_assets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_group_assets_pager(transport_name: str = "grpc"):
client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.group_assets), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.GroupAssetsResponse(
group_by_results=[
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
],
next_page_token="abc",
),
securitycenter_service.GroupAssetsResponse(
group_by_results=[], next_page_token="def",
),
securitycenter_service.GroupAssetsResponse(
group_by_results=[securitycenter_service.GroupResult(),],
next_page_token="ghi",
),
securitycenter_service.GroupAssetsResponse(
group_by_results=[
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
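        # The first page is fetched by the call itself; iterating the pager pulls the
        # remaining pages from the mocked side_effect, and the pager should carry the
        # routing metadata built above.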
pager = client.group_assets(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, securitycenter_service.GroupResult) for i in results)
def test_group_assets_pages(transport_name: str = "grpc"):
client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.group_assets), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.GroupAssetsResponse(
group_by_results=[
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
],
next_page_token="abc",
),
securitycenter_service.GroupAssetsResponse(
group_by_results=[], next_page_token="def",
),
securitycenter_service.GroupAssetsResponse(
group_by_results=[securitycenter_service.GroupResult(),],
next_page_token="ghi",
),
securitycenter_service.GroupAssetsResponse(
group_by_results=[
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
],
),
RuntimeError,
)
pages = list(client.group_assets(request={}).pages)
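        # The final page has no next_page_token, so it compares against the empty string.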
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_group_assets_async_pager():
    client = SecurityCenterAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.group_assets), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.GroupAssetsResponse(
group_by_results=[
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
],
next_page_token="abc",
),
securitycenter_service.GroupAssetsResponse(
group_by_results=[], next_page_token="def",
),
securitycenter_service.GroupAssetsResponse(
group_by_results=[securitycenter_service.GroupResult(),],
next_page_token="ghi",
),
securitycenter_service.GroupAssetsResponse(
group_by_results=[
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
],
),
RuntimeError,
)
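        # Awaiting the call yields the async pager; the `async for` below drains the
        # remaining mocked pages into `responses`.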
async_pager = await client.group_assets(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, securitycenter_service.GroupResult) for i in responses)
@pytest.mark.asyncio
async def test_group_assets_async_pages():
    client = SecurityCenterAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.group_assets), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.GroupAssetsResponse(
group_by_results=[
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
],
next_page_token="abc",
),
securitycenter_service.GroupAssetsResponse(
group_by_results=[], next_page_token="def",
),
securitycenter_service.GroupAssetsResponse(
group_by_results=[securitycenter_service.GroupResult(),],
next_page_token="ghi",
),
securitycenter_service.GroupAssetsResponse(
group_by_results=[
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
],
),
RuntimeError,
)
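        # The call must be awaited before its .pages attribute can be iterated with async for.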
pages = []
async for page_ in (await client.group_assets(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [securitycenter_service.GroupFindingsRequest, dict,]
)
def test_group_findings(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.group_findings), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = securitycenter_service.GroupFindingsResponse(
next_page_token="next_page_token_value", total_size=1086,
)
response = client.group_findings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.GroupFindingsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.GroupFindingsPager)
assert response.next_page_token == "next_page_token_value"
assert response.total_size == 1086
def test_group_findings_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.group_findings), "__call__") as call:
client.group_findings()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.GroupFindingsRequest()
@pytest.mark.asyncio
async def test_group_findings_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.GroupFindingsRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.group_findings), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
securitycenter_service.GroupFindingsResponse(
next_page_token="next_page_token_value", total_size=1086,
)
)
response = await client.group_findings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.GroupFindingsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.GroupFindingsAsyncPager)
assert response.next_page_token == "next_page_token_value"
assert response.total_size == 1086
@pytest.mark.asyncio
async def test_group_findings_async_from_dict():
await test_group_findings_async(request_type=dict)
def test_group_findings_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.GroupFindingsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.group_findings), "__call__") as call:
call.return_value = securitycenter_service.GroupFindingsResponse()
client.group_findings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_group_findings_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.GroupFindingsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.group_findings), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
securitycenter_service.GroupFindingsResponse()
)
await client.group_findings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_group_findings_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.group_findings), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = securitycenter_service.GroupFindingsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.group_findings(
parent="parent_value", group_by="group_by_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].group_by
mock_val = "group_by_value"
assert arg == mock_val
def test_group_findings_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.group_findings(
securitycenter_service.GroupFindingsRequest(),
parent="parent_value",
group_by="group_by_value",
)
@pytest.mark.asyncio
async def test_group_findings_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.group_findings), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
securitycenter_service.GroupFindingsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.group_findings(
parent="parent_value", group_by="group_by_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].group_by
mock_val = "group_by_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_group_findings_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.group_findings(
securitycenter_service.GroupFindingsRequest(),
parent="parent_value",
group_by="group_by_value",
)
def test_group_findings_pager(transport_name: str = "grpc"):
client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.group_findings), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.GroupFindingsResponse(
group_by_results=[
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
],
next_page_token="abc",
),
securitycenter_service.GroupFindingsResponse(
group_by_results=[], next_page_token="def",
),
securitycenter_service.GroupFindingsResponse(
group_by_results=[securitycenter_service.GroupResult(),],
next_page_token="ghi",
),
securitycenter_service.GroupFindingsResponse(
group_by_results=[
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.group_findings(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, securitycenter_service.GroupResult) for i in results)
def test_group_findings_pages(transport_name: str = "grpc"):
client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.group_findings), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.GroupFindingsResponse(
group_by_results=[
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
],
next_page_token="abc",
),
securitycenter_service.GroupFindingsResponse(
group_by_results=[], next_page_token="def",
),
securitycenter_service.GroupFindingsResponse(
group_by_results=[securitycenter_service.GroupResult(),],
next_page_token="ghi",
),
securitycenter_service.GroupFindingsResponse(
group_by_results=[
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
],
),
RuntimeError,
)
pages = list(client.group_findings(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_group_findings_async_pager():
    client = SecurityCenterAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.group_findings), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.GroupFindingsResponse(
group_by_results=[
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
],
next_page_token="abc",
),
securitycenter_service.GroupFindingsResponse(
group_by_results=[], next_page_token="def",
),
securitycenter_service.GroupFindingsResponse(
group_by_results=[securitycenter_service.GroupResult(),],
next_page_token="ghi",
),
securitycenter_service.GroupFindingsResponse(
group_by_results=[
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
],
),
RuntimeError,
)
async_pager = await client.group_findings(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, securitycenter_service.GroupResult) for i in responses)
@pytest.mark.asyncio
async def test_group_findings_async_pages():
    client = SecurityCenterAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.group_findings), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.GroupFindingsResponse(
group_by_results=[
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
],
next_page_token="abc",
),
securitycenter_service.GroupFindingsResponse(
group_by_results=[], next_page_token="def",
),
securitycenter_service.GroupFindingsResponse(
group_by_results=[securitycenter_service.GroupResult(),],
next_page_token="ghi",
),
securitycenter_service.GroupFindingsResponse(
group_by_results=[
securitycenter_service.GroupResult(),
securitycenter_service.GroupResult(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.group_findings(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [securitycenter_service.ListAssetsRequest, dict,]
)
def test_list_assets(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = securitycenter_service.ListAssetsResponse(
next_page_token="next_page_token_value", total_size=1086,
)
response = client.list_assets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.ListAssetsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAssetsPager)
assert response.next_page_token == "next_page_token_value"
assert response.total_size == 1086
def test_list_assets_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
client.list_assets()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.ListAssetsRequest()
@pytest.mark.asyncio
async def test_list_assets_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.ListAssetsRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
securitycenter_service.ListAssetsResponse(
next_page_token="next_page_token_value", total_size=1086,
)
)
response = await client.list_assets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.ListAssetsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAssetsAsyncPager)
assert response.next_page_token == "next_page_token_value"
assert response.total_size == 1086
@pytest.mark.asyncio
async def test_list_assets_async_from_dict():
await test_list_assets_async(request_type=dict)
def test_list_assets_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.ListAssetsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
call.return_value = securitycenter_service.ListAssetsResponse()
client.list_assets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_assets_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.ListAssetsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
securitycenter_service.ListAssetsResponse()
)
await client.list_assets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_assets_pager(transport_name: str = "grpc"):
client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
# Set the response to a series of pages.
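        # Each page carries ListAssetsResult items, a message nested inside ListAssetsResponse.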
call.side_effect = (
securitycenter_service.ListAssetsResponse(
list_assets_results=[
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
],
next_page_token="abc",
),
securitycenter_service.ListAssetsResponse(
list_assets_results=[], next_page_token="def",
),
securitycenter_service.ListAssetsResponse(
list_assets_results=[
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
],
next_page_token="ghi",
),
securitycenter_service.ListAssetsResponse(
list_assets_results=[
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_assets(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(
isinstance(i, securitycenter_service.ListAssetsResponse.ListAssetsResult)
for i in results
)
def test_list_assets_pages(transport_name: str = "grpc"):
client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListAssetsResponse(
list_assets_results=[
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
],
next_page_token="abc",
),
securitycenter_service.ListAssetsResponse(
list_assets_results=[], next_page_token="def",
),
securitycenter_service.ListAssetsResponse(
list_assets_results=[
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
],
next_page_token="ghi",
),
securitycenter_service.ListAssetsResponse(
list_assets_results=[
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
],
),
RuntimeError,
)
pages = list(client.list_assets(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_assets_async_pager():
    client = SecurityCenterAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_assets), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListAssetsResponse(
list_assets_results=[
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
],
next_page_token="abc",
),
securitycenter_service.ListAssetsResponse(
list_assets_results=[], next_page_token="def",
),
securitycenter_service.ListAssetsResponse(
list_assets_results=[
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
],
next_page_token="ghi",
),
securitycenter_service.ListAssetsResponse(
list_assets_results=[
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
],
),
RuntimeError,
)
async_pager = await client.list_assets(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(i, securitycenter_service.ListAssetsResponse.ListAssetsResult)
for i in responses
)
@pytest.mark.asyncio
async def test_list_assets_async_pages():
    client = SecurityCenterAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_assets), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListAssetsResponse(
list_assets_results=[
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
],
next_page_token="abc",
),
securitycenter_service.ListAssetsResponse(
list_assets_results=[], next_page_token="def",
),
securitycenter_service.ListAssetsResponse(
list_assets_results=[
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
],
next_page_token="ghi",
),
securitycenter_service.ListAssetsResponse(
list_assets_results=[
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
securitycenter_service.ListAssetsResponse.ListAssetsResult(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_assets(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [securitycenter_service.ListFindingsRequest, dict,]
)
def test_list_findings(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_findings), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = securitycenter_service.ListFindingsResponse(
next_page_token="next_page_token_value", total_size=1086,
)
response = client.list_findings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.ListFindingsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListFindingsPager)
assert response.next_page_token == "next_page_token_value"
assert response.total_size == 1086
def test_list_findings_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_findings), "__call__") as call:
client.list_findings()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.ListFindingsRequest()
@pytest.mark.asyncio
async def test_list_findings_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.ListFindingsRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_findings), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
securitycenter_service.ListFindingsResponse(
next_page_token="next_page_token_value", total_size=1086,
)
)
response = await client.list_findings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.ListFindingsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListFindingsAsyncPager)
assert response.next_page_token == "next_page_token_value"
assert response.total_size == 1086
@pytest.mark.asyncio
async def test_list_findings_async_from_dict():
await test_list_findings_async(request_type=dict)
def test_list_findings_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.ListFindingsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_findings), "__call__") as call:
call.return_value = securitycenter_service.ListFindingsResponse()
client.list_findings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_findings_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.ListFindingsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_findings), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
securitycenter_service.ListFindingsResponse()
)
await client.list_findings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_findings_pager(transport_name: str = "grpc"):
client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_findings), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListFindingsResponse(
list_findings_results=[
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
],
next_page_token="abc",
),
securitycenter_service.ListFindingsResponse(
list_findings_results=[], next_page_token="def",
),
securitycenter_service.ListFindingsResponse(
list_findings_results=[
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
],
next_page_token="ghi",
),
securitycenter_service.ListFindingsResponse(
list_findings_results=[
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_findings(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(
isinstance(
i, securitycenter_service.ListFindingsResponse.ListFindingsResult
)
for i in results
)
def test_list_findings_pages(transport_name: str = "grpc"):
client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_findings), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListFindingsResponse(
list_findings_results=[
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
],
next_page_token="abc",
),
securitycenter_service.ListFindingsResponse(
list_findings_results=[], next_page_token="def",
),
securitycenter_service.ListFindingsResponse(
list_findings_results=[
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
],
next_page_token="ghi",
),
securitycenter_service.ListFindingsResponse(
list_findings_results=[
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
],
),
RuntimeError,
)
pages = list(client.list_findings(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_findings_async_pager():
    client = SecurityCenterAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_findings), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListFindingsResponse(
list_findings_results=[
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
],
next_page_token="abc",
),
securitycenter_service.ListFindingsResponse(
list_findings_results=[], next_page_token="def",
),
securitycenter_service.ListFindingsResponse(
list_findings_results=[
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
],
next_page_token="ghi",
),
securitycenter_service.ListFindingsResponse(
list_findings_results=[
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
],
),
RuntimeError,
)
async_pager = await client.list_findings(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(
i, securitycenter_service.ListFindingsResponse.ListFindingsResult
)
for i in responses
)
@pytest.mark.asyncio
async def test_list_findings_async_pages():
    client = SecurityCenterAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_findings), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListFindingsResponse(
list_findings_results=[
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
],
next_page_token="abc",
),
securitycenter_service.ListFindingsResponse(
list_findings_results=[], next_page_token="def",
),
securitycenter_service.ListFindingsResponse(
list_findings_results=[
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
],
next_page_token="ghi",
),
securitycenter_service.ListFindingsResponse(
list_findings_results=[
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
securitycenter_service.ListFindingsResponse.ListFindingsResult(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_findings(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [securitycenter_service.ListMuteConfigsRequest, dict,]
)
def test_list_mute_configs(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_mute_configs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = securitycenter_service.ListMuteConfigsResponse(
next_page_token="next_page_token_value",
)
response = client.list_mute_configs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.ListMuteConfigsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListMuteConfigsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_mute_configs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_mute_configs), "__call__"
) as call:
client.list_mute_configs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.ListMuteConfigsRequest()
@pytest.mark.asyncio
async def test_list_mute_configs_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.ListMuteConfigsRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_mute_configs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
securitycenter_service.ListMuteConfigsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_mute_configs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.ListMuteConfigsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListMuteConfigsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_mute_configs_async_from_dict():
await test_list_mute_configs_async(request_type=dict)
def test_list_mute_configs_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.ListMuteConfigsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_mute_configs), "__call__"
) as call:
call.return_value = securitycenter_service.ListMuteConfigsResponse()
client.list_mute_configs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_mute_configs_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.ListMuteConfigsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_mute_configs), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
securitycenter_service.ListMuteConfigsResponse()
)
await client.list_mute_configs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_mute_configs_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_mute_configs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = securitycenter_service.ListMuteConfigsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_mute_configs(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_mute_configs_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_mute_configs(
securitycenter_service.ListMuteConfigsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_mute_configs_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_mute_configs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
securitycenter_service.ListMuteConfigsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_mute_configs(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_mute_configs_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_mute_configs(
securitycenter_service.ListMuteConfigsRequest(), parent="parent_value",
)
def test_list_mute_configs_pager(transport_name: str = "grpc"):
client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_mute_configs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListMuteConfigsResponse(
mute_configs=[
mute_config.MuteConfig(),
mute_config.MuteConfig(),
mute_config.MuteConfig(),
],
next_page_token="abc",
),
securitycenter_service.ListMuteConfigsResponse(
mute_configs=[], next_page_token="def",
),
securitycenter_service.ListMuteConfigsResponse(
mute_configs=[mute_config.MuteConfig(),], next_page_token="ghi",
),
securitycenter_service.ListMuteConfigsResponse(
mute_configs=[mute_config.MuteConfig(), mute_config.MuteConfig(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
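# The assertion below checks that the pager stored the routing metadata built
# above; presumably the same metadata is reused when later pages are fetched.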
pager = client.list_mute_configs(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, mute_config.MuteConfig) for i in results)
def test_list_mute_configs_pages(transport_name: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_mute_configs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListMuteConfigsResponse(
mute_configs=[
mute_config.MuteConfig(),
mute_config.MuteConfig(),
mute_config.MuteConfig(),
],
next_page_token="abc",
),
securitycenter_service.ListMuteConfigsResponse(
mute_configs=[], next_page_token="def",
),
securitycenter_service.ListMuteConfigsResponse(
mute_configs=[mute_config.MuteConfig(),], next_page_token="ghi",
),
securitycenter_service.ListMuteConfigsResponse(
mute_configs=[mute_config.MuteConfig(), mute_config.MuteConfig(),],
),
RuntimeError,
)
pages = list(client.list_mute_configs(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
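# The async pager variants patch the stub with mock.AsyncMock so that awaiting
# the mocked call yields each fake page in turn.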
@pytest.mark.asyncio
async def test_list_mute_configs_async_pager():
client = SecurityCenterAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_mute_configs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListMuteConfigsResponse(
mute_configs=[
mute_config.MuteConfig(),
mute_config.MuteConfig(),
mute_config.MuteConfig(),
],
next_page_token="abc",
),
securitycenter_service.ListMuteConfigsResponse(
mute_configs=[], next_page_token="def",
),
securitycenter_service.ListMuteConfigsResponse(
mute_configs=[mute_config.MuteConfig(),], next_page_token="ghi",
),
securitycenter_service.ListMuteConfigsResponse(
mute_configs=[mute_config.MuteConfig(), mute_config.MuteConfig(),],
),
RuntimeError,
)
async_pager = await client.list_mute_configs(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, mute_config.MuteConfig) for i in responses)
@pytest.mark.asyncio
async def test_list_mute_configs_async_pages():
client = SecurityCenterAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_mute_configs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListMuteConfigsResponse(
mute_configs=[
mute_config.MuteConfig(),
mute_config.MuteConfig(),
mute_config.MuteConfig(),
],
next_page_token="abc",
),
securitycenter_service.ListMuteConfigsResponse(
mute_configs=[], next_page_token="def",
),
securitycenter_service.ListMuteConfigsResponse(
mute_configs=[mute_config.MuteConfig(),], next_page_token="ghi",
),
securitycenter_service.ListMuteConfigsResponse(
mute_configs=[mute_config.MuteConfig(), mute_config.MuteConfig(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_mute_configs(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
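# ListNotificationConfigs is exercised with the same battery of tests as
# ListMuteConfigs above: unary call, empty call, field headers, flattened
# arguments, and sync/async pagination.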
@pytest.mark.parametrize(
"request_type", [securitycenter_service.ListNotificationConfigsRequest, dict,]
)
def test_list_notification_configs(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_notification_configs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = securitycenter_service.ListNotificationConfigsResponse(
next_page_token="next_page_token_value",
)
response = client.list_notification_configs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.ListNotificationConfigsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListNotificationConfigsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_notification_configs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_notification_configs), "__call__"
) as call:
client.list_notification_configs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.ListNotificationConfigsRequest()
@pytest.mark.asyncio
async def test_list_notification_configs_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.ListNotificationConfigsRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_notification_configs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
securitycenter_service.ListNotificationConfigsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_notification_configs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.ListNotificationConfigsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListNotificationConfigsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_notification_configs_async_from_dict():
await test_list_notification_configs_async(request_type=dict)
def test_list_notification_configs_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.ListNotificationConfigsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_notification_configs), "__call__"
) as call:
call.return_value = securitycenter_service.ListNotificationConfigsResponse()
client.list_notification_configs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_notification_configs_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.ListNotificationConfigsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_notification_configs), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
securitycenter_service.ListNotificationConfigsResponse()
)
await client.list_notification_configs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_notification_configs_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_notification_configs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = securitycenter_service.ListNotificationConfigsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_notification_configs(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_notification_configs_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_notification_configs(
securitycenter_service.ListNotificationConfigsRequest(),
parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_notification_configs_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_notification_configs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
securitycenter_service.ListNotificationConfigsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_notification_configs(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_notification_configs_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_notification_configs(
securitycenter_service.ListNotificationConfigsRequest(),
parent="parent_value",
)
def test_list_notification_configs_pager(transport_name: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_notification_configs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListNotificationConfigsResponse(
notification_configs=[
notification_config.NotificationConfig(),
notification_config.NotificationConfig(),
notification_config.NotificationConfig(),
],
next_page_token="abc",
),
securitycenter_service.ListNotificationConfigsResponse(
notification_configs=[], next_page_token="def",
),
securitycenter_service.ListNotificationConfigsResponse(
notification_configs=[notification_config.NotificationConfig(),],
next_page_token="ghi",
),
securitycenter_service.ListNotificationConfigsResponse(
notification_configs=[
notification_config.NotificationConfig(),
notification_config.NotificationConfig(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_notification_configs(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(
isinstance(i, notification_config.NotificationConfig) for i in results
)
def test_list_notification_configs_pages(transport_name: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_notification_configs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListNotificationConfigsResponse(
notification_configs=[
notification_config.NotificationConfig(),
notification_config.NotificationConfig(),
notification_config.NotificationConfig(),
],
next_page_token="abc",
),
securitycenter_service.ListNotificationConfigsResponse(
notification_configs=[], next_page_token="def",
),
securitycenter_service.ListNotificationConfigsResponse(
notification_configs=[notification_config.NotificationConfig(),],
next_page_token="ghi",
),
securitycenter_service.ListNotificationConfigsResponse(
notification_configs=[
notification_config.NotificationConfig(),
notification_config.NotificationConfig(),
],
),
RuntimeError,
)
pages = list(client.list_notification_configs(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_notification_configs_async_pager():
client = SecurityCenterAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_notification_configs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListNotificationConfigsResponse(
notification_configs=[
notification_config.NotificationConfig(),
notification_config.NotificationConfig(),
notification_config.NotificationConfig(),
],
next_page_token="abc",
),
securitycenter_service.ListNotificationConfigsResponse(
notification_configs=[], next_page_token="def",
),
securitycenter_service.ListNotificationConfigsResponse(
notification_configs=[notification_config.NotificationConfig(),],
next_page_token="ghi",
),
securitycenter_service.ListNotificationConfigsResponse(
notification_configs=[
notification_config.NotificationConfig(),
notification_config.NotificationConfig(),
],
),
RuntimeError,
)
async_pager = await client.list_notification_configs(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(i, notification_config.NotificationConfig) for i in responses
)
@pytest.mark.asyncio
async def test_list_notification_configs_async_pages():
client = SecurityCenterAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_notification_configs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListNotificationConfigsResponse(
notification_configs=[
notification_config.NotificationConfig(),
notification_config.NotificationConfig(),
notification_config.NotificationConfig(),
],
next_page_token="abc",
),
securitycenter_service.ListNotificationConfigsResponse(
notification_configs=[], next_page_token="def",
),
securitycenter_service.ListNotificationConfigsResponse(
notification_configs=[notification_config.NotificationConfig(),],
next_page_token="ghi",
),
securitycenter_service.ListNotificationConfigsResponse(
notification_configs=[
notification_config.NotificationConfig(),
notification_config.NotificationConfig(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_notification_configs(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
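# ListSources repeats the standard paged-method test battery.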
@pytest.mark.parametrize(
"request_type", [securitycenter_service.ListSourcesRequest, dict,]
)
def test_list_sources(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_sources), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = securitycenter_service.ListSourcesResponse(
next_page_token="next_page_token_value",
)
response = client.list_sources(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.ListSourcesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListSourcesPager)
assert response.next_page_token == "next_page_token_value"
def test_list_sources_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_sources), "__call__") as call:
client.list_sources()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.ListSourcesRequest()
@pytest.mark.asyncio
async def test_list_sources_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.ListSourcesRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_sources), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
securitycenter_service.ListSourcesResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_sources(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.ListSourcesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListSourcesAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_sources_async_from_dict():
await test_list_sources_async(request_type=dict)
def test_list_sources_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.ListSourcesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_sources), "__call__") as call:
call.return_value = securitycenter_service.ListSourcesResponse()
client.list_sources(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_sources_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.ListSourcesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_sources), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
securitycenter_service.ListSourcesResponse()
)
await client.list_sources(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_sources_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_sources), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = securitycenter_service.ListSourcesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_sources(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_sources_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_sources(
securitycenter_service.ListSourcesRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_sources_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_sources), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
securitycenter_service.ListSourcesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_sources(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_sources_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_sources(
securitycenter_service.ListSourcesRequest(), parent="parent_value",
)
def test_list_sources_pager(transport_name: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_sources), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListSourcesResponse(
sources=[source.Source(), source.Source(), source.Source(),],
next_page_token="abc",
),
securitycenter_service.ListSourcesResponse(
sources=[], next_page_token="def",
),
securitycenter_service.ListSourcesResponse(
sources=[source.Source(),], next_page_token="ghi",
),
securitycenter_service.ListSourcesResponse(
sources=[source.Source(), source.Source(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_sources(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, source.Source) for i in results)
def test_list_sources_pages(transport_name: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_sources), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListSourcesResponse(
sources=[source.Source(), source.Source(), source.Source(),],
next_page_token="abc",
),
securitycenter_service.ListSourcesResponse(
sources=[], next_page_token="def",
),
securitycenter_service.ListSourcesResponse(
sources=[source.Source(),], next_page_token="ghi",
),
securitycenter_service.ListSourcesResponse(
sources=[source.Source(), source.Source(),],
),
RuntimeError,
)
pages = list(client.list_sources(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_sources_async_pager():
client = SecurityCenterAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_sources), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListSourcesResponse(
sources=[source.Source(), source.Source(), source.Source(),],
next_page_token="abc",
),
securitycenter_service.ListSourcesResponse(
sources=[], next_page_token="def",
),
securitycenter_service.ListSourcesResponse(
sources=[source.Source(),], next_page_token="ghi",
),
securitycenter_service.ListSourcesResponse(
sources=[source.Source(), source.Source(),],
),
RuntimeError,
)
async_pager = await client.list_sources(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, source.Source) for i in responses)
@pytest.mark.asyncio
async def test_list_sources_async_pages():
client = SecurityCenterAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_sources), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListSourcesResponse(
sources=[source.Source(), source.Source(), source.Source(),],
next_page_token="abc",
),
securitycenter_service.ListSourcesResponse(
sources=[], next_page_token="def",
),
securitycenter_service.ListSourcesResponse(
sources=[source.Source(),], next_page_token="ghi",
),
securitycenter_service.ListSourcesResponse(
sources=[source.Source(), source.Source(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_sources(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
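# RunAssetDiscovery returns a long-running operation, so these tests stub the
# call with an operations_pb2.Operation and assert that the client wraps it in
# a future.Future instead of returning a plain response message.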
@pytest.mark.parametrize(
"request_type", [securitycenter_service.RunAssetDiscoveryRequest, dict,]
)
def test_run_asset_discovery(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.run_asset_discovery), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.run_asset_discovery(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.RunAssetDiscoveryRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_run_asset_discovery_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.run_asset_discovery), "__call__"
) as call:
client.run_asset_discovery()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.RunAssetDiscoveryRequest()
@pytest.mark.asyncio
async def test_run_asset_discovery_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.RunAssetDiscoveryRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.run_asset_discovery), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.run_asset_discovery(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.RunAssetDiscoveryRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_run_asset_discovery_async_from_dict():
await test_run_asset_discovery_async(request_type=dict)
def test_run_asset_discovery_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.RunAssetDiscoveryRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.run_asset_discovery), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.run_asset_discovery(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_run_asset_discovery_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.RunAssetDiscoveryRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.run_asset_discovery), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.run_asset_discovery(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_run_asset_discovery_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.run_asset_discovery), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.run_asset_discovery(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_run_asset_discovery_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.run_asset_discovery(
securitycenter_service.RunAssetDiscoveryRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_run_asset_discovery_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.run_asset_discovery), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.run_asset_discovery(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_run_asset_discovery_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.run_asset_discovery(
securitycenter_service.RunAssetDiscoveryRequest(), parent="parent_value",
)
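# SetFindingState returns a fully populated finding.Finding, so each test
# spells out the expected field values and verifies them on the response.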
@pytest.mark.parametrize(
"request_type", [securitycenter_service.SetFindingStateRequest, dict,]
)
def test_set_finding_state(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_finding_state), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = finding.Finding(
name="name_value",
parent="parent_value",
resource_name="resource_name_value",
state=finding.Finding.State.ACTIVE,
category="category_value",
external_uri="external_uri_value",
severity=finding.Finding.Severity.CRITICAL,
canonical_name="canonical_name_value",
mute=finding.Finding.Mute.MUTED,
finding_class=finding.Finding.FindingClass.THREAT,
mute_initiator="mute_initiator_value",
)
response = client.set_finding_state(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.SetFindingStateRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, finding.Finding)
assert response.name == "name_value"
assert response.parent == "parent_value"
assert response.resource_name == "resource_name_value"
assert response.state == finding.Finding.State.ACTIVE
assert response.category == "category_value"
assert response.external_uri == "external_uri_value"
assert response.severity == finding.Finding.Severity.CRITICAL
assert response.canonical_name == "canonical_name_value"
assert response.mute == finding.Finding.Mute.MUTED
assert response.finding_class == finding.Finding.FindingClass.THREAT
assert response.mute_initiator == "mute_initiator_value"
def test_set_finding_state_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_finding_state), "__call__"
) as call:
client.set_finding_state()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.SetFindingStateRequest()
@pytest.mark.asyncio
async def test_set_finding_state_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.SetFindingStateRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_finding_state), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
finding.Finding(
name="name_value",
parent="parent_value",
resource_name="resource_name_value",
state=finding.Finding.State.ACTIVE,
category="category_value",
external_uri="external_uri_value",
severity=finding.Finding.Severity.CRITICAL,
canonical_name="canonical_name_value",
mute=finding.Finding.Mute.MUTED,
finding_class=finding.Finding.FindingClass.THREAT,
mute_initiator="mute_initiator_value",
)
)
response = await client.set_finding_state(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.SetFindingStateRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, finding.Finding)
assert response.name == "name_value"
assert response.parent == "parent_value"
assert response.resource_name == "resource_name_value"
assert response.state == finding.Finding.State.ACTIVE
assert response.category == "category_value"
assert response.external_uri == "external_uri_value"
assert response.severity == finding.Finding.Severity.CRITICAL
assert response.canonical_name == "canonical_name_value"
assert response.mute == finding.Finding.Mute.MUTED
assert response.finding_class == finding.Finding.FindingClass.THREAT
assert response.mute_initiator == "mute_initiator_value"
@pytest.mark.asyncio
async def test_set_finding_state_async_from_dict():
await test_set_finding_state_async(request_type=dict)
def test_set_finding_state_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.SetFindingStateRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_finding_state), "__call__"
) as call:
call.return_value = finding.Finding()
client.set_finding_state(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_finding_state_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.SetFindingStateRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_finding_state), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(finding.Finding())
await client.set_finding_state(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_set_finding_state_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_finding_state), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = finding.Finding()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.set_finding_state(
name="name_value",
state=finding.Finding.State.ACTIVE,
start_time=timestamp_pb2.Timestamp(seconds=751),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].state
mock_val = finding.Finding.State.ACTIVE
assert arg == mock_val
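# The flattened start_time is marshalled to a datetime on the request object,
# so convert it back with TimestampRule before comparing it to the original
# protobuf Timestamp.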
assert TimestampRule().to_proto(args[0].start_time) == timestamp_pb2.Timestamp(
seconds=751
)
def test_set_finding_state_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.set_finding_state(
securitycenter_service.SetFindingStateRequest(),
name="name_value",
state=finding.Finding.State.ACTIVE,
start_time=timestamp_pb2.Timestamp(seconds=751),
)
@pytest.mark.asyncio
async def test_set_finding_state_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_finding_state), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(finding.Finding())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.set_finding_state(
name="name_value",
state=finding.Finding.State.ACTIVE,
start_time=timestamp_pb2.Timestamp(seconds=751),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].state
mock_val = finding.Finding.State.ACTIVE
assert arg == mock_val
assert TimestampRule().to_proto(args[0].start_time) == timestamp_pb2.Timestamp(
seconds=751
)
@pytest.mark.asyncio
async def test_set_finding_state_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.set_finding_state(
securitycenter_service.SetFindingStateRequest(),
name="name_value",
state=finding.Finding.State.ACTIVE,
start_time=timestamp_pb2.Timestamp(seconds=751),
)
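# SetMute mirrors the SetFindingState tests; only the flattened mute-related
# arguments differ.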
@pytest.mark.parametrize("request_type", [securitycenter_service.SetMuteRequest, dict,])
def test_set_mute(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_mute), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = finding.Finding(
name="name_value",
parent="parent_value",
resource_name="resource_name_value",
state=finding.Finding.State.ACTIVE,
category="category_value",
external_uri="external_uri_value",
severity=finding.Finding.Severity.CRITICAL,
canonical_name="canonical_name_value",
mute=finding.Finding.Mute.MUTED,
finding_class=finding.Finding.FindingClass.THREAT,
mute_initiator="mute_initiator_value",
)
response = client.set_mute(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.SetMuteRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, finding.Finding)
assert response.name == "name_value"
assert response.parent == "parent_value"
assert response.resource_name == "resource_name_value"
assert response.state == finding.Finding.State.ACTIVE
assert response.category == "category_value"
assert response.external_uri == "external_uri_value"
assert response.severity == finding.Finding.Severity.CRITICAL
assert response.canonical_name == "canonical_name_value"
assert response.mute == finding.Finding.Mute.MUTED
assert response.finding_class == finding.Finding.FindingClass.THREAT
assert response.mute_initiator == "mute_initiator_value"
def test_set_mute_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_mute), "__call__") as call:
client.set_mute()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.SetMuteRequest()
@pytest.mark.asyncio
async def test_set_mute_async(
transport: str = "grpc_asyncio", request_type=securitycenter_service.SetMuteRequest
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_mute), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
finding.Finding(
name="name_value",
parent="parent_value",
resource_name="resource_name_value",
state=finding.Finding.State.ACTIVE,
category="category_value",
external_uri="external_uri_value",
severity=finding.Finding.Severity.CRITICAL,
canonical_name="canonical_name_value",
mute=finding.Finding.Mute.MUTED,
finding_class=finding.Finding.FindingClass.THREAT,
mute_initiator="mute_initiator_value",
)
)
response = await client.set_mute(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.SetMuteRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, finding.Finding)
assert response.name == "name_value"
assert response.parent == "parent_value"
assert response.resource_name == "resource_name_value"
assert response.state == finding.Finding.State.ACTIVE
assert response.category == "category_value"
assert response.external_uri == "external_uri_value"
assert response.severity == finding.Finding.Severity.CRITICAL
assert response.canonical_name == "canonical_name_value"
assert response.mute == finding.Finding.Mute.MUTED
assert response.finding_class == finding.Finding.FindingClass.THREAT
assert response.mute_initiator == "mute_initiator_value"
@pytest.mark.asyncio
async def test_set_mute_async_from_dict():
await test_set_mute_async(request_type=dict)
def test_set_mute_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.SetMuteRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_mute), "__call__") as call:
call.return_value = finding.Finding()
client.set_mute(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_mute_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.SetMuteRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_mute), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(finding.Finding())
await client.set_mute(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_set_mute_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_mute), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = finding.Finding()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.set_mute(
name="name_value", mute=finding.Finding.Mute.MUTED,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].mute
mock_val = finding.Finding.Mute.MUTED
assert arg == mock_val
def test_set_mute_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.set_mute(
securitycenter_service.SetMuteRequest(),
name="name_value",
mute=finding.Finding.Mute.MUTED,
)
@pytest.mark.asyncio
async def test_set_mute_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_mute), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(finding.Finding())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.set_mute(
name="name_value", mute=finding.Finding.Mute.MUTED,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].mute
mock_val = finding.Finding.Mute.MUTED
assert arg == mock_val
@pytest.mark.asyncio
async def test_set_mute_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.set_mute(
securitycenter_service.SetMuteRequest(),
name="name_value",
mute=finding.Finding.Mute.MUTED,
)
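# The IAM policy tests below use the raw iam_policy_pb2 / policy_pb2 protobuf
# messages directly rather than proto-plus wrappers.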
@pytest.mark.parametrize("request_type", [iam_policy_pb2.SetIamPolicyRequest, dict,])
def test_set_iam_policy(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
response = client.set_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, policy_pb2.Policy)
assert response.version == 774
assert response.etag == b"etag_blob"
def test_set_iam_policy_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
client.set_iam_policy()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
@pytest.mark.asyncio
async def test_set_iam_policy_async(
transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
policy_pb2.Policy(version=774, etag=b"etag_blob",)
)
response = await client.set_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, policy_pb2.Policy)
assert response.version == 774
assert response.etag == b"etag_blob"
@pytest.mark.asyncio
async def test_set_iam_policy_async_from_dict():
await test_set_iam_policy_async(request_type=dict)
def test_set_iam_policy_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy_pb2.SetIamPolicyRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
call.return_value = policy_pb2.Policy()
client.set_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_iam_policy_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy_pb2.SetIamPolicyRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
await client.set_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_set_iam_policy_from_dict_foreign():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = policy_pb2.Policy()
response = client.set_iam_policy(
request={
"resource": "resource_value",
"policy": policy_pb2.Policy(version=774),
}
)
call.assert_called()
def test_set_iam_policy_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = policy_pb2.Policy()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.set_iam_policy(resource="resource_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].resource
mock_val = "resource_value"
assert arg == mock_val
def test_set_iam_policy_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.set_iam_policy(
iam_policy_pb2.SetIamPolicyRequest(), resource="resource_value",
)
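# Illustrative sketch, not part of the generated suite: a request object and
# flattened keyword arguments are two mutually exclusive calling styles. Either
# call below is accepted on its own; combining them, as in the test above,
# raises ValueError. The mock setup mirrors the surrounding tests.
def _example_set_iam_policy_call_styles():
    client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        call.return_value = policy_pb2.Policy()
        # Style 1: pass a fully formed request object.
        client.set_iam_policy(iam_policy_pb2.SetIamPolicyRequest(resource="resource_value"))
        # Style 2: pass flattened fields as keyword arguments.
        client.set_iam_policy(resource="resource_value")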
@pytest.mark.asyncio
async def test_set_iam_policy_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.set_iam_policy(resource="resource_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].resource
mock_val = "resource_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_set_iam_policy_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.set_iam_policy(
iam_policy_pb2.SetIamPolicyRequest(), resource="resource_value",
)
@pytest.mark.parametrize(
"request_type", [iam_policy_pb2.TestIamPermissionsRequest, dict,]
)
def test_test_iam_permissions(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
permissions=["permissions_value"],
)
response = client.test_iam_permissions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
assert response.permissions == ["permissions_value"]
def test_test_iam_permissions_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
client.test_iam_permissions()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
@pytest.mark.asyncio
async def test_test_iam_permissions_async(
transport: str = "grpc_asyncio",
request_type=iam_policy_pb2.TestIamPermissionsRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
iam_policy_pb2.TestIamPermissionsResponse(
permissions=["permissions_value"],
)
)
response = await client.test_iam_permissions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
assert response.permissions == ["permissions_value"]
@pytest.mark.asyncio
async def test_test_iam_permissions_async_from_dict():
await test_test_iam_permissions_async(request_type=dict)
def test_test_iam_permissions_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy_pb2.TestIamPermissionsRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
client.test_iam_permissions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_test_iam_permissions_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy_pb2.TestIamPermissionsRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
iam_policy_pb2.TestIamPermissionsResponse()
)
await client.test_iam_permissions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_test_iam_permissions_from_dict_foreign():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
response = client.test_iam_permissions(
request={
"resource": "resource_value",
"permissions": ["permissions_value"],
}
)
call.assert_called()
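# Illustrative sketch, not part of the generated suite: the test above passes
# the request as a plain dict whose keys mirror the fields of the request
# proto. The equivalent explicit construction, using the same placeholder
# values, looks like this.
def _example_test_iam_permissions_request_from_fields():
    # Field-by-field equivalent of the dict used in the test above.
    return iam_policy_pb2.TestIamPermissionsRequest(
        resource="resource_value", permissions=["permissions_value"],
    )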
def test_test_iam_permissions_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.test_iam_permissions(
resource="resource_value", permissions=["permissions_value"],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].resource
mock_val = "resource_value"
assert arg == mock_val
arg = args[0].permissions
mock_val = ["permissions_value"]
assert arg == mock_val
def test_test_iam_permissions_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.test_iam_permissions(
iam_policy_pb2.TestIamPermissionsRequest(),
resource="resource_value",
permissions=["permissions_value"],
)
@pytest.mark.asyncio
async def test_test_iam_permissions_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.test_iam_permissions(
resource="resource_value", permissions=["permissions_value"],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].resource
mock_val = "resource_value"
assert arg == mock_val
arg = args[0].permissions
mock_val = ["permissions_value"]
assert arg == mock_val
@pytest.mark.asyncio
async def test_test_iam_permissions_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.test_iam_permissions(
iam_policy_pb2.TestIamPermissionsRequest(),
resource="resource_value",
permissions=["permissions_value"],
)
@pytest.mark.parametrize(
"request_type", [securitycenter_service.UpdateExternalSystemRequest, dict,]
)
def test_update_external_system(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_external_system), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_external_system.ExternalSystem(
name="name_value",
assignees=["assignees_value"],
external_uid="external_uid_value",
status="status_value",
)
response = client.update_external_system(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateExternalSystemRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_external_system.ExternalSystem)
assert response.name == "name_value"
assert response.assignees == ["assignees_value"]
assert response.external_uid == "external_uid_value"
assert response.status == "status_value"
def test_update_external_system_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_external_system), "__call__"
) as call:
client.update_external_system()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateExternalSystemRequest()
@pytest.mark.asyncio
async def test_update_external_system_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.UpdateExternalSystemRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_external_system), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcs_external_system.ExternalSystem(
name="name_value",
assignees=["assignees_value"],
external_uid="external_uid_value",
status="status_value",
)
)
response = await client.update_external_system(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateExternalSystemRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_external_system.ExternalSystem)
assert response.name == "name_value"
assert response.assignees == ["assignees_value"]
assert response.external_uid == "external_uid_value"
assert response.status == "status_value"
@pytest.mark.asyncio
async def test_update_external_system_async_from_dict():
await test_update_external_system_async(request_type=dict)
def test_update_external_system_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.UpdateExternalSystemRequest()
request.external_system.name = "external_system.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_external_system), "__call__"
) as call:
call.return_value = gcs_external_system.ExternalSystem()
client.update_external_system(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"external_system.name=external_system.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_external_system_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.UpdateExternalSystemRequest()
request.external_system.name = "external_system.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_external_system), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcs_external_system.ExternalSystem()
)
await client.update_external_system(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"external_system.name=external_system.name/value",
) in kw["metadata"]
def test_update_external_system_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_external_system), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_external_system.ExternalSystem()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_external_system(
external_system=gcs_external_system.ExternalSystem(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].external_system
mock_val = gcs_external_system.ExternalSystem(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
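# Illustrative sketch, not part of the generated suite: update_mask is a
# standard protobuf FieldMask whose "paths" list names the fields to update.
# The "paths_value" string used throughout this suite is only a placeholder;
# the mask below is a hypothetical example for an ExternalSystem update.
def _example_external_system_update_mask():
    return field_mask_pb2.FieldMask(paths=["external_uid", "status"])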
def test_update_external_system_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_external_system(
securitycenter_service.UpdateExternalSystemRequest(),
external_system=gcs_external_system.ExternalSystem(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_external_system_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_external_system), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcs_external_system.ExternalSystem()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_external_system(
external_system=gcs_external_system.ExternalSystem(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].external_system
mock_val = gcs_external_system.ExternalSystem(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_external_system_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_external_system(
securitycenter_service.UpdateExternalSystemRequest(),
external_system=gcs_external_system.ExternalSystem(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize(
"request_type", [securitycenter_service.UpdateFindingRequest, dict,]
)
def test_update_finding(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_finding), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_finding.Finding(
name="name_value",
parent="parent_value",
resource_name="resource_name_value",
state=gcs_finding.Finding.State.ACTIVE,
category="category_value",
external_uri="external_uri_value",
severity=gcs_finding.Finding.Severity.CRITICAL,
canonical_name="canonical_name_value",
mute=gcs_finding.Finding.Mute.MUTED,
finding_class=gcs_finding.Finding.FindingClass.THREAT,
mute_initiator="mute_initiator_value",
)
response = client.update_finding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateFindingRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_finding.Finding)
assert response.name == "name_value"
assert response.parent == "parent_value"
assert response.resource_name == "resource_name_value"
assert response.state == gcs_finding.Finding.State.ACTIVE
assert response.category == "category_value"
assert response.external_uri == "external_uri_value"
assert response.severity == gcs_finding.Finding.Severity.CRITICAL
assert response.canonical_name == "canonical_name_value"
assert response.mute == gcs_finding.Finding.Mute.MUTED
assert response.finding_class == gcs_finding.Finding.FindingClass.THREAT
assert response.mute_initiator == "mute_initiator_value"
def test_update_finding_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_finding), "__call__") as call:
client.update_finding()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateFindingRequest()
@pytest.mark.asyncio
async def test_update_finding_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.UpdateFindingRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_finding), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcs_finding.Finding(
name="name_value",
parent="parent_value",
resource_name="resource_name_value",
state=gcs_finding.Finding.State.ACTIVE,
category="category_value",
external_uri="external_uri_value",
severity=gcs_finding.Finding.Severity.CRITICAL,
canonical_name="canonical_name_value",
mute=gcs_finding.Finding.Mute.MUTED,
finding_class=gcs_finding.Finding.FindingClass.THREAT,
mute_initiator="mute_initiator_value",
)
)
response = await client.update_finding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateFindingRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_finding.Finding)
assert response.name == "name_value"
assert response.parent == "parent_value"
assert response.resource_name == "resource_name_value"
assert response.state == gcs_finding.Finding.State.ACTIVE
assert response.category == "category_value"
assert response.external_uri == "external_uri_value"
assert response.severity == gcs_finding.Finding.Severity.CRITICAL
assert response.canonical_name == "canonical_name_value"
assert response.mute == gcs_finding.Finding.Mute.MUTED
assert response.finding_class == gcs_finding.Finding.FindingClass.THREAT
assert response.mute_initiator == "mute_initiator_value"
@pytest.mark.asyncio
async def test_update_finding_async_from_dict():
await test_update_finding_async(request_type=dict)
def test_update_finding_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.UpdateFindingRequest()
request.finding.name = "finding.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_finding), "__call__") as call:
call.return_value = gcs_finding.Finding()
client.update_finding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "finding.name=finding.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_finding_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.UpdateFindingRequest()
request.finding.name = "finding.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_finding), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_finding.Finding())
await client.update_finding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "finding.name=finding.name/value",) in kw[
"metadata"
]
def test_update_finding_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_finding), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_finding.Finding()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_finding(finding=gcs_finding.Finding(name="name_value"),)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].finding
mock_val = gcs_finding.Finding(name="name_value")
assert arg == mock_val
def test_update_finding_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_finding(
securitycenter_service.UpdateFindingRequest(),
finding=gcs_finding.Finding(name="name_value"),
)
@pytest.mark.asyncio
async def test_update_finding_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_finding), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_finding.Finding())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_finding(
finding=gcs_finding.Finding(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].finding
mock_val = gcs_finding.Finding(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_finding_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_finding(
securitycenter_service.UpdateFindingRequest(),
finding=gcs_finding.Finding(name="name_value"),
)
@pytest.mark.parametrize(
"request_type", [securitycenter_service.UpdateMuteConfigRequest, dict,]
)
def test_update_mute_config(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_mute_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_mute_config.MuteConfig(
name="name_value",
display_name="display_name_value",
description="description_value",
filter="filter_value",
most_recent_editor="most_recent_editor_value",
)
response = client.update_mute_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateMuteConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_mute_config.MuteConfig)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.filter == "filter_value"
assert response.most_recent_editor == "most_recent_editor_value"
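# Illustrative sketch, not part of the generated suite: a MuteConfig couples a
# display name with a findings filter expression. The resource name and filter
# below are hypothetical examples, not values asserted anywhere in this suite.
def _example_build_mute_config():
    return gcs_mute_config.MuteConfig(
        name="organizations/123/muteConfigs/example-config",  # hypothetical name
        display_name="example-config",
        filter='category="EXAMPLE_CATEGORY"',  # hypothetical filter expression
    )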
def test_update_mute_config_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_mute_config), "__call__"
) as call:
client.update_mute_config()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateMuteConfigRequest()
@pytest.mark.asyncio
async def test_update_mute_config_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.UpdateMuteConfigRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_mute_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcs_mute_config.MuteConfig(
name="name_value",
display_name="display_name_value",
description="description_value",
filter="filter_value",
most_recent_editor="most_recent_editor_value",
)
)
response = await client.update_mute_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateMuteConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_mute_config.MuteConfig)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.filter == "filter_value"
assert response.most_recent_editor == "most_recent_editor_value"
@pytest.mark.asyncio
async def test_update_mute_config_async_from_dict():
await test_update_mute_config_async(request_type=dict)
def test_update_mute_config_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.UpdateMuteConfigRequest()
request.mute_config.name = "mute_config.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_mute_config), "__call__"
) as call:
call.return_value = gcs_mute_config.MuteConfig()
client.update_mute_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "mute_config.name=mute_config.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_mute_config_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.UpdateMuteConfigRequest()
request.mute_config.name = "mute_config.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_mute_config), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcs_mute_config.MuteConfig()
)
await client.update_mute_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "mute_config.name=mute_config.name/value",) in kw[
"metadata"
]
def test_update_mute_config_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_mute_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_mute_config.MuteConfig()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_mute_config(
mute_config=gcs_mute_config.MuteConfig(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].mute_config
mock_val = gcs_mute_config.MuteConfig(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_mute_config_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_mute_config(
securitycenter_service.UpdateMuteConfigRequest(),
mute_config=gcs_mute_config.MuteConfig(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_mute_config_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_mute_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcs_mute_config.MuteConfig()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_mute_config(
mute_config=gcs_mute_config.MuteConfig(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].mute_config
mock_val = gcs_mute_config.MuteConfig(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_mute_config_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_mute_config(
securitycenter_service.UpdateMuteConfigRequest(),
mute_config=gcs_mute_config.MuteConfig(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize(
"request_type", [securitycenter_service.UpdateNotificationConfigRequest, dict,]
)
def test_update_notification_config(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_notification_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_notification_config.NotificationConfig(
name="name_value",
description="description_value",
pubsub_topic="pubsub_topic_value",
service_account="service_account_value",
streaming_config=gcs_notification_config.NotificationConfig.StreamingConfig(
filter="filter_value"
),
)
response = client.update_notification_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateNotificationConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_notification_config.NotificationConfig)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.pubsub_topic == "pubsub_topic_value"
assert response.service_account == "service_account_value"
def test_update_notification_config_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_notification_config), "__call__"
) as call:
client.update_notification_config()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateNotificationConfigRequest()
@pytest.mark.asyncio
async def test_update_notification_config_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.UpdateNotificationConfigRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_notification_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcs_notification_config.NotificationConfig(
name="name_value",
description="description_value",
pubsub_topic="pubsub_topic_value",
service_account="service_account_value",
)
)
response = await client.update_notification_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateNotificationConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_notification_config.NotificationConfig)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.pubsub_topic == "pubsub_topic_value"
assert response.service_account == "service_account_value"
@pytest.mark.asyncio
async def test_update_notification_config_async_from_dict():
await test_update_notification_config_async(request_type=dict)
def test_update_notification_config_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.UpdateNotificationConfigRequest()
request.notification_config.name = "notification_config.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_notification_config), "__call__"
) as call:
call.return_value = gcs_notification_config.NotificationConfig()
client.update_notification_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"notification_config.name=notification_config.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_notification_config_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.UpdateNotificationConfigRequest()
request.notification_config.name = "notification_config.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_notification_config), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcs_notification_config.NotificationConfig()
)
await client.update_notification_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"notification_config.name=notification_config.name/value",
) in kw["metadata"]
def test_update_notification_config_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_notification_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_notification_config.NotificationConfig()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_notification_config(
notification_config=gcs_notification_config.NotificationConfig(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].notification_config
mock_val = gcs_notification_config.NotificationConfig(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_notification_config_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_notification_config(
securitycenter_service.UpdateNotificationConfigRequest(),
notification_config=gcs_notification_config.NotificationConfig(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_notification_config_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_notification_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcs_notification_config.NotificationConfig()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_notification_config(
notification_config=gcs_notification_config.NotificationConfig(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].notification_config
mock_val = gcs_notification_config.NotificationConfig(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_notification_config_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_notification_config(
securitycenter_service.UpdateNotificationConfigRequest(),
notification_config=gcs_notification_config.NotificationConfig(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize(
"request_type", [securitycenter_service.UpdateOrganizationSettingsRequest, dict,]
)
def test_update_organization_settings(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_organization_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_organization_settings.OrganizationSettings(
name="name_value", enable_asset_discovery=True,
)
response = client.update_organization_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateOrganizationSettingsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_organization_settings.OrganizationSettings)
assert response.name == "name_value"
assert response.enable_asset_discovery is True
def test_update_organization_settings_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_organization_settings), "__call__"
) as call:
client.update_organization_settings()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateOrganizationSettingsRequest()
@pytest.mark.asyncio
async def test_update_organization_settings_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.UpdateOrganizationSettingsRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_organization_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcs_organization_settings.OrganizationSettings(
name="name_value", enable_asset_discovery=True,
)
)
response = await client.update_organization_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateOrganizationSettingsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_organization_settings.OrganizationSettings)
assert response.name == "name_value"
assert response.enable_asset_discovery is True
@pytest.mark.asyncio
async def test_update_organization_settings_async_from_dict():
await test_update_organization_settings_async(request_type=dict)
def test_update_organization_settings_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.UpdateOrganizationSettingsRequest()
request.organization_settings.name = "organization_settings.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_organization_settings), "__call__"
) as call:
call.return_value = gcs_organization_settings.OrganizationSettings()
client.update_organization_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"organization_settings.name=organization_settings.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_organization_settings_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.UpdateOrganizationSettingsRequest()
request.organization_settings.name = "organization_settings.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_organization_settings), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcs_organization_settings.OrganizationSettings()
)
await client.update_organization_settings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"organization_settings.name=organization_settings.name/value",
) in kw["metadata"]
def test_update_organization_settings_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_organization_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_organization_settings.OrganizationSettings()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_organization_settings(
organization_settings=gcs_organization_settings.OrganizationSettings(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].organization_settings
mock_val = gcs_organization_settings.OrganizationSettings(name="name_value")
assert arg == mock_val
def test_update_organization_settings_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_organization_settings(
securitycenter_service.UpdateOrganizationSettingsRequest(),
organization_settings=gcs_organization_settings.OrganizationSettings(
name="name_value"
),
)
@pytest.mark.asyncio
async def test_update_organization_settings_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_organization_settings), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcs_organization_settings.OrganizationSettings()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_organization_settings(
organization_settings=gcs_organization_settings.OrganizationSettings(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].organization_settings
mock_val = gcs_organization_settings.OrganizationSettings(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_organization_settings_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_organization_settings(
securitycenter_service.UpdateOrganizationSettingsRequest(),
organization_settings=gcs_organization_settings.OrganizationSettings(
name="name_value"
),
)
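# Hedged sketch of the rule the flattened_error tests exercise: GAPIC clients
# refuse a request object combined with flattened keyword fields. A simplified,
# hypothetical version of that guard (illustrative names, not the library's
# actual internals):
def _sketch_flattened_guard(request, **flattened):
    if request is not None and any(v is not None for v in flattened.values()):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )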
@pytest.mark.parametrize(
"request_type", [securitycenter_service.UpdateSourceRequest, dict,]
)
def test_update_source(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_source), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_source.Source(
name="name_value",
display_name="display_name_value",
description="description_value",
canonical_name="canonical_name_value",
)
response = client.update_source(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateSourceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_source.Source)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.canonical_name == "canonical_name_value"
def test_update_source_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_source), "__call__") as call:
client.update_source()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateSourceRequest()
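# Note (behavioral summary, stated as an assumption): with neither a request
# nor flattened fields, the client falls back to a default-constructed request
# proto, which is why the empty call above still sends UpdateSourceRequest().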
@pytest.mark.asyncio
async def test_update_source_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.UpdateSourceRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_source), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcs_source.Source(
name="name_value",
display_name="display_name_value",
description="description_value",
canonical_name="canonical_name_value",
)
)
response = await client.update_source(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateSourceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_source.Source)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.canonical_name == "canonical_name_value"
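# Hedged aside: grpc_helpers_async.FakeUnaryUnaryCall wraps a canned result so
# the stub can be awaited like a real gRPC asyncio call; the async tests in
# this file substitute it wherever the sync tests assign a plain return_value.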
@pytest.mark.asyncio
async def test_update_source_async_from_dict():
await test_update_source_async(request_type=dict)
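# Hedged sketch (assumes proto-plus message semantics): request_type=dict works
# because the client coerces a mapping into the request message; an empty dict
# builds the same message as a bare constructor call:
def _sketch_dict_request_coercion():
    assert securitycenter_service.UpdateSourceRequest(
        {}
    ) == securitycenter_service.UpdateSourceRequest()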
def test_update_source_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.UpdateSourceRequest()
request.source.name = "source.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_source), "__call__") as call:
call.return_value = gcs_source.Source()
client.update_source(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "source.name=source.name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_source_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.UpdateSourceRequest()
request.source.name = "source.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_source), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_source.Source())
await client.update_source(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "source.name=source.name/value",) in kw["metadata"]
def test_update_source_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_source), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_source.Source()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_source(source=gcs_source.Source(name="name_value"),)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].source
mock_val = gcs_source.Source(name="name_value")
assert arg == mock_val
def test_update_source_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_source(
securitycenter_service.UpdateSourceRequest(),
source=gcs_source.Source(name="name_value"),
)
@pytest.mark.asyncio
async def test_update_source_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_source), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcs_source.Source())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_source(
source=gcs_source.Source(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].source
mock_val = gcs_source.Source(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_source_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_source(
securitycenter_service.UpdateSourceRequest(),
source=gcs_source.Source(name="name_value"),
)
@pytest.mark.parametrize(
"request_type", [securitycenter_service.UpdateSecurityMarksRequest, dict,]
)
def test_update_security_marks(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_security_marks), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_security_marks.SecurityMarks(
name="name_value", canonical_name="canonical_name_value",
)
response = client.update_security_marks(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateSecurityMarksRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_security_marks.SecurityMarks)
assert response.name == "name_value"
assert response.canonical_name == "canonical_name_value"
def test_update_security_marks_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_security_marks), "__call__"
) as call:
client.update_security_marks()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateSecurityMarksRequest()
@pytest.mark.asyncio
async def test_update_security_marks_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.UpdateSecurityMarksRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_security_marks), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcs_security_marks.SecurityMarks(
name="name_value", canonical_name="canonical_name_value",
)
)
response = await client.update_security_marks(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateSecurityMarksRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcs_security_marks.SecurityMarks)
assert response.name == "name_value"
assert response.canonical_name == "canonical_name_value"
@pytest.mark.asyncio
async def test_update_security_marks_async_from_dict():
await test_update_security_marks_async(request_type=dict)
def test_update_security_marks_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.UpdateSecurityMarksRequest()
request.security_marks.name = "security_marks.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_security_marks), "__call__"
) as call:
call.return_value = gcs_security_marks.SecurityMarks()
client.update_security_marks(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"security_marks.name=security_marks.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_security_marks_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.UpdateSecurityMarksRequest()
request.security_marks.name = "security_marks.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_security_marks), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcs_security_marks.SecurityMarks()
)
await client.update_security_marks(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"security_marks.name=security_marks.name/value",
) in kw["metadata"]
def test_update_security_marks_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_security_marks), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcs_security_marks.SecurityMarks()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_security_marks(
security_marks=gcs_security_marks.SecurityMarks(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].security_marks
mock_val = gcs_security_marks.SecurityMarks(name="name_value")
assert arg == mock_val
def test_update_security_marks_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_security_marks(
securitycenter_service.UpdateSecurityMarksRequest(),
security_marks=gcs_security_marks.SecurityMarks(name="name_value"),
)
@pytest.mark.asyncio
async def test_update_security_marks_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_security_marks), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcs_security_marks.SecurityMarks()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_security_marks(
security_marks=gcs_security_marks.SecurityMarks(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].security_marks
mock_val = gcs_security_marks.SecurityMarks(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_security_marks_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_security_marks(
securitycenter_service.UpdateSecurityMarksRequest(),
security_marks=gcs_security_marks.SecurityMarks(name="name_value"),
)
@pytest.mark.parametrize(
"request_type", [securitycenter_service.CreateBigQueryExportRequest, dict,]
)
def test_create_big_query_export(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_big_query_export), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = bigquery_export.BigQueryExport(
name="name_value",
description="description_value",
filter="filter_value",
dataset="dataset_value",
most_recent_editor="most_recent_editor_value",
principal="principal_value",
)
response = client.create_big_query_export(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.CreateBigQueryExportRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, bigquery_export.BigQueryExport)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.filter == "filter_value"
assert response.dataset == "dataset_value"
assert response.most_recent_editor == "most_recent_editor_value"
assert response.principal == "principal_value"
def test_create_big_query_export_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_big_query_export), "__call__"
) as call:
client.create_big_query_export()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.CreateBigQueryExportRequest()
@pytest.mark.asyncio
async def test_create_big_query_export_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.CreateBigQueryExportRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_big_query_export), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
bigquery_export.BigQueryExport(
name="name_value",
description="description_value",
filter="filter_value",
dataset="dataset_value",
most_recent_editor="most_recent_editor_value",
principal="principal_value",
)
)
response = await client.create_big_query_export(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.CreateBigQueryExportRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, bigquery_export.BigQueryExport)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.filter == "filter_value"
assert response.dataset == "dataset_value"
assert response.most_recent_editor == "most_recent_editor_value"
assert response.principal == "principal_value"
@pytest.mark.asyncio
async def test_create_big_query_export_async_from_dict():
await test_create_big_query_export_async(request_type=dict)
def test_create_big_query_export_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.CreateBigQueryExportRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_big_query_export), "__call__"
) as call:
call.return_value = bigquery_export.BigQueryExport()
client.create_big_query_export(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_big_query_export_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.CreateBigQueryExportRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_big_query_export), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
bigquery_export.BigQueryExport()
)
await client.create_big_query_export(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_big_query_export_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_big_query_export), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = bigquery_export.BigQueryExport()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_big_query_export(
parent="parent_value",
big_query_export=bigquery_export.BigQueryExport(name="name_value"),
big_query_export_id="big_query_export_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].big_query_export
mock_val = bigquery_export.BigQueryExport(name="name_value")
assert arg == mock_val
arg = args[0].big_query_export_id
mock_val = "big_query_export_id_value"
assert arg == mock_val
def test_create_big_query_export_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_big_query_export(
securitycenter_service.CreateBigQueryExportRequest(),
parent="parent_value",
big_query_export=bigquery_export.BigQueryExport(name="name_value"),
big_query_export_id="big_query_export_id_value",
)
@pytest.mark.asyncio
async def test_create_big_query_export_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_big_query_export), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
bigquery_export.BigQueryExport()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_big_query_export(
parent="parent_value",
big_query_export=bigquery_export.BigQueryExport(name="name_value"),
big_query_export_id="big_query_export_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].big_query_export
mock_val = bigquery_export.BigQueryExport(name="name_value")
assert arg == mock_val
arg = args[0].big_query_export_id
mock_val = "big_query_export_id_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_big_query_export_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_big_query_export(
securitycenter_service.CreateBigQueryExportRequest(),
parent="parent_value",
big_query_export=bigquery_export.BigQueryExport(name="name_value"),
big_query_export_id="big_query_export_id_value",
)
@pytest.mark.parametrize(
"request_type", [securitycenter_service.DeleteBigQueryExportRequest, dict,]
)
def test_delete_big_query_export(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_big_query_export), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_big_query_export(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.DeleteBigQueryExportRequest()
# Establish that the response is the type that we expect.
assert response is None
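# Note (API convention, stated as an assumption): DeleteBigQueryExport returns
# google.protobuf.Empty on the wire, which the GAPIC surface exposes as None;
# hence the `response is None` assertion above.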
def test_delete_big_query_export_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_big_query_export), "__call__"
) as call:
client.delete_big_query_export()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.DeleteBigQueryExportRequest()
@pytest.mark.asyncio
async def test_delete_big_query_export_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.DeleteBigQueryExportRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_big_query_export), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_big_query_export(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.DeleteBigQueryExportRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_big_query_export_async_from_dict():
await test_delete_big_query_export_async(request_type=dict)
def test_delete_big_query_export_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.DeleteBigQueryExportRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_big_query_export), "__call__"
) as call:
call.return_value = None
client.delete_big_query_export(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_big_query_export_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.DeleteBigQueryExportRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_big_query_export), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_big_query_export(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_big_query_export_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_big_query_export), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_big_query_export(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_big_query_export_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_big_query_export(
securitycenter_service.DeleteBigQueryExportRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_big_query_export_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_big_query_export), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_big_query_export(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_big_query_export_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_big_query_export(
securitycenter_service.DeleteBigQueryExportRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [securitycenter_service.UpdateBigQueryExportRequest, dict,]
)
def test_update_big_query_export(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_big_query_export), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = bigquery_export.BigQueryExport(
name="name_value",
description="description_value",
filter="filter_value",
dataset="dataset_value",
most_recent_editor="most_recent_editor_value",
principal="principal_value",
)
response = client.update_big_query_export(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateBigQueryExportRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, bigquery_export.BigQueryExport)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.filter == "filter_value"
assert response.dataset == "dataset_value"
assert response.most_recent_editor == "most_recent_editor_value"
assert response.principal == "principal_value"
def test_update_big_query_export_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_big_query_export), "__call__"
) as call:
client.update_big_query_export()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateBigQueryExportRequest()
@pytest.mark.asyncio
async def test_update_big_query_export_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.UpdateBigQueryExportRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_big_query_export), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
bigquery_export.BigQueryExport(
name="name_value",
description="description_value",
filter="filter_value",
dataset="dataset_value",
most_recent_editor="most_recent_editor_value",
principal="principal_value",
)
)
response = await client.update_big_query_export(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.UpdateBigQueryExportRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, bigquery_export.BigQueryExport)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.filter == "filter_value"
assert response.dataset == "dataset_value"
assert response.most_recent_editor == "most_recent_editor_value"
assert response.principal == "principal_value"
@pytest.mark.asyncio
async def test_update_big_query_export_async_from_dict():
await test_update_big_query_export_async(request_type=dict)
def test_update_big_query_export_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.UpdateBigQueryExportRequest()
request.big_query_export.name = "big_query_export.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_big_query_export), "__call__"
) as call:
call.return_value = bigquery_export.BigQueryExport()
client.update_big_query_export(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"big_query_export.name=big_query_export.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_big_query_export_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.UpdateBigQueryExportRequest()
request.big_query_export.name = "big_query_export.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_big_query_export), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
bigquery_export.BigQueryExport()
)
await client.update_big_query_export(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"big_query_export.name=big_query_export.name/value",
) in kw["metadata"]
def test_update_big_query_export_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_big_query_export), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = bigquery_export.BigQueryExport()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_big_query_export(
big_query_export=bigquery_export.BigQueryExport(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].big_query_export
mock_val = bigquery_export.BigQueryExport(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_big_query_export_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_big_query_export(
securitycenter_service.UpdateBigQueryExportRequest(),
big_query_export=bigquery_export.BigQueryExport(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_big_query_export_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_big_query_export), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
bigquery_export.BigQueryExport()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_big_query_export(
big_query_export=bigquery_export.BigQueryExport(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].big_query_export
mock_val = bigquery_export.BigQueryExport(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_big_query_export_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_big_query_export(
securitycenter_service.UpdateBigQueryExportRequest(),
big_query_export=bigquery_export.BigQueryExport(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
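# Hedged aside: the update_mask above is the standard protobuf FieldMask; its
# repeated `paths` field names the leaf fields to overwrite. A minimal sketch
# with hypothetical paths:
def _sketch_field_mask_paths():
    mask = field_mask_pb2.FieldMask(paths=["description", "filter"])
    assert list(mask.paths) == ["description", "filter"]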
@pytest.mark.parametrize(
"request_type", [securitycenter_service.ListBigQueryExportsRequest, dict,]
)
def test_list_big_query_exports(request_type, transport: str = "grpc"):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_big_query_exports), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = securitycenter_service.ListBigQueryExportsResponse(
next_page_token="next_page_token_value",
)
response = client.list_big_query_exports(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.ListBigQueryExportsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListBigQueryExportsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_big_query_exports_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_big_query_exports), "__call__"
) as call:
client.list_big_query_exports()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.ListBigQueryExportsRequest()
@pytest.mark.asyncio
async def test_list_big_query_exports_async(
transport: str = "grpc_asyncio",
request_type=securitycenter_service.ListBigQueryExportsRequest,
):
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_big_query_exports), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
securitycenter_service.ListBigQueryExportsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_big_query_exports(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == securitycenter_service.ListBigQueryExportsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListBigQueryExportsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_big_query_exports_async_from_dict():
await test_list_big_query_exports_async(request_type=dict)
def test_list_big_query_exports_field_headers():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.ListBigQueryExportsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_big_query_exports), "__call__"
) as call:
call.return_value = securitycenter_service.ListBigQueryExportsResponse()
client.list_big_query_exports(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_big_query_exports_field_headers_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = securitycenter_service.ListBigQueryExportsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_big_query_exports), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
securitycenter_service.ListBigQueryExportsResponse()
)
await client.list_big_query_exports(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_big_query_exports_flattened():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_big_query_exports), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = securitycenter_service.ListBigQueryExportsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_big_query_exports(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_big_query_exports_flattened_error():
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_big_query_exports(
securitycenter_service.ListBigQueryExportsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_big_query_exports_flattened_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_big_query_exports), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
securitycenter_service.ListBigQueryExportsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_big_query_exports(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_big_query_exports_flattened_error_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_big_query_exports(
securitycenter_service.ListBigQueryExportsRequest(), parent="parent_value",
)
def test_list_big_query_exports_pager(transport_name: str = "grpc"):
client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_big_query_exports), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListBigQueryExportsResponse(
big_query_exports=[
bigquery_export.BigQueryExport(),
bigquery_export.BigQueryExport(),
bigquery_export.BigQueryExport(),
],
next_page_token="abc",
),
securitycenter_service.ListBigQueryExportsResponse(
big_query_exports=[], next_page_token="def",
),
securitycenter_service.ListBigQueryExportsResponse(
big_query_exports=[bigquery_export.BigQueryExport(),],
next_page_token="ghi",
),
securitycenter_service.ListBigQueryExportsResponse(
big_query_exports=[
bigquery_export.BigQueryExport(),
bigquery_export.BigQueryExport(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_big_query_exports(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(isinstance(i, bigquery_export.BigQueryExport) for i in results)
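# Hedged sketch of the pager behavior verified above: the pager re-issues the
# RPC while next_page_token is non-empty and yields the items of every page,
# so the 3 + 0 + 1 + 2 exports in the mocked pages flatten into 6 results:
def _sketch_flatten_pages(pages):
    for page in pages:
        for export in page.big_query_exports:
            yield export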
def test_list_big_query_exports_pages(transport_name: str = "grpc"):
client = SecurityCenterClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_big_query_exports), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListBigQueryExportsResponse(
big_query_exports=[
bigquery_export.BigQueryExport(),
bigquery_export.BigQueryExport(),
bigquery_export.BigQueryExport(),
],
next_page_token="abc",
),
securitycenter_service.ListBigQueryExportsResponse(
big_query_exports=[], next_page_token="def",
),
securitycenter_service.ListBigQueryExportsResponse(
big_query_exports=[bigquery_export.BigQueryExport(),],
next_page_token="ghi",
),
securitycenter_service.ListBigQueryExportsResponse(
big_query_exports=[
bigquery_export.BigQueryExport(),
bigquery_export.BigQueryExport(),
],
),
RuntimeError,
)
pages = list(client.list_big_query_exports(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_big_query_exports_async_pager():
    client = SecurityCenterAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_big_query_exports),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListBigQueryExportsResponse(
big_query_exports=[
bigquery_export.BigQueryExport(),
bigquery_export.BigQueryExport(),
bigquery_export.BigQueryExport(),
],
next_page_token="abc",
),
securitycenter_service.ListBigQueryExportsResponse(
big_query_exports=[], next_page_token="def",
),
securitycenter_service.ListBigQueryExportsResponse(
big_query_exports=[bigquery_export.BigQueryExport(),],
next_page_token="ghi",
),
securitycenter_service.ListBigQueryExportsResponse(
big_query_exports=[
bigquery_export.BigQueryExport(),
bigquery_export.BigQueryExport(),
],
),
RuntimeError,
)
async_pager = await client.list_big_query_exports(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, bigquery_export.BigQueryExport) for i in responses)
@pytest.mark.asyncio
async def test_list_big_query_exports_async_pages():
    client = SecurityCenterAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_big_query_exports),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
securitycenter_service.ListBigQueryExportsResponse(
big_query_exports=[
bigquery_export.BigQueryExport(),
bigquery_export.BigQueryExport(),
bigquery_export.BigQueryExport(),
],
next_page_token="abc",
),
securitycenter_service.ListBigQueryExportsResponse(
big_query_exports=[], next_page_token="def",
),
securitycenter_service.ListBigQueryExportsResponse(
big_query_exports=[bigquery_export.BigQueryExport(),],
next_page_token="ghi",
),
securitycenter_service.ListBigQueryExportsResponse(
big_query_exports=[
bigquery_export.BigQueryExport(),
bigquery_export.BigQueryExport(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_big_query_exports(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.SecurityCenterGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.SecurityCenterGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SecurityCenterClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.SecurityCenterGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = SecurityCenterClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = SecurityCenterClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.SecurityCenterGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SecurityCenterClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.SecurityCenterGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = SecurityCenterClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.SecurityCenterGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.SecurityCenterGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.SecurityCenterGrpcTransport,
transports.SecurityCenterGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
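# Note: google.auth.default() implements Application Default Credentials and
# returns a (credentials, project_id) tuple, which is why the mock above is
# primed with (AnonymousCredentials(), None).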
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = SecurityCenterClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.SecurityCenterGrpcTransport,)
def test_security_center_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.SecurityCenterTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_security_center_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.securitycenter_v1.services.security_center.transports.SecurityCenterTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.SecurityCenterTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"bulk_mute_findings",
"create_source",
"create_finding",
"create_mute_config",
"create_notification_config",
"delete_mute_config",
"delete_notification_config",
"get_big_query_export",
"get_iam_policy",
"get_mute_config",
"get_notification_config",
"get_organization_settings",
"get_source",
"group_assets",
"group_findings",
"list_assets",
"list_findings",
"list_mute_configs",
"list_notification_configs",
"list_sources",
"run_asset_discovery",
"set_finding_state",
"set_mute",
"set_iam_policy",
"test_iam_permissions",
"update_external_system",
"update_finding",
"update_mute_config",
"update_notification_config",
"update_organization_settings",
"update_source",
"update_security_marks",
"create_big_query_export",
"delete_big_query_export",
"update_big_query_export",
"list_big_query_exports",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
def test_security_center_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.securitycenter_v1.services.security_center.transports.SecurityCenterTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.SecurityCenterTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_security_center_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.securitycenter_v1.services.security_center.transports.SecurityCenterTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.SecurityCenterTransport()
adc.assert_called_once()
def test_security_center_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
SecurityCenterClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.SecurityCenterGrpcTransport,
transports.SecurityCenterGrpcAsyncIOTransport,
],
)
def test_security_center_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.SecurityCenterGrpcTransport, grpc_helpers),
(transports.SecurityCenterGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_security_center_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"securitycenter.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="securitycenter.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.SecurityCenterGrpcTransport,
transports.SecurityCenterGrpcAsyncIOTransport,
],
)
def test_security_center_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_security_center_host_no_port():
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="securitycenter.googleapis.com"
),
)
assert client.transport._host == "securitycenter.googleapis.com:443"
def test_security_center_host_with_port():
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="securitycenter.googleapis.com:8000"
),
)
assert client.transport._host == "securitycenter.googleapis.com:8000"
def test_security_center_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.SecurityCenterGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_security_center_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.SecurityCenterGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.SecurityCenterGrpcTransport,
transports.SecurityCenterGrpcAsyncIOTransport,
],
)
def test_security_center_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.SecurityCenterGrpcTransport,
transports.SecurityCenterGrpcAsyncIOTransport,
],
)
def test_security_center_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_security_center_grpc_lro_client():
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_security_center_grpc_lro_async_client():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_asset_path():
organization = "squid"
asset = "clam"
expected = "organizations/{organization}/assets/{asset}".format(
organization=organization, asset=asset,
)
actual = SecurityCenterClient.asset_path(organization, asset)
assert expected == actual
def test_parse_asset_path():
expected = {
"organization": "whelk",
"asset": "octopus",
}
path = SecurityCenterClient.asset_path(**expected)
# Check that the path construction is reversible.
actual = SecurityCenterClient.parse_asset_path(path)
assert expected == actual
def test_big_query_export_path():
organization = "oyster"
export = "nudibranch"
expected = "organizations/{organization}/bigQueryExports/{export}".format(
organization=organization, export=export,
)
actual = SecurityCenterClient.big_query_export_path(organization, export)
assert expected == actual
def test_parse_big_query_export_path():
expected = {
"organization": "cuttlefish",
"export": "mussel",
}
path = SecurityCenterClient.big_query_export_path(**expected)
# Check that the path construction is reversible.
actual = SecurityCenterClient.parse_big_query_export_path(path)
assert expected == actual
def test_external_system_path():
organization = "winkle"
source = "nautilus"
finding = "scallop"
externalsystem = "abalone"
expected = "organizations/{organization}/sources/{source}/findings/{finding}/externalSystems/{externalsystem}".format(
organization=organization,
source=source,
finding=finding,
externalsystem=externalsystem,
)
actual = SecurityCenterClient.external_system_path(
organization, source, finding, externalsystem
)
assert expected == actual
def test_parse_external_system_path():
expected = {
"organization": "squid",
"source": "clam",
"finding": "whelk",
"externalsystem": "octopus",
}
path = SecurityCenterClient.external_system_path(**expected)
# Check that the path construction is reversible.
actual = SecurityCenterClient.parse_external_system_path(path)
assert expected == actual
def test_finding_path():
organization = "oyster"
source = "nudibranch"
finding = "cuttlefish"
expected = "organizations/{organization}/sources/{source}/findings/{finding}".format(
organization=organization, source=source, finding=finding,
)
actual = SecurityCenterClient.finding_path(organization, source, finding)
assert expected == actual
def test_parse_finding_path():
expected = {
"organization": "mussel",
"source": "winkle",
"finding": "nautilus",
}
path = SecurityCenterClient.finding_path(**expected)
# Check that the path construction is reversible.
actual = SecurityCenterClient.parse_finding_path(path)
assert expected == actual
def test_mute_config_path():
organization = "scallop"
mute_config = "abalone"
expected = "organizations/{organization}/muteConfigs/{mute_config}".format(
organization=organization, mute_config=mute_config,
)
actual = SecurityCenterClient.mute_config_path(organization, mute_config)
assert expected == actual
def test_parse_mute_config_path():
expected = {
"organization": "squid",
"mute_config": "clam",
}
path = SecurityCenterClient.mute_config_path(**expected)
# Check that the path construction is reversible.
actual = SecurityCenterClient.parse_mute_config_path(path)
assert expected == actual
def test_notification_config_path():
organization = "whelk"
notification_config = "octopus"
expected = "organizations/{organization}/notificationConfigs/{notification_config}".format(
organization=organization, notification_config=notification_config,
)
actual = SecurityCenterClient.notification_config_path(
organization, notification_config
)
assert expected == actual
def test_parse_notification_config_path():
expected = {
"organization": "oyster",
"notification_config": "nudibranch",
}
path = SecurityCenterClient.notification_config_path(**expected)
# Check that the path construction is reversible.
actual = SecurityCenterClient.parse_notification_config_path(path)
assert expected == actual
def test_organization_settings_path():
organization = "cuttlefish"
expected = "organizations/{organization}/organizationSettings".format(
organization=organization,
)
actual = SecurityCenterClient.organization_settings_path(organization)
assert expected == actual
def test_parse_organization_settings_path():
expected = {
"organization": "mussel",
}
path = SecurityCenterClient.organization_settings_path(**expected)
# Check that the path construction is reversible.
actual = SecurityCenterClient.parse_organization_settings_path(path)
assert expected == actual
def test_security_marks_path():
organization = "winkle"
asset = "nautilus"
expected = "organizations/{organization}/assets/{asset}/securityMarks".format(
organization=organization, asset=asset,
)
actual = SecurityCenterClient.security_marks_path(organization, asset)
assert expected == actual
def test_parse_security_marks_path():
expected = {
"organization": "scallop",
"asset": "abalone",
}
path = SecurityCenterClient.security_marks_path(**expected)
# Check that the path construction is reversible.
actual = SecurityCenterClient.parse_security_marks_path(path)
assert expected == actual
def test_source_path():
organization = "squid"
source = "clam"
expected = "organizations/{organization}/sources/{source}".format(
organization=organization, source=source,
)
actual = SecurityCenterClient.source_path(organization, source)
assert expected == actual
def test_parse_source_path():
expected = {
"organization": "whelk",
"source": "octopus",
}
path = SecurityCenterClient.source_path(**expected)
# Check that the path construction is reversible.
actual = SecurityCenterClient.parse_source_path(path)
assert expected == actual
def test_topic_path():
project = "oyster"
topic = "nudibranch"
expected = "projects/{project}/topics/{topic}".format(project=project, topic=topic,)
actual = SecurityCenterClient.topic_path(project, topic)
assert expected == actual
def test_parse_topic_path():
expected = {
"project": "cuttlefish",
"topic": "mussel",
}
path = SecurityCenterClient.topic_path(**expected)
# Check that the path construction is reversible.
actual = SecurityCenterClient.parse_topic_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "winkle"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = SecurityCenterClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nautilus",
}
path = SecurityCenterClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = SecurityCenterClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "scallop"
expected = "folders/{folder}".format(folder=folder,)
actual = SecurityCenterClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "abalone",
}
path = SecurityCenterClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = SecurityCenterClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "squid"
expected = "organizations/{organization}".format(organization=organization,)
actual = SecurityCenterClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "clam",
}
path = SecurityCenterClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = SecurityCenterClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "whelk"
expected = "projects/{project}".format(project=project,)
actual = SecurityCenterClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "octopus",
}
path = SecurityCenterClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = SecurityCenterClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "oyster"
location = "nudibranch"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = SecurityCenterClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "cuttlefish",
"location": "mussel",
}
path = SecurityCenterClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = SecurityCenterClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.SecurityCenterTransport, "_prep_wrapped_messages"
) as prep:
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.SecurityCenterTransport, "_prep_wrapped_messages"
) as prep:
transport_class = SecurityCenterClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = SecurityCenterAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = SecurityCenterClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(SecurityCenterClient, transports.SecurityCenterGrpcTransport),
(SecurityCenterAsyncClient, transports.SecurityCenterGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
googleapis/python-securitycenter
|
tests/unit/gapic/securitycenter_v1/test_security_center.py
|
Python
|
apache-2.0
| 435,450
|
[
"Octopus"
] |
96640d2b3c35ae3f44383149a8eefdf8fc709e19a8ddec5f6f7313b37d1632da
|
# (c) 2013-2016, Michael DeHaan <michael.dehaan@gmail.com>
# Stephen Fromm <sfromm@gmail.com>
# Brian Coca <briancoca+dev@gmail.com>
# Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import codecs
import os
import os.path
import re
import tempfile
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum_s
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False, decrypt=True):
''' assemble a file from a directory of fragments '''
tmpfd, temp_path = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
tmp = os.fdopen(tmpfd, 'wb')
delimit_me = False
add_newline = False
for f in (to_text(p, errors='surrogate_or_strict') for p in sorted(os.listdir(src_path))):
if compiled_regexp and not compiled_regexp.search(f):
continue
fragment = u"%s/%s" % (src_path, f)
if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
continue
with open(self._loader.get_real_file(fragment, decrypt=decrypt), 'rb') as fragment_fh:
fragment_content = fragment_fh.read()
# always put a newline between fragments if the previous fragment didn't end with a newline.
if add_newline:
tmp.write(b'\n')
# delimiters should only appear between fragments
if delimit_me:
if delimiter:
# un-escape anything like newlines
delimiter = codecs.escape_decode(delimiter)[0]
tmp.write(delimiter)
# always make sure there's a newline after the
# delimiter, so lines don't run together
                    # compare bytes to bytes (indexing a bytes object would yield an int on Python 3)
                    if delimiter[-1:] != b'\n':
tmp.write(b'\n')
tmp.write(fragment_content)
delimit_me = True
if fragment_content.endswith(b'\n'):
add_newline = False
else:
add_newline = True
tmp.close()
return temp_path
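    # A hedged sketch (not part of the original plugin) of what the helper above
    # produces; the directory and delimiter values are hypothetical:
    #
    #   tmp_path = self._assemble_from_fragments(
    #       '/etc/app/conf.d',                   # fragments are read in sorted order
    #       delimiter='# --- next fragment ---', # written between fragments
    #       ignore_hidden=True)                  # dot-files are skipped
    #
    # tmp_path is a local temporary file holding the concatenated fragments;
    # run() below transfers it to the target via the copy module when its
    # checksum differs from the current destination file.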
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = False
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if task_vars is None:
task_vars = dict()
src = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
delimiter = self._task.args.get('delimiter', None)
remote_src = self._task.args.get('remote_src', 'yes')
regexp = self._task.args.get('regexp', None)
follow = self._task.args.get('follow', False)
ignore_hidden = self._task.args.get('ignore_hidden', False)
decrypt = self._task.args.pop('decrypt', True)
try:
if src is None or dest is None:
raise AnsibleActionFail("src and dest are required")
if boolean(remote_src, strict=False):
# call assemble via ansible.legacy to allow library/ overrides of the module without collection search
result.update(self._execute_module(module_name='ansible.legacy.assemble', task_vars=task_vars))
raise _AnsibleActionDone()
else:
try:
src = self._find_needle('files', src)
except AnsibleError as e:
raise AnsibleActionFail(to_native(e))
if not os.path.isdir(src):
raise AnsibleActionFail(u"Source (%s) is not a directory" % src)
_re = None
if regexp is not None:
_re = re.compile(regexp)
# Does all work assembling the file
path = self._assemble_from_fragments(src, delimiter, _re, ignore_hidden, decrypt)
path_checksum = checksum_s(path)
dest = self._remote_expand_user(dest)
dest_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=follow)
diff = {}
# setup args for running modules
new_module_args = self._task.args.copy()
# clean assemble specific options
for opt in ['remote_src', 'regexp', 'delimiter', 'ignore_hidden', 'decrypt']:
if opt in new_module_args:
del new_module_args[opt]
new_module_args['dest'] = dest
if path_checksum != dest_stat['checksum']:
if self._play_context.diff:
diff = self._get_diff_data(dest, path, task_vars)
remote_path = self._connection._shell.join_path(self._connection._shell.tmpdir, 'src')
xfered = self._transfer_file(path, remote_path)
# fix file permissions when the copy is done as a different user
self._fixup_perms2((self._connection._shell.tmpdir, remote_path))
new_module_args.update(dict(src=xfered,))
res = self._execute_module(module_name='ansible.legacy.copy', module_args=new_module_args, task_vars=task_vars)
if diff:
res['diff'] = diff
result.update(res)
else:
result.update(self._execute_module(module_name='ansible.legacy.file', module_args=new_module_args, task_vars=task_vars))
except AnsibleAction as e:
result.update(e.result)
finally:
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
|
s-hertel/ansible
|
lib/ansible/plugins/action/assemble.py
|
Python
|
gpl-3.0
| 6,573
|
[
"Brian"
] |
625815df36c1fe03ad545c9c4c1f5e0b5bb5e9eb2c311a9830385beaeaf85158
|
# Author: Travis Oliphant 2001
# Author: Nathan Woods 2013 (nquad &c)
import sys
import warnings
from functools import partial
from . import _quadpack
import numpy
from numpy import Inf
__all__ = ['quad', 'dblquad', 'tplquad', 'nquad', 'quad_explain',
'IntegrationWarning']
error = _quadpack.error
class IntegrationWarning(UserWarning):
"""
Warning on issues during integration.
"""
pass
def quad_explain(output=sys.stdout):
"""
Print extra information about integrate.quad() parameters and returns.
Parameters
----------
output : instance with "write" method, optional
Information about `quad` is passed to ``output.write()``.
Default is ``sys.stdout``.
Returns
-------
None
Examples
--------
    We can print detailed information about the `integrate.quad` function to stdout:
>>> from scipy.integrate import quad_explain
>>> quad_explain()
"""
output.write(quad.__doc__)
def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8,
limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50,
limlst=50):
"""
Compute a definite integral.
Integrate func from `a` to `b` (possibly infinite interval) using a
technique from the Fortran library QUADPACK.
Parameters
----------
func : {function, scipy.LowLevelCallable}
A Python function or method to integrate. If `func` takes many
arguments, it is integrated along the axis corresponding to the
first argument.
If the user desires improved integration performance, then `f` may
be a `scipy.LowLevelCallable` with one of the signatures::
double func(double x)
double func(double x, void *user_data)
double func(int n, double *xx)
double func(int n, double *xx, void *user_data)
The ``user_data`` is the data contained in the `scipy.LowLevelCallable`.
In the call forms with ``xx``, ``n`` is the length of the ``xx``
array which contains ``xx[0] == x`` and the rest of the items are
numbers contained in the ``args`` argument of quad.
In addition, certain ctypes call signatures are supported for
backward compatibility, but those should not be used in new code.
a : float
Lower limit of integration (use -numpy.inf for -infinity).
b : float
Upper limit of integration (use numpy.inf for +infinity).
args : tuple, optional
Extra arguments to pass to `func`.
full_output : int, optional
Non-zero to return a dictionary of integration information.
If non-zero, warning messages are also suppressed and the
message is appended to the output tuple.
Returns
-------
y : float
The integral of func from `a` to `b`.
abserr : float
An estimate of the absolute error in the result.
infodict : dict
A dictionary containing additional information.
Run scipy.integrate.quad_explain() for more information.
message
A convergence message.
explain
Appended only with 'cos' or 'sin' weighting and infinite
integration limits, it contains an explanation of the codes in
infodict['ierlst']
Other Parameters
----------------
epsabs : float or int, optional
Absolute error tolerance. Default is 1.49e-8. `quad` tries to obtain
an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = integral of `func` from `a` to `b`, and ``result`` is the
numerical approximation. See `epsrel` below.
epsrel : float or int, optional
Relative error tolerance. Default is 1.49e-8.
If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
and ``50 * (machine epsilon)``. See `epsabs` above.
limit : float or int, optional
An upper bound on the number of subintervals used in the adaptive
algorithm.
points : (sequence of floats,ints), optional
A sequence of break points in the bounded integration interval
where local difficulties of the integrand may occur (e.g.,
singularities, discontinuities). The sequence does not have
to be sorted. Note that this option cannot be used in conjunction
with ``weight``.
weight : float or int, optional
String indicating weighting function. Full explanation for this
and the remaining arguments can be found below.
wvar : optional
Variables for use with weighting functions.
wopts : optional
Optional input for reusing Chebyshev moments.
maxp1 : float or int, optional
An upper bound on the number of Chebyshev moments.
limlst : int, optional
Upper bound on the number of cycles (>=3) for use with a sinusoidal
weighting and an infinite end-point.
See Also
--------
dblquad : double integral
tplquad : triple integral
nquad : n-dimensional integrals (uses `quad` recursively)
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
odeint : ODE integrator
ode : ODE integrator
simpson : integrator for sampled data
romb : integrator for sampled data
scipy.special : for coefficients and roots of orthogonal polynomials
Notes
-----
**Extra information for quad() inputs and outputs**
If full_output is non-zero, then the third output argument
(infodict) is a dictionary with entries as tabulated below. For
infinite limits, the range is transformed to (0,1) and the
optional outputs are given with respect to this transformed range.
Let M be the input argument limit and let K be infodict['last'].
The entries are:
'neval'
The number of function evaluations.
'last'
The number, K, of subintervals produced in the subdivision process.
'alist'
A rank-1 array of length M, the first K elements of which are the
left end points of the subintervals in the partition of the
integration range.
'blist'
A rank-1 array of length M, the first K elements of which are the
right end points of the subintervals.
'rlist'
A rank-1 array of length M, the first K elements of which are the
integral approximations on the subintervals.
'elist'
A rank-1 array of length M, the first K elements of which are the
moduli of the absolute error estimates on the subintervals.
'iord'
A rank-1 integer array of length M, the first L elements of
which are pointers to the error estimates over the subintervals
with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the
sequence ``infodict['iord']`` and let E be the sequence
``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a
decreasing sequence.
If the input argument points is provided (i.e., it is not None),
the following additional outputs are placed in the output
dictionary. Assume the points sequence is of length P.
'pts'
A rank-1 array of length P+2 containing the integration limits
and the break points of the intervals in ascending order.
This is an array giving the subintervals over which integration
will occur.
'level'
A rank-1 integer array of length M (=limit), containing the
subdivision levels of the subintervals, i.e., if (aa,bb) is a
subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]``
are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l
if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``.
'ndin'
A rank-1 integer array of length P+2. After the first integration
over the intervals (pts[1], pts[2]), the error estimates over some
of the intervals may have been increased artificially in order to
put their subdivision forward. This array has ones in slots
corresponding to the subintervals for which this happens.
**Weighting the integrand**
The input variables, *weight* and *wvar*, are used to weight the
integrand by a select list of functions. Different integration
methods are used to compute the integral with these weighting
functions, and these do not support specifying break points. The
possible values of weight and the corresponding weighting functions are.
========== =================================== =====================
``weight`` Weight function used ``wvar``
========== =================================== =====================
'cos' cos(w*x) wvar = w
'sin' sin(w*x) wvar = w
'alg' g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta)
'alg-loga' g(x)*log(x-a) wvar = (alpha, beta)
'alg-logb' g(x)*log(b-x) wvar = (alpha, beta)
'alg-log' g(x)*log(x-a)*log(b-x) wvar = (alpha, beta)
'cauchy' 1/(x-c) wvar = c
========== =================================== =====================
wvar holds the parameter w, (alpha, beta), or c depending on the weight
selected. In these expressions, a and b are the integration limits.
For the 'cos' and 'sin' weighting, additional inputs and outputs are
available.
For finite integration limits, the integration is performed using a
Clenshaw-Curtis method which uses Chebyshev moments. For repeated
calculations, these moments are saved in the output dictionary:
'momcom'
The maximum level of Chebyshev moments that have been computed,
i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been
computed for intervals of length ``|b-a| * 2**(-l)``,
``l=0,1,...,M_c``.
'nnlog'
A rank-1 integer array of length M(=limit), containing the
subdivision levels of the subintervals, i.e., an element of this
array is equal to l if the corresponding subinterval is
``|b-a|* 2**(-l)``.
'chebmo'
A rank-2 array of shape (25, maxp1) containing the computed
Chebyshev moments. These can be passed on to an integration
over the same interval by passing this array as the second
element of the sequence wopts and passing infodict['momcom'] as
the first element.
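    As a sketch of that reuse (variable names hypothetical): after a first call
    ``y, err, info = quad(f, a, b, weight='cos', wvar=w, full_output=1)``, a
    second call over the same interval may pass
    ``wopts=(info['momcom'], info['chebmo'])`` so the moments are not recomputed.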
If one of the integration limits is infinite, then a Fourier integral is
computed (assuming w neq 0). If full_output is 1 and a numerical error
is encountered, besides the error message attached to the output tuple,
a dictionary is also appended to the output tuple which translates the
error codes in the array ``info['ierlst']`` to English messages. The
output information dictionary contains the following entries instead of
'last', 'alist', 'blist', 'rlist', and 'elist':
'lst'
The number of subintervals needed for the integration (call it ``K_f``).
'rslst'
A rank-1 array of length M_f=limlst, whose first ``K_f`` elements
contain the integral contribution over the interval
``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|``
and ``k=1,2,...,K_f``.
'erlst'
A rank-1 array of length ``M_f`` containing the error estimate
corresponding to the interval in the same position in
``infodict['rslist']``.
'ierlst'
A rank-1 integer array of length ``M_f`` containing an error flag
corresponding to the interval in the same position in
``infodict['rslist']``. See the explanation dictionary (last entry
in the output tuple) for the meaning of the codes.
Examples
--------
Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result
>>> from scipy import integrate
>>> x2 = lambda x: x**2
>>> integrate.quad(x2, 0, 4)
(21.333333333333332, 2.3684757858670003e-13)
>>> print(4**3 / 3.) # analytical result
21.3333333333
Calculate :math:`\\int^\\infty_0 e^{-x} dx`
    >>> import numpy as np
    >>> invexp = lambda x: np.exp(-x)
>>> integrate.quad(invexp, 0, np.inf)
(1.0, 5.842605999138044e-11)
>>> f = lambda x,a : a*x
>>> y, err = integrate.quad(f, 0, 1, args=(1,))
>>> y
0.5
>>> y, err = integrate.quad(f, 0, 1, args=(3,))
>>> y
1.5
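    A minimal sketch of the weighted forms described in the Notes above (the
    numerical result is intentionally not shown): with ``weight='cos'`` and
    ``wvar=w`` the routine integrates ``func(x)*cos(w*x)`` over the interval.
    >>> y, err = integrate.quad(lambda x: x**3, 0, 1, weight='cos', wvar=10)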
Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding
y parameter as 1::
testlib.c =>
double func(int n, double args[n]){
return args[0]*args[0] + args[1]*args[1];}
compile to library testlib.*
::
from scipy import integrate
import ctypes
lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path
lib.func.restype = ctypes.c_double
lib.func.argtypes = (ctypes.c_int,ctypes.c_double)
integrate.quad(lib.func,0,1,(1))
#(1.3333333333333333, 1.4802973661668752e-14)
print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result
# 1.3333333333333333
Be aware that pulse shapes and other sharp features as compared to the
size of the integration interval may not be integrated correctly using
this method. A simplified example of this limitation is integrating a
y-axis reflected step function with many zero values within the integrals
bounds.
>>> y = lambda x: 1 if x<=0 else 0
>>> integrate.quad(y, -1, 1)
(1.0, 1.1102230246251565e-14)
>>> integrate.quad(y, -1, 100)
(1.0000000002199108, 1.0189464580163188e-08)
>>> integrate.quad(y, -1, 10000)
(0.0, 0.0)
"""
if not isinstance(args, tuple):
args = (args,)
# check the limits of integration: \int_a^b, expect a < b
flip, a, b = b < a, min(a, b), max(a, b)
if weight is None:
retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
points)
else:
if points is not None:
msg = ("Break points cannot be specified when using weighted integrand.\n"
"Continuing, ignoring specified points.")
warnings.warn(msg, IntegrationWarning, stacklevel=2)
retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
limlst, limit, maxp1, weight, wvar, wopts)
if flip:
retval = (-retval[0],) + retval[1:]
ier = retval[-1]
if ier == 0:
return retval[:-1]
msgs = {80: "A Python error occurred possibly while calling the function.",
1: "The maximum number of subdivisions (%d) has been achieved.\n If increasing the limit yields no improvement it is advised to analyze \n the integrand in order to determine the difficulties. If the position of a \n local difficulty can be determined (singularity, discontinuity) one will \n probably gain from splitting up the interval and calling the integrator \n on the subranges. Perhaps a special-purpose integrator should be used." % limit,
2: "The occurrence of roundoff error is detected, which prevents \n the requested tolerance from being achieved. The error may be \n underestimated.",
3: "Extremely bad integrand behavior occurs at some points of the\n integration interval.",
4: "The algorithm does not converge. Roundoff error is detected\n in the extrapolation table. It is assumed that the requested tolerance\n cannot be achieved, and that the returned result (if full_output = 1) is \n the best which can be obtained.",
5: "The integral is probably divergent, or slowly convergent.",
6: "The input is invalid.",
7: "Abnormal termination of the routine. The estimates for result\n and error are less reliable. It is assumed that the requested accuracy\n has not been achieved.",
'unknown': "Unknown error."}
if weight in ['cos','sin'] and (b == Inf or a == -Inf):
msgs[1] = "The maximum number of cycles allowed has been achieved., e.e.\n of subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n *pi/abs(omega), for k = 1, 2, ..., lst. One can allow more cycles by increasing the value of limlst. Look at info['ierlst'] with full_output=1."
msgs[4] = "The extrapolation table constructed for convergence acceleration\n of the series formed by the integral contributions over the cycles, \n does not converge to within the requested accuracy. Look at \n info['ierlst'] with full_output=1."
msgs[7] = "Bad integrand behavior occurs within one or more of the cycles.\n Location and type of the difficulty involved can be determined from \n the vector info['ierlist'] obtained with full_output=1."
explain = {1: "The maximum number of subdivisions (= limit) has been \n achieved on this cycle.",
2: "The occurrence of roundoff error is detected and prevents\n the tolerance imposed on this cycle from being achieved.",
3: "Extremely bad integrand behavior occurs at some points of\n this cycle.",
4: "The integral over this cycle does not converge (to within the required accuracy) due to roundoff in the extrapolation procedure invoked on this cycle. It is assumed that the result on this interval is the best which can be obtained.",
5: "The integral over this cycle is probably divergent or slowly convergent."}
try:
msg = msgs[ier]
except KeyError:
msg = msgs['unknown']
if ier in [1,2,3,4,5,7]:
if full_output:
if weight in ['cos', 'sin'] and (b == Inf or a == -Inf):
return retval[:-1] + (msg, explain)
else:
return retval[:-1] + (msg,)
else:
warnings.warn(msg, IntegrationWarning, stacklevel=2)
return retval[:-1]
elif ier == 6: # Forensic decision tree when QUADPACK throws ier=6
if epsabs <= 0: # Small error tolerance - applies to all methods
if epsrel < max(50 * sys.float_info.epsilon, 5e-29):
msg = ("If 'epsabs'<=0, 'epsrel' must be greater than both"
" 5e-29 and 50*(machine epsilon).")
elif weight in ['sin', 'cos'] and (abs(a) + abs(b) == Inf):
msg = ("Sine or cosine weighted intergals with infinite domain"
" must have 'epsabs'>0.")
elif weight is None:
if points is None: # QAGSE/QAGIE
msg = ("Invalid 'limit' argument. There must be"
" at least one subinterval")
else: # QAGPE
if not (min(a, b) <= min(points) <= max(points) <= max(a, b)):
msg = ("All break points in 'points' must lie within the"
" integration limits.")
elif len(points) >= limit:
msg = ("Number of break points ({:d})"
" must be less than subinterval"
" limit ({:d})").format(len(points), limit)
else:
if maxp1 < 1:
msg = "Chebyshev moment limit maxp1 must be >=1."
elif weight in ('cos', 'sin') and abs(a+b) == Inf: # QAWFE
msg = "Cycle limit limlst must be >=3."
elif weight.startswith('alg'): # QAWSE
if min(wvar) < -1:
msg = "wvar parameters (alpha, beta) must both be >= -1."
if b < a:
msg = "Integration limits a, b must satistfy a<b."
elif weight == 'cauchy' and wvar in (a, b):
msg = ("Parameter 'wvar' must not equal"
" integration limits 'a' or 'b'.")
raise ValueError(msg)
def _quad(func,a,b,args,full_output,epsabs,epsrel,limit,points):
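    # Dispatch helper: map the (possibly infinite) limits onto the QUADPACK
    # entry points. infbounds encodes which end(s) are infinite:
    #   0 -> both limits finite     (QAGSE, or QAGPE when break points are given)
    #   1 -> upper limit is +Inf    (QAGIE with bound = a)
    #   2 -> both limits infinite   (QAGIE, bound ignored)
    #  -1 -> lower limit is -Inf    (QAGIE with bound = b)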
infbounds = 0
if (b != Inf and a != -Inf):
pass # standard integration
elif (b == Inf and a != -Inf):
infbounds = 1
bound = a
elif (b == Inf and a == -Inf):
infbounds = 2
bound = 0 # ignored
elif (b != Inf and a == -Inf):
infbounds = -1
bound = b
else:
raise RuntimeError("Infinity comparisons don't work for you.")
if points is None:
if infbounds == 0:
return _quadpack._qagse(func,a,b,args,full_output,epsabs,epsrel,limit)
else:
return _quadpack._qagie(func,bound,infbounds,args,full_output,epsabs,epsrel,limit)
else:
if infbounds != 0:
raise ValueError("Infinity inputs cannot be used with break points.")
else:
#Duplicates force function evaluation at singular points
the_points = numpy.unique(points)
the_points = the_points[a < the_points]
the_points = the_points[the_points < b]
the_points = numpy.concatenate((the_points, (0., 0.)))
return _quadpack._qagpe(func,a,b,the_points,args,full_output,epsabs,epsrel,limit)
def _quad_weight(func,a,b,args,full_output,epsabs,epsrel,limlst,limit,maxp1,weight,wvar,wopts):
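    # Dispatch helper for weighted integrands: 'cos'/'sin' go to the oscillatory
    # routines (QAWOE for finite limits, QAWFE for a semi-infinite range, with a
    # sign flip when only the lower limit is infinite), the algebraic/logarithmic
    # weights go to QAWSE, and 'cauchy' goes to QAWCE. strdict translates the
    # weight name into the integer code the Fortran routines expect.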
if weight not in ['cos','sin','alg','alg-loga','alg-logb','alg-log','cauchy']:
raise ValueError("%s not a recognized weighting function." % weight)
strdict = {'cos':1,'sin':2,'alg':1,'alg-loga':2,'alg-logb':3,'alg-log':4}
if weight in ['cos','sin']:
integr = strdict[weight]
if (b != Inf and a != -Inf): # finite limits
if wopts is None: # no precomputed Chebyshev moments
return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
epsabs, epsrel, limit, maxp1,1)
else: # precomputed Chebyshev moments
momcom = wopts[0]
chebcom = wopts[1]
return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
epsabs, epsrel, limit, maxp1, 2, momcom, chebcom)
elif (b == Inf and a != -Inf):
return _quadpack._qawfe(func, a, wvar, integr, args, full_output,
epsabs,limlst,limit,maxp1)
elif (b != Inf and a == -Inf): # remap function and interval
if weight == 'cos':
def thefunc(x,*myargs):
y = -x
func = myargs[0]
myargs = (y,) + myargs[1:]
return func(*myargs)
else:
def thefunc(x,*myargs):
y = -x
func = myargs[0]
myargs = (y,) + myargs[1:]
return -func(*myargs)
args = (func,) + args
return _quadpack._qawfe(thefunc, -b, wvar, integr, args,
full_output, epsabs, limlst, limit, maxp1)
else:
raise ValueError("Cannot integrate with this weight from -Inf to +Inf.")
else:
if a in [-Inf,Inf] or b in [-Inf,Inf]:
raise ValueError("Cannot integrate with this weight over an infinite interval.")
if weight.startswith('alg'):
integr = strdict[weight]
return _quadpack._qawse(func, a, b, wvar, integr, args,
full_output, epsabs, epsrel, limit)
else: # weight == 'cauchy'
return _quadpack._qawce(func, a, b, wvar, args, full_output,
epsabs, epsrel, limit)
def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
"""
Compute a double integral.
Return the double (definite) integral of ``func(y, x)`` from ``x = a..b``
and ``y = gfun(x)..hfun(x)``.
Parameters
----------
func : callable
A Python function or method of at least two variables: y must be the
first argument and x the second argument.
a, b : float
The limits of integration in x: `a` < `b`
gfun : callable or float
The lower boundary curve in y which is a function taking a single
floating point argument (x) and returning a floating point result
or a float indicating a constant boundary curve.
hfun : callable or float
The upper boundary curve in y (same requirements as `gfun`).
args : sequence, optional
Extra arguments to pass to `func`.
epsabs : float, optional
Absolute tolerance passed directly to the inner 1-D quadrature
        integration. Default is 1.49e-8. `dblquad` tries to obtain
an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = inner integral of ``func(y, x)`` from ``gfun(x)``
to ``hfun(x)``, and ``result`` is the numerical approximation.
See `epsrel` below.
epsrel : float, optional
Relative tolerance of the inner 1-D integrals. Default is 1.49e-8.
If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
and ``50 * (machine epsilon)``. See `epsabs` above.
Returns
-------
y : float
The resultant integral.
abserr : float
An estimate of the error.
See also
--------
quad : single integral
tplquad : triple integral
nquad : N-dimensional integrals
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
odeint : ODE integrator
ode : ODE integrator
simpson : integrator for sampled data
romb : integrator for sampled data
scipy.special : for coefficients and roots of orthogonal polynomials
Examples
--------
Compute the double integral of ``x * y**2`` over the box
``x`` ranging from 0 to 2 and ``y`` ranging from 0 to 1.
>>> from scipy import integrate
>>> f = lambda y, x: x*y**2
>>> integrate.dblquad(f, 0, 2, lambda x: 0, lambda x: 1)
(0.6666666666666667, 7.401486830834377e-15)
"""
def temp_ranges(*args):
return [gfun(args[0]) if callable(gfun) else gfun,
hfun(args[0]) if callable(hfun) else hfun]
return nquad(func, [temp_ranges, [a, b]], args=args,
opts={"epsabs": epsabs, "epsrel": epsrel})
def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8,
epsrel=1.49e-8):
"""
Compute a triple (definite) integral.
Return the triple integral of ``func(z, y, x)`` from ``x = a..b``,
``y = gfun(x)..hfun(x)``, and ``z = qfun(x,y)..rfun(x,y)``.
Parameters
----------
func : function
A Python function or method of at least three variables in the
order (z, y, x).
a, b : float
The limits of integration in x: `a` < `b`
gfun : function or float
The lower boundary curve in y which is a function taking a single
floating point argument (x) and returning a floating point result
or a float indicating a constant boundary curve.
hfun : function or float
The upper boundary curve in y (same requirements as `gfun`).
qfun : function or float
The lower boundary surface in z. It must be a function that takes
two floats in the order (x, y) and returns a float or a float
indicating a constant boundary surface.
rfun : function or float
The upper boundary surface in z. (Same requirements as `qfun`.)
args : tuple, optional
Extra arguments to pass to `func`.
epsabs : float, optional
Absolute tolerance passed directly to the innermost 1-D quadrature
integration. Default is 1.49e-8.
epsrel : float, optional
Relative tolerance of the innermost 1-D integrals. Default is 1.49e-8.
Returns
-------
y : float
The resultant integral.
abserr : float
An estimate of the error.
See Also
--------
quad: Adaptive quadrature using QUADPACK
quadrature: Adaptive Gaussian quadrature
fixed_quad: Fixed-order Gaussian quadrature
dblquad: Double integrals
nquad : N-dimensional integrals
romb: Integrators for sampled data
simpson: Integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
scipy.special: For coefficients and roots of orthogonal polynomials
Examples
--------
Compute the triple integral of ``x * y * z``, over ``x`` ranging
from 1 to 2, ``y`` ranging from 2 to 3, ``z`` ranging from 0 to 1.
>>> from scipy import integrate
>>> f = lambda z, y, x: x*y*z
>>> integrate.tplquad(f, 1, 2, lambda x: 2, lambda x: 3,
... lambda x, y: 0, lambda x, y: 1)
(1.8750000000000002, 3.324644794257407e-14)
"""
# f(z, y, x)
# qfun/rfun (x, y)
# gfun/hfun(x)
# nquad will hand (y, x, t0, ...) to ranges0
# nquad will hand (x, t0, ...) to ranges1
# Stupid different API...
def ranges0(*args):
return [qfun(args[1], args[0]) if callable(qfun) else qfun,
rfun(args[1], args[0]) if callable(rfun) else rfun]
def ranges1(*args):
return [gfun(args[0]) if callable(gfun) else gfun,
hfun(args[0]) if callable(hfun) else hfun]
ranges = [ranges0, ranges1, [a, b]]
return nquad(func, ranges, args=args,
opts={"epsabs": epsabs, "epsrel": epsrel})
def nquad(func, ranges, args=None, opts=None, full_output=False):
"""
Integration over multiple variables.
Wraps `quad` to enable integration over multiple variables.
Various options allow improved integration of discontinuous functions, as
well as the use of weighted integration, and generally finer control of the
integration process.
Parameters
----------
func : {callable, scipy.LowLevelCallable}
The function to be integrated. Has arguments of ``x0, ... xn``,
``t0, ... tm``, where integration is carried out over ``x0, ... xn``,
        which must be floats. The ``t0, ... tm`` are extra arguments
        passed in ``args``.
Function signature should be ``func(x0, x1, ..., xn, t0, t1, ..., tm)``.
Integration is carried out in order. That is, integration over ``x0``
is the innermost integral, and ``xn`` is the outermost.
If the user desires improved integration performance, then `f` may
be a `scipy.LowLevelCallable` with one of the signatures::
double func(int n, double *xx)
double func(int n, double *xx, void *user_data)
where ``n`` is the number of variables and args. The ``xx`` array
contains the coordinates and extra arguments. ``user_data`` is the data
contained in the `scipy.LowLevelCallable`.
ranges : iterable object
Each element of ranges may be either a sequence of 2 numbers, or else
a callable that returns such a sequence. ``ranges[0]`` corresponds to
integration over x0, and so on. If an element of ranges is a callable,
then it will be called with all of the integration arguments available,
as well as any parametric arguments. e.g., if
``func = f(x0, x1, x2, t0, t1)``, then ``ranges[0]`` may be defined as
either ``(a, b)`` or else as ``(a, b) = range0(x1, x2, t0, t1)``.
args : iterable object, optional
Additional arguments ``t0, ..., tn``, required by `func`, `ranges`, and
``opts``.
opts : iterable object or dict, optional
Options to be passed to `quad`. May be empty, a dict, or
a sequence of dicts or functions that return a dict. If empty, the
default options from scipy.integrate.quad are used. If a dict, the same
        options are used for all levels of integration. If a sequence, then each
element of the sequence corresponds to a particular integration. e.g.,
opts[0] corresponds to integration over x0, and so on. If a callable,
the signature must be the same as for ``ranges``. The available
options together with their default values are:
- epsabs = 1.49e-08
- epsrel = 1.49e-08
- limit = 50
- points = None
- weight = None
- wvar = None
- wopts = None
For more information on these options, see `quad` and `quad_explain`.
full_output : bool, optional
Partial implementation of ``full_output`` from scipy.integrate.quad.
The number of integrand function evaluations ``neval`` can be obtained
by setting ``full_output=True`` when calling nquad.
Returns
-------
result : float
The result of the integration.
abserr : float
The maximum of the estimates of the absolute error in the various
integration results.
out_dict : dict, optional
A dict containing additional information on the integration.
See Also
--------
quad : 1-D numerical integration
dblquad, tplquad : double and triple integrals
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
Examples
--------
    >>> import numpy as np
    >>> from scipy import integrate
>>> func = lambda x0,x1,x2,x3 : x0**2 + x1*x2 - x3**3 + np.sin(x0) + (
... 1 if (x0-.2*x3-.5-.25*x1>0) else 0)
>>> def opts0(*args, **kwargs):
... return {'points':[0.2*args[2] + 0.5 + 0.25*args[0]]}
>>> integrate.nquad(func, [[0,1], [-1,1], [.13,.8], [-.15,1]],
... opts=[opts0,{},{},{}], full_output=True)
(1.5267454070738633, 2.9437360001402324e-14, {'neval': 388962})
>>> scale = .1
>>> def func2(x0, x1, x2, x3, t0, t1):
... return x0*x1*x3**2 + np.sin(x2) + 1 + (1 if x0+t1*x1-t0>0 else 0)
>>> def lim0(x1, x2, x3, t0, t1):
... return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1,
... scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1]
>>> def lim1(x2, x3, t0, t1):
... return [scale * (t0*x2 + t1*x3) - 1,
... scale * (t0*x2 + t1*x3) + 1]
>>> def lim2(x3, t0, t1):
... return [scale * (x3 + t0**2*t1**3) - 1,
... scale * (x3 + t0**2*t1**3) + 1]
>>> def lim3(t0, t1):
... return [scale * (t0+t1) - 1, scale * (t0+t1) + 1]
>>> def opts0(x1, x2, x3, t0, t1):
... return {'points' : [t0 - t1*x1]}
>>> def opts1(x2, x3, t0, t1):
... return {}
>>> def opts2(x3, t0, t1):
... return {}
>>> def opts3(t0, t1):
... return {}
>>> integrate.nquad(func2, [lim0, lim1, lim2, lim3], args=(0,0),
... opts=[opts0, opts1, opts2, opts3])
(25.066666666666666, 2.7829590483937256e-13)
"""
depth = len(ranges)
ranges = [rng if callable(rng) else _RangeFunc(rng) for rng in ranges]
if args is None:
args = ()
if opts is None:
opts = [dict([])] * depth
if isinstance(opts, dict):
opts = [_OptFunc(opts)] * depth
else:
opts = [opt if callable(opt) else _OptFunc(opt) for opt in opts]
return _NQuad(func, ranges, opts, full_output).integrate(*args)
class _RangeFunc:
def __init__(self, range_):
self.range_ = range_
def __call__(self, *args):
"""Return stored value.
*args needed because range_ can be float or func, and is called with
variable number of parameters.
"""
return self.range_
class _OptFunc:
def __init__(self, opt):
self.opt = opt
def __call__(self, *args):
"""Return stored dict."""
return self.opt
class _NQuad:
def __init__(self, func, ranges, opts, full_output):
self.abserr = 0
self.func = func
self.ranges = ranges
self.opts = opts
self.maxdepth = len(ranges)
self.full_output = full_output
if self.full_output:
self.out_dict = {'neval': 0}
def integrate(self, *args, **kwargs):
depth = kwargs.pop('depth', 0)
if kwargs:
raise ValueError('unexpected kwargs')
# Get the integration range and options for this depth.
ind = -(depth + 1)
fn_range = self.ranges[ind]
low, high = fn_range(*args)
fn_opt = self.opts[ind]
opt = dict(fn_opt(*args))
if 'points' in opt:
opt['points'] = [x for x in opt['points'] if low <= x <= high]
if depth + 1 == self.maxdepth:
f = self.func
else:
f = partial(self.integrate, depth=depth+1)
quad_r = quad(f, low, high, args=args, full_output=self.full_output,
**opt)
value = quad_r[0]
abserr = quad_r[1]
if self.full_output:
infodict = quad_r[2]
# The 'neval' parameter in full_output returns the total
# number of times the integrand function was evaluated.
# Therefore, only the innermost integration loop counts.
if depth + 1 == self.maxdepth:
self.out_dict['neval'] += infodict['neval']
self.abserr = max(self.abserr, abserr)
if depth > 0:
return value
else:
# Final result of N-D integration with error
if self.full_output:
return value, self.abserr, self.out_dict
else:
return value, self.abserr
|
WarrenWeckesser/scipy
|
scipy/integrate/quadpack.py
|
Python
|
bsd-3-clause
| 37,343
|
[
"Gaussian"
] |
0392816bc66527b464631ed46dcc151bdc2b33e8aaccf1ad4a6d08d6077d74ce
|
## Generate plots of quantities from a measurment set
##
## $Id: plotiterator.py,v 1.25 2017-02-21 09:10:05 jive_cc Exp $
##
## $Log: plotiterator.py,v $
## Revision 1.25 2017-02-21 09:10:05 jive_cc
## HV: * DesS requests normalized vector averaging - complex numbers are first
## normalized before being averaged. See "help avt" or "help avc".
##
## Revision 1.24 2017-01-27 13:50:28 jive_cc
## HV: * jplotter.py: small edits
## - "not refresh(e)" => "refresh(e); if not e.plots ..."
## - "e.rawplots.XXX" i.s.o. "e.plots.XXX"
## * relatively big overhaul: in order to force (old) pyrap to
## re-read tables from disk all table objects must call ".close()"
## when they're done.
## Implemented by patching the pyrap.tables.table object on the fly
## with '__enter__' and '__exit__' methods (see "ms2util.opentable(...)")
## such that all table access can be done in a "with ..." block:
## with ms2util.opentable(...) as tbl:
## tbl.getcol('DATA') # ...
## and then when the with-block is left, tbl gets automagically closed
##
## Revision 1.23 2015-12-09 07:02:11 jive_cc
## HV: * big change! the plotiterators now return a one-dimensional dict
## of label => dataset. The higher level code reorganizes them
## into plots, based on the 'new plot' settings. Many wins in this one:
## - the plotiterators only have to index one level in stead of two
## - when the 'new plot' setting is changed, we don't have to read the
## data from disk again [this is a *HUGE* improvement, especially for
## larger data sets]
## - the data set expression parser is simpler, it works on the
## one-dimensional 'list' of data sets and it does not have to
## flatten/unflatten any more
## * The code to deal with refreshing the plots has been rewritten a bit
## such that all necessary steps (re-organizing raw plots into plots,
## re-running the label processing, re-running the post processing,
## re-running the min/max processing) are executed only once; when
## necessary. And this code is now shared between the "pl" command and
## the "load/store" commands.
##
## Revision 1.22 2015-09-23 12:28:36 jive_cc
## HV: * Lorant S. requested sensible requests (ones that were already in the
## back of my mind too):
## - option to specify the data column
## - option to not reorder the spectral windows
## Both options are now supported by the code and are triggered by
## passing options to the "ms" command
##
## Revision 1.21 2015-04-29 14:34:55 jive_cc
## HV: * add support for plotting quantity vs UV-distance
##
## Revision 1.20 2015-04-08 14:34:12 jive_cc
## HV: * Correct checking of whether dataset.[xy] are of the numpy.ndarray
## persuasion
##
## Revision 1.19 2015-02-16 12:56:53 jive_cc
## HV: * Now that we do our own slicing, found that some of the index limits
## were off-by-one
##
## Revision 1.18 2015-02-02 08:55:22 jive_cc
## HV: * support for storing/loading plots, potentially manipulating them
## via arbitrary arithmetic expressions
## * helpfile layout improved
##
## Revision 1.17 2015-01-09 14:27:57 jive_cc
## HV: * fixed copy-paste error in weight-thresholded quantity-versus-time fn
## * sped up SOLINT processing by factor >= 2
## * output of ".... took XXXs" consistentified & beautified
## * removed "Need to convert ..." output; the predicted expected runtime
## was usually very wrong anyway.
##
## Revision 1.16 2015-01-09 00:02:27 jive_cc
## HV: * support for 'solint' - accumulate data in time bins of size 'solint'
## now also in "xxx vs time" plots. i.e. can be used to bring down data
## volume by e.g. averaging down to an arbitrary amount of seconds.
## * solint can now be more flexibly be set using d(ays), h(ours),
## m(inutes) and/or s(econds). Note that in the previous versions a
## unitless specification was acceptable, in this one no more.
##
## Revision 1.15 2014-11-28 14:25:04 jive_cc
## HV: * spelling error in variable name ...
##
## Revision 1.14 2014-11-26 14:56:21 jive_cc
## HV: * pycasa autodetection and use
##
## Revision 1.13 2014-05-14 17:35:15 jive_cc
## HV: * if weight threshold is applied this is annotated in the plot
## * the plotiterators have two implementations now, one with weight
## thresholding and one without. Until I find a method that is
## equally fast with/without weight masking
##
## Revision 1.12 2014-05-14 17:02:01 jive_cc
## HV: * Weight thresholding implemented - but maybe I'll double the code
## to two different functions, one with weight thresholding and one
## without because weight thresholding is sloooooow
##
## Revision 1.11 2014-05-12 21:27:28 jive_cc
## HV: * IF time was an essential part of a label, its resolution of 1second
## was not enough - made it 1/100th of a second. So now you can safely
## plot data sets with individual time stamps even if they're << 1 second
## apart
##
## Revision 1.10 2014-05-06 14:20:39 jive_cc
## HV: * Added marking capability
##
## Revision 1.9 2014-04-15 07:53:17 jive_cc
## HV: * time averaging now supports 'solint' = None => average all data in
## each time-range selection bin
##
## Revision 1.8 2014-04-14 21:04:44 jive_cc
## HV: * Information common to all plot- or data set labels is now stripped
## and displayed in the plot heading i.s.o in the plot- or data set label
##
## Revision 1.7 2014-04-14 14:46:05 jive_cc
## HV: * Uses pycasa.so for table data access waiting for pyrap to be fixed
## * added "indexr" + scan-based selection option
##
## Revision 1.6 2014-04-10 21:14:40 jive_cc
## HV: * I fell for the age-old Python trick where a default argument is
## initialized statically - all data sets were integrating into the
## the same arrays! Nice!
## * Fixed other efficiency measures: with time averaging data already
## IS in numarray so no conversion needs to be done
## * more improvements
##
## Revision 1.5 2014-04-09 08:26:46 jive_cc
## HV: * Ok, moved common plotiterator stuff into baseclass
##
## Revision 1.4 2014-04-08 23:34:13 jive_cc
## HV: * Minor fixes - should be better now
##
## Revision 1.3 2014-04-08 22:41:11 jive_cc
## HV: Finally! This might be release 0.1!
## * python based plot iteration now has tolerable speed
## (need to test on 8M row MS though)
## * added quite a few plot types, simplified plotters
## (plotiterators need a round of moving common functionality
## into base class)
## * added generic X/Y plotter
##
## Revision 1.2 2014-04-02 17:55:30 jive_cc
## HV: * another savegame, this time with basic plotiteration done in Python
##
## Revision 1.1 2013-12-12 14:10:16 jive_cc
## HV: * another savegame. Now going with pythonic based plotiterator,
## built around ms2util.reducems
##
##
import ms2util, hvutil, plots, jenums, itertools, copy, operator, numpy, math, imp, time, collections, functional
import pyrap.quanta
# Auto-detect of pycasa
havePyCasa = True
try:
import pycasa
print "*** using PyCasa for measurementset data access ***"
except:
havePyCasa = False
## Introduce some shorthands
NOW = time.time
CP = copy.deepcopy
AX = jenums.Axes
AVG = jenums.Averaging
YTypes = plots.YTypes
Quantity = collections.namedtuple('Quantity', ['quantity_name', 'quantity_fn'])
# we do not drag in all numpy.* names; by "resolving" them at this level
# we shave off a lot of python name lookups. this has an effect on code which
# is called a lot of times per second - like the code in here
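# e.g. (hypothetical micro-benchmark) the effect is easy to see with timeit:
#   python -m timeit -s "import numpy"                  "numpy.add(1.0, 2.0)"
#   python -m timeit -s "import numpy; ADD = numpy.add" "ADD(1.0, 2.0)"
# the second form skips the module attribute lookup on every single call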
ANY = numpy.any
ALL = numpy.all
ADD = numpy.add
SQRT = numpy.sqrt
SQUARE = numpy.square
ARRAY = numpy.array
MARRAY = numpy.ma.array
ISFINITE = numpy.isfinite
LOGICAL_OR = numpy.logical_or
_ArrayT = numpy.ndarray
_MArrayT = numpy.ma.core.MaskedArray
IsArray = lambda x: isinstance(x, _ArrayT) or isinstance(x, _MArrayT) or isinstance(x, list)
# Useful simple functions
# take the current channel selection, and produce a list of the sorted unique channels
mk_chansel = functional.compose(list, sorted, set, CP)
print_if = functional.choice(operator.truth, functional.printf, functional.const(None))
# We support different kinds of averaging
def avg_vectornorm(ax):
def do_it(x):
# first set all flagged + NaN/Inf values to 0 such that
# (1) any NaN/Inf's don't screw up the total sum
# (2) flagged data doesn't count towards the sum
# We're going to need the input-mask twice
imask = LOGICAL_OR(~ISFINITE(x.data), x.mask)
# Sum all values along the requested axis
x.data[imask] = 0
total = numpy.sum(x.data, axis=ax, keepdims=True)
# figure out where the counts along the requested axis are 0
# and transform to a mask such that masked values in the *output*
# may effectively be removed
flags = ARRAY(numpy.sum(~imask, axis=ax, keepdims=True)==0, dtype=numpy.bool)
# Find the maximum unflagged value along the input axis.
# Flagged data gets set to -inf such that max()
# may yield a useful result
mdata = numpy.abs(x.data)
mdata[imask] = -numpy.inf
total /= numpy.max(mdata, axis=ax, keepdims=True)
# remove those data points
total[flags] = numpy.nan
return MARRAY(total, mask = flags)
return do_it
def avg_arithmetic(ax):
# normal arithmetic mean, should work both on complex or scalar data
def do_it(x):
# first set all flagged + NaN/Inf values to 0 such that
# (1) any NaN/Inf's don't screw up the total sum
# (2) flagged data doesn't count towards the sum
imask = LOGICAL_OR(~ISFINITE(x.data), x.mask)
x.data[ imask ] = 0
total = numpy.sum(x.data, axis=ax, keepdims=True)
counts = numpy.sum(~imask, axis=ax, keepdims=True)
# figure out where the counts are 0 - effectively
# remove those data points
nmask = ARRAY(counts==0, dtype=numpy.bool)
# we have computed where the count==0 so we can now
# overwrite with 1 to prevent divide-by-zero errors.
# Later we'll replace those values with NaN
counts[nmask]=1
total /= counts
# And indicate where there was no average at all
total[nmask] = numpy.NaN
return MARRAY(total, mask = nmask)
return do_it
def avg_sum(ax):
# normal arithmetic mean, should work both on complex or scalar data
def do_it(x):
# first set all flagged + NaN/Inf values to 0 such that
# (1) any NaN/Inf's don't screw up the total sum
# (2) flagged data doesn't count towards the sum
imask = LOGICAL_OR(~ISFINITE(x.data), x.mask)
x.data[ imask ] = 0
# Sum all values along the requested axis
total = numpy.sum(x.data, axis=ax, keepdims=True)
# count unflagged points along that axis and set flag if
# there aren't any of those
flags = ARRAY(numpy.sum(~imask, axis=ax, keepdims=True)==0, dtype=numpy.bool)
# remove points that didn't have any unflagged data
total[flags] = numpy.nan
return MARRAY(total, mask = flags)
return do_it
def avg_none(_):
return functional.identity
## The base class holds the actual table object -
## makes sure the selection etc gets done
class plotbase(object):
def __enter__(self, *args, **kwargs):
return self
def __exit__(self, *args, **kwargs):
if hasattr(self, 'table'):
self.table.close()
# depending on combination of query or not and read flags or not
# we have optimum call sequence for processing a table
# key = (qryYesNo, readFlagYesNo)
# I think that executing an empty query
# tbl.query('')
# takes longer than
# tbl.query()
_qrycolmapf = {
(False, False): lambda tbl, q, c: tbl, # no query, no flagcolum reading
(True , False): lambda tbl, q, c: tbl.query(q), # only query
(False, True ): lambda tbl, q, c: tbl.query(columns=c), # only different columns
(True, True ): lambda tbl, q, c: tbl.query(q, columns=c) # the works
}
## selection is a selection object from 'selection.py'
def __init__(self, msname, selection, mapping, **kwargs):
self.verbose = kwargs.setdefault('verbose', True)
self.flags = kwargs.get('readflags', True)
self.datacol = CP(mapping.domain.column)
self.table = pycasa.table(msname) if havePyCasa else ms2util.opentable(msname)
# when reading flags is enabled let the C++ do the OR'ing of FLAG_ROW, FLAG columns for us
colnames = None #"*, (FLAG_ROW || FLAG) AS FLAGCOL" if self.flags else None
## apply selection if necessary
qry = selection.selectionTaQL()
s = NOW()
self.table = plotbase._qrycolmapf[(bool(qry), bool(colnames))](self.table, qry, colnames)
e = NOW()
if not self.table:
raise RuntimeError("No data available for your selection criteria")
if qry and self.verbose:
print "Query took\t\t{0:.3f}s".format(e-s)
# we'll provide overrides for specific column readers
# for efficiency for the WEIGHT and/or FLAG data
#
# Subclasses can request 'WEIGHTCOL' and/or 'FLAGCOL'
# in their call to reducems2(...), provided they pass
# in the self.slicers{..} object which we'll have prepared
self.slicers = dict()
# Set up weight thresholding.
# We have to be able to deal with the following weight shapes:
# numpy.Inf (weights not read)
# (n_int, n_pol) [WEIGHT column read]
# (n_int, n_freq, n_pol) [WEIGHT_SPECTRUM read]
#
# In order to turn the weight criterion (if any) into a mask we must
# potentially broadcast the WEIGHT shape (one weight per polarization)
# to all channels; the data always has shape:
# (n_int, n_freq, n_pol)
#
# We can do that efficiently by transposing the data array in that case to be:
# (n_freq, n_int, n_pol)
# Now the data mask also has this shape.
# Then, numpy.logical_or(data.mask, weight_mask) does the right thing:
# weight_mask == (n_int, n_pol) -> dimensions match on both data.mask and weight_mask
# but for the first -> broadcasted along dim 0, which
# is n_freq, i.e. each spectral point gets the same weight per pol
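# Illustrative sketch (hypothetical sizes n_int=3, n_freq=4, n_pol=2) of the
# transpose/broadcast trick described above:
#   dmask = numpy.zeros((3, 4, 2), dtype=bool).transpose((1, 0, 2))  # -> (n_freq, n_int, n_pol)
#   wmask = numpy.zeros((3, 2), dtype=bool)                          # e.g. a (n_int, n_pol) mask derived from WEIGHT
#   numpy.logical_or(dmask, wmask).shape                             # -> (4, 3, 2): wmask broadcast over n_freq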
self.threshold = CP(selection.weightThreshold) if selection.weightThreshold is not None else -numpy.Inf
transpose = weight_rd = None
if self.threshold == -numpy.Inf:
# No weight thresholding? Return an infinite weight to effectively disable thresholding
weight_rd = lambda _a,_b,_c,_d: numpy.Inf
# Also no need to transpose/untranspose the data array for this 'shape'
transpose = functional.identity
else:
# weight read from MS, choose which column to use
weight_col = 'WEIGHT_SPECTRUM' if 'WEIGHT_SPECTRUM' in self.table.colnames() else 'WEIGHT'
weight_rd = lambda tab, _, s, n: tab.getcol(weight_col, startrow=s, nrow=n)
# the need to transpose/untranspose the data array depends on which is the weight column
transpose = operator.methodcaller('transpose', (1,0,2)) if weight_col == 'WEIGHT' else functional.identity
# install the appropriate weight reader for the 'WEIGHTCOL' column
self.slicers['WEIGHTCOL'] = weight_rd
self.transpose = transpose
# Set up FLAG reading
# If self.flags is set, then we take FLAG, FLAG_ROW from the MS,
# otherwise we override the slicer with something that does nothing
# but return 'not flagged'
# Because we need to do (FLAG || FLAG_ROW) we play the same transpose
# trick as with the WEIGHT/WEIGHT_SPECTRUM above only different.
# FLAG = (nrow, nchan, npol)
# FLAG_ROW = (nrow,)
# so by doing "flag.transpose((1,2,0))" it becomes (nchan, npol, nrow)
# and now numpy.logical_or(flag, flag_row) broadcasts Just Fine(tm)!
# But we only have to do that if we actually read flags from the MS ...
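# Illustrative sketch (hypothetical sizes nrow=5, nchan=4, npol=2):
#   flag     = numpy.zeros((5, 4, 2), dtype=bool).transpose((1, 2, 0))  # -> (nchan, npol, nrow)
#   flag_row = numpy.zeros((5,), dtype=bool)
#   numpy.logical_or(flag, flag_row).shape                              # -> (4, 2, 5): flag_row broadcast over nchan, npol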
self.transpose_flag = operator.methodcaller('transpose', (1,2,0))
self.untranspose_flag = operator.methodcaller('transpose', (2,0,1))
if not self.flags:
# no flags to be read, replace with no-ops
no_flag = lambda _a,_b,_c,_d: False
self.slicers['FLAG'] = no_flag
self.slicers['FLAG_ROW'] = no_flag
self.transpose_flag = self.untranspose_flag = functional.identity
## Parse data-description-id selection into a map:
## self.ddSelection will be
## map [ DATA_DESC_ID ] => (FQ, SB, POLS)
##
## With FQ, SB integer - the indices,
## POLS = [ (idx, str), ... ]
## i.e. list of row indices and the polarization string
## to go with it, such that the polarization data
## is put in the correct plot/data set immediately
##
# The fact of the matter is that the polarization row index ('idx'
# above) is not a unique mapping to physical polarization so we cannot
# get away with using the numerical label, even though that would be
# faster
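# For example (hypothetical indices/strings), after this block one could end up with:
#   self.ddSelection = { 2: (0, 1, [(0, 'RR'), (3, 'LL')]) }   # DATA_DESC_ID 2 -> (FQ, SB, POLS)
#   self.ddFreqs     = { 2: <numpy array of channel frequencies, in MHz for spectral plots> }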
_pMap = mapping.polarizationMap
_spwMap = mapping.spectralMap
GETF = _spwMap.frequenciesOfFREQ_SB
# Frequencies get done in MHz
scale = 1e6 if mapping.domain.domain == jenums.Type.Spectral else 1
## if user did not pass DATA_DESC_ID selection, default to all
if selection.ddSelection:
## An element in "ddSelection" is a 4-element tuple with
## fields (FQ, SB, POLID, [product indices])
## So all we need is to pair the product indices with the
## appropriate polarization strings
GETDDID = _spwMap.datadescriptionIdOfFREQ_SB_POL
ITEMGET = hvutil.itemgetter
def ddIdAdder(acc, ddSel):
(fq, sb, pid, l) = ddSel
ddId = GETDDID(fq, sb, pid)
polStrings = _pMap.getPolarizations(pid)
acc[0][ ddId ] = (fq, sb, zip(l, ITEMGET(*l)(polStrings)))
acc[1][ ddId ] = GETF(fq, sb)/scale
return acc
(self.ddSelection, self.ddFreqs) = reduce(ddIdAdder, selection.ddSelection, [{}, {}])
else:
ddids = _spwMap.datadescriptionIDs()
UNMAPDDID = _spwMap.unmapDDId
def ddIdAdder(acc, dd):
# Our data selection is rather simple: all the rows!
r = UNMAPDDID(dd)
acc[0][ dd ] = (r.FREQID, r.SUBBAND, list(enumerate(_pMap.getPolarizations(r.POLID))))
acc[1][ dd ] = GETF(r.FREQID, r.SUBBAND)/scale
return acc
(self.ddSelection, self.ddFreqs) = reduce(ddIdAdder, ddids, [{}, {}])
## Provide for a label unmapping function.
## After creating the plots we need to transform the labels - some
## of the numerical indices must be unmapped into physical quantities
#unmapBL = mapping.baselineMap.baselineName
#unmapFQ = mapping.spectralMap.freqGroupName
#unmapSRC = mapping.fieldMap.field
unmap_f = { AX.BL: mapping.baselineMap.baselineName,
AX.FQ: mapping.spectralMap.freqGroupName,
AX.SRC: mapping.fieldMap.field,
AX.TIME: lambda t: pyrap.quanta.quantity(t, "s").formatted("time", precision=8) }
identity = lambda x: x
def unmap( (fld, val) ):
return (fld, unmap_f.get(fld, identity)(val))
# flds is the list of field names that the values in the tuple mean
self.MKLAB = lambda flds, tup: plots.label( dict(map(unmap, zip(flds, tup))), flds )
##
## Should return the generated plots according to the following
## structure:
##
## Update: Dec 2015 - we start doing things a little different
## the raw data sets will be delivered as a dict of
## Dict: Key -> Value, where Key is the full data set
## label and Value the dataset() object.
## The division into plots will be done at a higher
## level. Reasons:
## - generation of raw data is faster as only one level
## of dict indexing is needed i.s.o. two
## - if user changes the new plot settings, we don't
## have to read from disk no more, it then is a mere
## rearrangement of the raw data sets
## - load/store with expressions on data sets now work
## on the one-dimensional 'list' of data sets, no need
## to flatten/unflatten anymore
##
## plots = dict( Key -> Value ) with
## Key = <plot index> # contains physical quantities/labels
## Value = DataSet
## DataSet = instance of 'dataset' (see below) with
## attributes ".x" and ".y"
def makePlots(self, *args):
raise RuntimeError, "Someone forgot to implement this function for this plottype"
## Unfortunately, our code relies on the fact that the numarrays returned
## from "ms.getcol()" are 3-dimensional: (nintg x npol x nchannel)
## Sadly, casa is smart/stoopid enough to return no more dimensions
## than are needed; no degenerate axes are present.
## So if your table consists of one row, you get at best a matrix:
## npol x nchannel
## Further, if you happen to read single-pol data, guess what,
## you get a matrix at best and a vector at worst!:
## matrix: nintg x nchannel
## vector: nchannel (worst case: a table with one row of single pol data!)
##
## m3d() can be used to reshape an array to always be at least 3d,
## it inserts degenerate axis from the end, assuming that there
## won't be data sets with only one row ...
## (single pol does happen! a lot!)
#def m3d(ar):
# shp = list(ar.shape)
# while len(shp)<3:
# shp.insert(-1, 1)
# return ar.reshape( shp )
#
#def m2d(ar):
# shp = list(ar.shape)
# while len(shp)<2:
# shp.insert(-1, 1)
# return ar.reshape( shp )
#
#class dataset_org:
# __slots__ = ['x', 'y', 'n', 'a', 'sf', 'm']
#
# @classmethod
# def add_sumy(self, obj, xs, ys, m):
# obj.y = obj.y + ys
# obj.n = obj.n + 1
# obj.m = numpy.logical_and(obj.m, m)
#
# @classmethod
# def init_sumy(self, obj, xs, ys, m):
# obj.x = numpy.array(xs)
# obj.y = numpy.array(ys)
# obj.sf = dataset_org.add_sumy
# obj.m = m
#
# def __init__(self, x=None, y=None, m=None):
# if x is not None and len(x)!=len(y):
# raise RuntimeError, "attempt to construct data set where len(x) != len(y)?!!!"
# self.x = list() if x is None else x
# self.y = list() if y is None else y
# self.m = list() if m is None else m
# self.n = 0 if x is None else 1
# self.sf = dataset_org.init_sumy if x is None else dataset_org.add_sumy
# self.a = False
#
# def append(self, xv, yv, m):
# self.x.append(xv)
# self.y.append(yv)
# self.m.append(m)
#
# # integrate into the current buffer
# def sumy(self, xs, ys, m):
# self.sf(self, xs, ys, m)
#
# def average(self):
# if not self.a and self.n>1:
# self.y = self.y / self.n
# self.a = True
#
# def is_numarray(self):
# return (type(self.x) is numpy.ndarray and type(self.y) is numpy.ndarray)
#
# def as_numarray(self):
# if self.is_numarray():
# return self
# # note to self: float32 has insufficient precision for e.g.
# # <quantity> versus time
# self.x = numpy.array(self.x, dtype=numpy.float64)
# self.y = numpy.array(self.y, dtype=numpy.float64)
# self.m = numpy.array(self.m, dtype=numpy.bool)
# return self
#
# def __str__(self):
# return "DATASET: {0} MASK: {1}".format(zip(self.x, self.y), self.m)
#
# def __repr__(self):
# return str(self)
class dataset_fixed:
__slots__ = ['x', 'y', 'm']
def __init__(self, x=None, y=None):
if x is not None and len(x)!=len(y):
raise RuntimeError, "attempt to construct data set where len(x) != len(y)?!!!"
self.x = list() if x is None else x
self.y = list() if y is None else y
self.m = False
def append(self, xv, yv, m):
raise NotImplemented("append() does not apply to dataset_fixed!")
# integrate into the current buffer
def sumy(self, xs, ys, m):
raise NotImplemented("sumy() does not apply to dataset_fixed!")
def average(self, method):
raise NotImplemented("average() does not apply to dataset_fixed!")
def is_numarray(self):
return type(self.x) is _ArrayT and type(self.y) is _MArrayT
def as_numarray(self):
if self.is_numarray():
return self
# note to self: float32 has insufficient precision for e.g.
# <quantity> versus time
self.x = ARRAY(self.x, dtype=numpy.float64)
if type(self.y) is not _MArrayT: #numpy.ma.MaskedArray:
self.y = MARRAY(self.y, mask=~ISFINITE(self.y), dtype=numpy.float64)
return self
def __str__(self):
return "DATASET<fixed>: {0}".format(zip(self.x, self.y))
def __repr__(self):
return str(self)
#################################################################################
#
# .append() means append to list, fastest for collecting individual samples
# .average() verifies that no averaging is requested - this one can't handle that
#
#################################################################################
class dataset_list:
__slots__ = ['x', 'y', 'n', 'a', 'm']
@classmethod
def add_sumy(self, obj, xs, ys, m):
obj.y = obj.y + ys
obj.n = obj.n + 1
obj.m = numpy.logical_and(obj.m, m)
@classmethod
def init_sumy(self, obj, xs, ys, m):
obj.x = numpy.array(xs)
obj.y = numpy.array(ys)
obj.sf = dataset_list.add_sumy
obj.m = m
def __init__(self, x=None, y=None, m=None):
if x is not None and len(x)!=len(y):
raise RuntimeError, "attempt to construct data set where len(x) != len(y)?!!!"
self.x = list() if x is None else x
self.y = list() if y is None else y
self.m = list() if m is None else m
self.n = 0 if x is None else 1
self.a = False
def append(self, xv, yv, m):
self.x.append(xv)
self.y.append(yv)
self.m.append(m)
def extend(self, xseq, yseq, mseq):
self.x.extend(xseq)
self.y.extend(yseq)
self.m.extend(mseq)
def average(self, method):
if method != AVG.None:
raise RuntimeError("dataset_list was not made for time averaging")
def is_numarray(self):
return type(self.x) is _ArrayT and type(self.y) is _MArrayT
def as_numarray(self):
if self.is_numarray():
return self
# note to self: float32 has insufficient precision for e.g.
# <quantity> versus time
self.x = ARRAY(self.x, dtype=numpy.float64)
self.y = MARRAY(self.y, mask=self.m, dtype=numpy.float64)
return self
def __str__(self):
return "DATASET<list>: len(x)={0}, len(y)={1} len(m)={2}".format(len(self.x), len(self.y), len(self.m))
def __repr__(self):
return str(self)
#################################################################################
#
# Specialization for holding one (1) spectrum. x-axis = channel numbers
# The .add_y method may be called only once
#
#################################################################################
#class dataset_chan_wtf:
# __slots__ = ['x', 'y', 'sf']
#
# @classmethod
# def add_sumy(self, *_):
# raise RuntimeError("dataset_chan was not meant to integrate > 1 spectrum")
#
# @classmethod
# def init_sumy(self, obj, xs, ys, m):
# obj.x = ARRAY(xs)
# obj.y = MARRAY(ys, mask=m)
# obj.sf = dataset_chan.add_sumy
#
# def __init__(self):
# self.x = self.y = None
# self.sf = dataset_chan_wtf.init_sumy
#
# def append(self, xv, yv, m):
# raise NotImplemented("dataset_chan not meant for appending")
#
# def add_y(self, x,v, yv, m):
# self.sf(self, xv, yv, m)
#
# def average(self, method):
# if method != AVG.None:
# raise RuntimeError("dataset_chan was not meant for averaging!")
#
# def is_numarray(self):
# return (type(self.x) is numpy.array and type(self.y) is numpy.ma.MaskedArray)
#
# def as_numarray(self):
# if self.is_numarray():
# return self
# # note to self: float32 has insufficient precision for e.g.
# # <quantity> versus time
# self.x = numpy.array(self.x, dtype=numpy.float64)
# self.y = numpy.ma.MaskedArray(self.y, mask=self.m, dtype=numpy.float64)
# return self
#
# def __str__(self):
# return "DATASET<chan>: len(x)={0}, len(y)={1} len(m)={2}".format(len(self.x), len(self.y), len(self.m))
#
# def __repr__(self):
# return str(self)
# Specialization for (potentially) averaging > 1 spectrum when solint'ing
class dataset_chan:
__slots__ = ['x', 'y', 'm', 'sf', 'af']
# 2nd call to .add_y() means that we have to transform
# the mask to integers and remove NaN/Inf (masked) values from
# the previously stored y-values in order to make sure that those
# don't screw up the totals
@classmethod
def add_sumy_first(self, obj, xs, ys, m):
# set masked values to 0 and convert mask to counts in existing object
obj.y[ obj.m ] = 0
obj.m = ARRAY(~obj.m, dtype=numpy.int)
# from now on, averaging has to do something
obj.af = dataset_chan.average_n
# from now on extra .add_y() calls will do something slight different
obj.sf = dataset_chan.add_sumy
# use the new .add_y() to do the "integration" for us
obj.sf(obj, xs, ys, m)
# before 'integrating' the y-values we must
# make sure no NaN/Inf values are present
# because a single NaN/Inf in a channel makes
# the whole summation for that channel go NaN/Inf.
# The reading process auto-flags points which have
# Inf/NaN so we can just set flagged values to 0
@classmethod
def add_sumy(self, obj, xs, ys, m):
ys[ m ] = 0
obj.y = obj.y + ys
obj.m = obj.m + ARRAY(~m, dtype=numpy.int) # transform mask into counts
# very first call to .add_y() just store the parameters
# no fancy processing
@classmethod
def init_sumy(self, obj, xs, ys, m):
obj.x = ARRAY(xs)
obj.y = ARRAY(ys)
obj.m = ARRAY(m) #ARRAY(~m, dtype=numpy.int) # transform mask into counts
obj.sf = dataset_chan.add_sumy_first
obj.af = dataset_chan.average_noop
@classmethod
def average_empty(self, obj, method):
raise RuntimeError("dataset_chan: attempt to average uninitialized dataset (.add_y() never called)")
@classmethod
def average_noop(self, obj, method):
# nothing to average, really
obj.af = None
@classmethod
def average_n(self, obj, method):
# normal average = arithmetic mean i.e. summed value / count of valid values
fn = numpy.divide
if method==AVG.Vectornorm:
# for vector norm we divide by the largest (complex) amplitude
fn = lambda x, _: x/numpy.max(numpy.abs(x))
elif method in [AVG.None, AVG.Sum, AVG.Vectorsum]:
fn = lambda x, _: x
# from counts form mask [note: do not clobber obj.m just yet, we need the counts!]
m = ARRAY(obj.m==0, dtype=numpy.bool)
# set counts == 1 where counts were 0 to prevent dividing by 0
obj.m[ m ] = 1
# our various add_y() functions have made sure that no NaN/Inf exist in the data
# so we don't have to blank anything; 's already done
# compute average y
obj.y = fn(obj.y, obj.m)
# replace the counts by the mask
obj.m = m
# and set masked values to NaN because averaging no values has no answer
obj.y[m] = numpy.nan
# and indicate we did do the averaging
obj.af = None
def __init__(self):
self.x = self.y = self.m = None
self.sf = dataset_chan.init_sumy
self.af = dataset_chan.average_empty
def append(self, xv, yv, m):
raise NotImplemented("dataset_chan not meant for appending")
def add_y(self, xv, yv, m):
if not IsArray(xv):
raise RuntimeError("dataset_chan:add_y() adding xv of non-array type! xv = {0}".format(xv))
self.sf(self, xv, yv, m)
def average(self, method):
if self.af is not None:
self.af(self, method)
else:
raise RuntimeError("Calling .average() > once on dataset_chan object?!")
def is_numarray(self):
return type(self.x) is _ArrayT and type(self.y) is _MArrayT
def as_numarray(self):
if self.is_numarray():
return self
if self.af is not None:
raise RuntimeError("Request to convert unaveraged dataset_chan_solint to nd-array?!!")
# note to self: float32 has insufficient precision for e.g.
# <quantity> versus time
self.x = ARRAY(self.x, dtype=numpy.float64)
self.y = MARRAY(self.y, mask=self.m)
return self
def __str__(self):
return "DATASET<chan>: len(x)={0}, len(y)={1} len(m)={2}".format(len(self.x), len(self.y), len(self.m))
def __repr__(self):
return str(self)
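# Hypothetical usage sketch of dataset_chan (channel indices in x, spectra summed per channel):
#   ds = dataset_chan()
#   ds.add_y(ARRAY([0, 1]), ARRAY([1.0, 2.0]), ARRAY([False, True ]))  # first spectrum, channel 1 flagged
#   ds.add_y(ARRAY([0, 1]), ARRAY([3.0, 4.0]), ARRAY([False, False]))  # second spectrum, all unflagged
#   ds.average(AVG.Scalar)   # per-channel mean over the unflagged values
#   ds.as_numarray()         # ds.y -> masked array [2.0, 4.0]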
#################################################################################
#
# specialization of dataset for grouping multiple channels w/ mask by x value
# (think solint/group by time interval)
#
# the .append() accumulates the channels grouped by the x value
# .average() computes the channel averages over all data collected for each x value
#
#################################################################################
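# Hypothetical usage sketch of the class defined below (two 2-channel samples at the same x):
#   ds = dataset_solint_array()
#   ds.append(0.0, ARRAY([1.0, 6.0]), ARRAY([False, False]))
#   ds.append(0.0, ARRAY([3.0, 4.0]), ARRAY([False, True ]))   # channel 1 flagged this time
#   ds.average(AVG.Scalar)   # per-channel mean per x value
#   ds.as_numarray()         # ds.x -> [0.0], ds.y -> [[2.0, 6.0]]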
class dataset_solint_array:
__slots__ = ['x', 'y', 'a', 'd', 'm']
def __init__(self):
self.x = self.y = None
self.a = None
self.d = collections.defaultdict(int)
self.m = collections.defaultdict(int)
# this specialization assumes yv, m are instances of numpy.ndarray or numpy.ma.core.MaskedArray
def append(self, xv, yv, m):
# masked data shall not count towards the total for computing the average
#yv[m] = 0
# Also: any floating point aggregation function (sum, mean, etc.) will barf on
# any of the aggregated values being [+-]Inf or NaN - i.e. the net result
# will be NaN/Inf whatever. Therefore we must replace these with 0.
# We'll put back NaN if it turns out that no unmasked values were averaged because
# in such a situation there IS no average/sum/etc. ("what is the sum of no values?"):
# >>> import numpy
# >>> numpy.sum([1,2,numpy.nan])
# nan
yv[ LOGICAL_OR(~ISFINITE(yv), m) ] = 0
# accumulate in the bin for this specific x value
self.d[xv] = numpy.add(yv, self.d[xv])
self.m[xv] = numpy.add(~m, self.m[xv])
def average(self, method):
if self.a is not None:
return
# normal average = arithmetic mean i.e. summed value / count of valid values by default
fn = numpy.divide
if method==AVG.Vectornorm:
# for vector norm we divide by the largest complex amplitude
fn = lambda x, _: x/numpy.max(numpy.abs(x))
elif method in [AVG.None, AVG.Sum, AVG.Vectorsum]:
# because we already integrate (==sum) then no averaging equals summing and v.v. :-)
fn = lambda x, _: x
# construct a new dict with the averaged data values and set mask whether
# any unmasked values were collected for that x, channel
self.a = dict()
while self.d:
(x, ys) = self.d.popitem()
counts = self.m.pop(x)
# ---- latest ---------------
counts = ARRAY(counts)
mask = ARRAY(counts==0, dtype=numpy.bool)
counts[mask] = 1
data = fn(ARRAY(ys), counts)
# after averaging, points with zero counts should be set to NaN
# to effectively remove them.
data[mask] = numpy.nan
self.a[x] = MARRAY(data, mask=mask)
# ---------------------------
def is_numarray(self):
return type(self.x) is _ArrayT and type(self.y) is _MArrayT
def as_numarray(self):
if self.is_numarray():
return self
# note to self: float32 has insufficient precision for e.g.
# the <time> axis in <quantity> versus time datasets
if self.a is None:
raise RuntimeError("solint dataset has not been averaged yet")
self.x = numpy.fromiter(self.a.iterkeys(), dtype=numpy.float64, count=len(self.a))
self.y = MARRAY(self.a.values())
return self
def __str__(self):
return "DATASET<solint-array>: len(d)={0} len(m)={1}".format(len(self.d), len(self.m))
def __repr__(self):
return str(self)
##################################################################################
##
## specialization of dataset for grouping a single channels w/ mask by x value
## (think solint/group by time interval)
##
## the .append() accumulates the values grouped by the x value
## .average() computes the value averages over all data collected for each x value
##
##################################################################################
class dataset_solint_scalar:
__slots__ = ['x', 'y', 'a', 'd', 'm']
def __init__(self):
self.x = self.y = None
self.a = None
self.d = collections.defaultdict(list)
self.m = collections.defaultdict(int)
# this specialization assumes yv, m are scalar value + boolean (or anything
# indicating the truth of yv)
def append(self, xv, yv, m):
# don't let masked or Inf/NaN values count towards the total before averaging
self.d[xv].append( 0 if m or not ISFINITE(yv) else yv )
# do count truth values
self.m[xv] += 0 if m else 1
def average(self, method):
if self.a is not None:
return
# normal average = arithmetic mean i.e. summed value / count of valid values
fn = operator.truediv
if method==AVG.Vectornorm:
# for vector norm we divide by the largest complex amplitude
fn = lambda x, _: x/max(map(abs,x))
elif method in [AVG.None, AVG.Sum, AVG.Vectorsum]:
# because our data is already summed then no averaging == summing
fn = lambda x, _: x
# construct a new dict with the averaged data values and set mask based on
# number of unmasked
self.a = dict()
while self.d:
(x, ys) = self.d.popitem()
counts = self.m.pop(x)
# ---- latest ---------------
# if no valid data at all substitute a value of nan
self.a[x] = fn(sum(ys), counts) if counts else numpy.nan
# ---------------------------
def is_numarray(self):
return type(self.x) is _ArrayT and type(self.y) is _MArrayT
def as_numarray(self):
if self.is_numarray():
return self
# note to self: float32 has insufficient precision for e.g.
# the <time> axis in <quantity> versus time datasets
if self.a is None:
raise RuntimeError("solint dataset has not been averaged yet")
self.x = numpy.fromiter(self.a.iterkeys(), dtype=numpy.float64, count=len(self.a))
self.y = MARRAY(self.a.values())
return self
def __str__(self):
return "DATASET<solint-scalar>: len(d)={0} len(m)={1}".format(len(self.d), len(self.m))
def __repr__(self):
return str(self)
## Partition a data set into two separate data sets,
## one with those elements satisfying the predicate,
## the other those who dont.
## Returns (ds_true, ds_false)
##
## Implementation note:
## Yes, there is hvutil.partition() which does much the same but
## using a reduce(). The problem is that it expects a single list of values
## to which to apply the predicate.
## In order to turn a dataset() into a single list, we'd have to
## zip() the ".x" and ".y" lists. After having partition'ed the list,
## we'd have to unzip them again into separate ".x" and ".y" arrays,
## for the benefit of PGPLOT.
## Summarizing: in order to use hvutil.partition() we'd have to do two (2)
## cornerturning operations, which seems to be wasteful.
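## A hypothetical usage sketch of the partitioner class defined below:
##   mark = partitioner("y > avg + 2*sd")   # mark points more than 2 sigma above the mean
##   idx  = mark(ds.x, ds.y)                # indices of the points satisfying the expression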
class partitioner:
def __init__(self, expr):
# solution found in:
# http://stackoverflow.com/questions/10303248/true-dynamic-and-anonymous-functions-possible-in-python
self.code = compile(
"from numpy import *\n"+
"from math import *\n"+
"avg = None\n"+
"sd = None\n"+
"xmin = None\n"+
"xmax = None\n"+
"ymin = None\n"+
"ymax = None\n"+
"f = lambda x, y: "+expr,
'dyn-mark-string', 'exec')
self.mod = imp.new_module("dyn_marker_mod")
exec self.code in self.mod.__dict__
def __call__(self, x, y):
ds_true = []
self.mod.avg = numpy.mean(y)
self.mod.sd = numpy.std(y)
self.mod.xmin = numpy.min(x)
self.mod.xmax = numpy.max(x)
self.mod.ymin = numpy.min(y)
self.mod.ymax = numpy.max(y)
for i in xrange(len(x)):
if self.mod.f(x[i], y[i]):
ds_true.append(i)
return ds_true
### Turn an array of channel indices (the channels that we're interested in)
### into a 3D mask function
### Assumes that the indices have been shifted to 0 by slicing the column
### This implies that IF chanidx is a list of length 1, it must automatically
### be channel 0
#def mk3dmask_fn_idx(nrow, chanidx, npol):
# return lambda x: x[:,chanidx,:]
#
#def mk3dmask_fn_mask(nrow, chanidx, npol):
# if len(chanidx)>1 and (len(chanidx)!=(chanidx[-1]+1)):
# # Start off with all channels masked, up to the last index
# m = numpy.ones( (nrow, chanidx[-1]+1, npol), dtype=numpy.int8 )
# # all indexed channels have no mask
# m[:,chanidx,:] = 0
# return lambda x: numpy.ma.MaskedArray(x, mask=m)
# else:
# # single channel - or all channels
# if len(chanidx)==1 and chanidx[0]!=0:
# raise RuntimeError, "consistency problem, chanidx[0] isn't 0 for single channel selection"
# return lambda x: numpy.ma.MaskedArray(x, mask=numpy.ma.nomask)
#
def genrows(bls, ddids, fldids):
tm = 0
while True:
for (bl, dd, fld) in itertools.product(bls, ddids, fldids):
yield (tm, bl, dd, fld)
tm = tm + 1
import itertools, operator
class fakems:
def __init__(self, ms, mapping):
#self.ms = ms
self.length = len(ms)
#(self.a1, self.a2) = zip( *mapping.baselineMap.baselineIndices() )
self.bls = mapping.baselineMap.baselineIndices()
self.ddids = mapping.spectralMap.datadescriptionIDs()
self.flds = mapping.fieldMap.getFieldIDs()
shp = ms[0]["LAG_DATA" if "LAG_DATA" in ms.colnames() else "DATA"].shape
while len(shp)<2:
shp.append(1)
self.shp = shp
self.rowgen = genrows(self.bls, self.ddids, self.flds)
self.chunk = {}
print "fakems/",len(self.bls)," baselines, ",len(self.ddids)," SB, ",len(self.flds)," SRC, shape:",self.shp
def __len__(self):
return self.length
def __getitem__(self, item):
theshape = self.shp
class column:
def __init__(self, shp):
self.shape = shp
class row:
def __init__(self):
self.rdict = { 'DATA': column(theshape), 'LAG_DATA': column(theshape) }
def __getitem__(self, colnm):
return self.rdict[colnm]
return row()
def getcol(self, col, **kwargs):
nrow = kwargs['nrow']
startrow = kwargs['startrow']
if not startrow in self.chunk:
# new block of rows. delete existing
del self.chunk
i = [0]
def predicate(x):
i[0] = i[0] + 1
return i[0]<=nrow
self.chunk = {startrow: list(itertools.takewhile(predicate, self.rowgen))}
# rows = [ (tm, (a1, a2), dd, fld), .... ]
rows = self.chunk[startrow]
coldict = {
"ANTENNA1" : (lambda x: map(lambda (tm, (a1, a2), dd, fl): a1, x), numpy.int32),
"ANTENNA2" : (lambda x: map(lambda (tm, (a1, a2), dd, fl): a2, x), numpy.int32),
"TIME" : (lambda x: map(lambda (tm, (a1, a2), dd, fl): tm, x), numpy.float64),
"DATA_DESC_ID": (lambda x: map(lambda (tm, (a1, a2), dd, fl): dd, x), numpy.int32),
"FIELD_ID" : (lambda x: map(lambda (tm, (a1, a2), dd, fl): fl, x), numpy.int32)
}
(valfn, tp) = coldict.get(col, (None, None))
#print "getcol[{0}]/var={1}".format(col, var)
if valfn:
return numpy.array(valfn(rows), dtype=tp)
if col=="WEIGHT":
# nrow x npol
shp = (nrow, self.shp[1])
rv = numpy.ones( reduce(operator.mul, shp), dtype=numpy.float32 )
rv.shape = shp
return rv
if col=="DATA" or col=="LAG_DATA":
shp = (nrow, self.shp[0], self.shp[1])
rv = numpy.zeros( reduce(operator.mul, shp), dtype=numpy.complex64 )
rv.shape = shp
return rv
raise RuntimeError,"Unhandled column {0}".format(col)
##### Different solint functions
#
#def solint_none(dsref):
# return 0.0
#
## Tried a few different approaches for solint processing.
## The functions below are kept as illustrative references.
##
## They are ordered from slowest to fastest operation, as benchmarked on running
## on the same data set with the same settings.
##
## solint_numpy_indexing: 7.2s runtime
## solint_numpy_countbin: 5.9s
## solint_pure_python: 3.8s
## solint_pure_python3: 3.2s
## solint_pure_python2: 2.8s
#
#
#def solint_numpy_indexing(dsref):
# start = time.time()
#
# dsref.as_numarray()
# tms = numpy.unique(dsref.x)
#
# # check if there is something to be averaged at all
# if len(tms)==len(dsref.x):
# return time.time() - start
#
# newds = dataset()
# for tm in tms:
# idxs = numpy.where(dsref.x==tm)
# newds.append(tm, numpy.average(dsref.y[idxs]), numpy.any(dsref.m[idxs]) )
# dsref.x = newds.x
# dsref.y = newds.y
# return time.time() - start
#
#def solint_numpy_countbin(dsref):
# start = time.time()
# dsref.as_numarray()
#
# # get the unique time stamps
# tms = numpy.unique(dsref.x)
#
# # check if there is something to be averaged at all
# if len(tms)==len(dsref.x):
# return time.time() - start
#
# # "bins" will be the destination bin where the quantity
# # will be summed into for each unique time stamp
# # i.e. all data having time stamp tms[0] will be summed into
# # bin 0, all data having time stamp tms[x] will be summed
# # into bin x
# #bins = range( len(tms) )
# # Now we must transform the array of times (dsref.x) into an
# # array with bin indices
# dests = reduce(lambda acc, (ix, tm): \
# numpy.put(acc, numpy.where(dsref.x==tm), ix) or acc, \
# enumerate(tms), \
# numpy.empty(dsref.x.shape, dtype=numpy.int32))
# # Good, now that we have that ...
# sums = numpy.bincount(dests, weights=dsref.y)
# count = numpy.bincount(dests)
# dsref.y = sums/count
# dsref.x = tms
# return time.time() - start
#
#
#def solint_pure_python(dsref):
# start = time.time()
# tms = set(dsref.x)
#
# # check if there is something to be averaged at all
# if len(tms)==len(dsref.x):
# return time.time() - start
#
# # accumulate data into bins of the same time
# r = reduce(lambda acc, (tm, y): acc[tm].append(y) or acc, \
# itertools.izip(dsref.x, dsref.y), \
# collections.defaultdict(list))
# # do the averaging
# (x, y) = reduce(lambda (xl, yl), (tm, ys): (xl+[tm], yl+[sum(ys)/len(ys)]), \
# r.iteritems(), (list(), list()))
# dsref.x = x
# dsref.y = y
# return time.time() - start
#
#class average(object):
# __slots__ = ['total', 'n']
#
# def __init__(self):
# self.total = 0.0
# self.n = 0
#
# def add(self, other):
# self.total += other
# self.n += 1
# return None
#
# def avg(self):
# return self.total/self.n
#
#def solint_pure_python3(dsref):
# start = time.time()
# tms = set(dsref.x)
#
# # check if there is something to be averaged at all
# if len(tms)==len(dsref.x):
# return time.time() - start
#
# # accumulate data into bins of the same time
# r = reduce(lambda acc, (tm, y): acc[tm].add(y) or acc, \
# itertools.izip(dsref.x, dsref.y), \
# collections.defaultdict(average))
# # do the averaging
# (x, y) = reduce(lambda (xl, yl), (tm, ys): (xl.append(tm) or xl, yl.append(ys.avg()) or yl), \
# r.iteritems(), (list(), list()))
# dsref.x = x
# dsref.y = y
# return time.time() - start
#
#def solint_pure_python2(dsref):
# start = time.time()
# tms = set(dsref.x)
#
# # check if there is something to be averaged at all
# if len(tms)==len(dsref.x):
# return time.time() - start
#
# # accumulate data into bins of the same time
# r = reduce(lambda acc, (tm, y): acc[tm].append(y) or acc, \
# itertools.izip(dsref.x, dsref.y), \
# collections.defaultdict(list))
# # do the averaging
# (x, y) = reduce(lambda (xl, yl), (tm, ys): (xl.append(tm) or xl, yl.append(sum(ys)/len(ys)) or yl), \
# r.iteritems(), (list(), list()))
# dsref.x = x
# dsref.y = y
# return time.time() - start
#
#def solint_pure_python2a(dsref):
# start = time.time()
# tms = set(dsref.x)
#
# # check if there is something to be averaged at all
# if len(tms)==len(dsref.x):
# return time.time() - start
#
# # accumulate data into bins of the same time
# acc = collections.defaultdict(list)
# y = dsref.y
# m = dsref.m
# for (i, tm) in enumerate(dsref.x):
# if m[i] == False:
# acc[ tm ].append( y[i] )
# # do the averaging
# (xl, yl) = (list(), list())
# for (tm, ys) in acc.iteritems():
# xl.append(tm)
# yl.append( sum(ys)/len(ys) )
# dsref.x = xl
# dsref.y = yl
# dsref.m = numpy.zeros(len(xl), dtype=numpy.bool)
# return time.time() - start
#
## In solint_pure_python4 we do not check IF we need to do something, just DO it
#def solint_pure_python4(dsref):
# start = time.time()
#
# # accumulate data into bins of the same time
# r = reduce(lambda acc, (tm, y): acc[tm].append(y) or acc, \
# itertools.izip(dsref.x, dsref.y), \
# collections.defaultdict(list))
# # do the averaging
# (dsref.x, dsref.y) = reduce(lambda (xl, yl), (tm, ys): (xl.append(tm) or xl, yl.append(sum(ys)/len(ys)) or yl), \
# r.iteritems(), (list(), list()))
# return time.time() - start
#
## solint_pure_python5 is solint_pure_python4 with the lambda's removed; replaced by
## calls to external functions. This shaves off another 2 to 3 milliseconds (on large data sets)
#def grouper(acc, (tm, y)):
# acc[tm].append(y)
# return acc
#
#def averager((xl, yl), (tm, ys)):
# xl.append(tm)
# yl.append(sum(ys)/len(ys))
# return (xl, yl)
#
#def solint_pure_python5(dsref):
# start = time.time()
#
# # accumulate data into bins of the same time
# r = reduce(grouper, itertools.izip(dsref.x, dsref.y), collections.defaultdict(list))
# # do the averaging
# (dsref.x, dsref.y) = reduce(averager, r.iteritems(), (list(), list()))
# return time.time() - start
## This plotter will iterate over "DATA" or "LAG_DATA"
## and produce a number of quantities per data point, possibly averaging over time and/or channels
class data_quantity_time(plotbase):
# should set up a choice table based on the combination of averaging methods
# key into the lookup is '(avgChannelMethod, avgTimeMethod)'
# Also return wether the quantities must be postponed
_averaging = {
# no averaging at all, no need to postpone computing the quantity(ies)
(AVG.None, AVG.None): (avg_none, avg_none, False),
# only time averaging requested
# scalar in time means we can collect the quantities themselves
(AVG.None, AVG.Scalar): (avg_none, avg_arithmetic, False),
(AVG.None, AVG.Sum): (avg_none, avg_sum, False),
(AVG.None, AVG.Vectorsum): (avg_none, avg_sum, True),
# when vector(norm) averaging we must first collect all time data
# before we can compute the quantities, i.e. their computation must be postponed
(AVG.None, AVG.Vector): (avg_none, avg_arithmetic, True),
(AVG.None, AVG.Vectornorm): (avg_none, avg_vectornorm, True),
# When scalar averaging the channels no vector averaging in time possible
# Also no need to postpone computing the quantities
(AVG.Scalar, AVG.None): (avg_arithmetic, avg_none, False),
(AVG.Scalar, AVG.Sum): (avg_arithmetic, avg_sum, False),
(AVG.Scalar, AVG.Scalar): (avg_arithmetic, avg_arithmetic, False),
# When vector averaging the channels, the time averaging governs
# the choice of when to compute the quantity(ies)
(AVG.Vector, AVG.None): (avg_arithmetic, avg_none, False),
(AVG.Vector, AVG.Sum): (avg_arithmetic, avg_sum, False),
(AVG.Vector, AVG.Vectorsum): (avg_arithmetic, avg_sum, True),
(AVG.Vector, AVG.Scalar): (avg_arithmetic, avg_arithmetic, False),
# when doing vector in both dims we must first add up all the complex numbers
# for each channel(selection) and then in time and THEN compute the quantity(ies)
(AVG.Vector, AVG.Vector): (avg_arithmetic, avg_arithmetic, True),
(AVG.Vector, AVG.Vectornorm): (avg_arithmetic, avg_vectornorm, True),
# vectornorm averaging over the channels, see what's requested in time
(AVG.Vectornorm, AVG.None): (avg_vectornorm, avg_none, False),
(AVG.Vectornorm, AVG.Scalar): (avg_vectornorm, avg_arithmetic, False),
(AVG.Vectornorm, AVG.Vector): (avg_vectornorm, avg_arithmetic, True),
(AVG.Vectornorm, AVG.Vectornorm): (avg_vectornorm, avg_vectornorm, True),
(AVG.Vectornorm, AVG.Sum): (avg_vectornorm, avg_sum, False),
(AVG.Vectornorm, AVG.Vectorsum): (avg_vectornorm, avg_sum, True),
(AVG.Sum, AVG.None): (avg_sum, avg_none, False),
(AVG.Sum, AVG.Sum): (avg_sum, avg_sum, False),
(AVG.Sum, AVG.Scalar): (avg_sum, avg_arithmetic, False),
(AVG.Vectorsum, AVG.None): (avg_sum, avg_none, False),
(AVG.Vectorsum, AVG.Scalar): (avg_sum, avg_arithmetic, False),
(AVG.Vectorsum, AVG.Sum): (avg_sum, avg_sum, False),
(AVG.Vectorsum, AVG.Vectorsum): (avg_sum, avg_sum, True),
(AVG.Vectorsum, AVG.Vectornorm): (avg_sum, avg_vectornorm, True),
}
## our construct0r
## qlist = [ (quantity_name, quantity_fn), ... ]
##
def __init__(self, qlist):
self.quantities = list(itertools.starmap(Quantity, qlist))
def makePlots(self, msname, selection, mapping, **kwargs):
# Deal with channel averaging
# Scalar => average the derived quantity
# Vector => compute average cplx number, then the quantity
avgChannel = CP(selection.averageChannel)
avgTime = CP(selection.averageTime)
solchan = CP(selection.solchan)
solint = CP(selection.solint)
timerng = CP(selection.timeRange)
# some sanity checks
if solchan is not None and avgChannel==AVG.None:
raise RuntimeError("nchav value was set without specifiying a channel averaging method; please tell me how you want them averaged")
if solint is not None and avgTime==AVG.None:
raise RuntimeError("solint value was set without specifiying a time averaging method; please tell me how you want your time range(s) averaged")
## initialize the base class
super(data_quantity_time, self).__init__(msname, selection, mapping, **kwargs)
# channel selection+averaging schemes; support averaging over channels (or chunks of channels)
chansel = Ellipsis
n_chan = self.table[0][self.datacol].shape[0]
if selection.chanSel:
channels = mk_chansel(selection.chanSel)
max_chan = max(channels)
# if any of the indexed channels > n_chan that's an error
if max_chan>=n_chan:
raise RuntimeError("At least one selected channel ({0}) > largest channel index ({1})".format(max_chan, n_chan-1))
# also <0 is not quite acceptable
if min(channels)<0:
raise RuntimeError("Negative channel number {0} is not acceptable".format(min(channels)))
# if the user selected all channels (by selection
# 'ch 0:<last>' instead of 'ch none') we don't
# override the default channel selection (which is more efficient)
if channels!=range(n_chan):
chansel = channels
# ignore channel averaging if only one channel specified
if (n_chan if chansel is Ellipsis else len(chansel))==1 and avgChannel != AVG.None:
print "WARNING: channel averaging method {0} ignored because only one channel selected or available".format( avgChannel )
avgChannel = AVG.None
# Test if the selected combination of averaging settings makes sense
setup = data_quantity_time._averaging.get((avgChannel, avgTime), None)
if setup is None:
raise RuntimeError("the combination of {0} channel + {1} time averaging is not supported".format(avgChannel, avgTime))
(avgchan_fn, avgtime_fn, postpone) = setup
# How integration/averaging actually is implemented is by modifying the
# time stamp. By massaging the time stamp into buckets of size
# 'solint', we influence the label of the TIME field, which will make
# all data points with the same TIME stamp be integrated into the same
# data set
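# For example (hypothetical numbers): with solint = 10.0 seconds the trunc-based
# binning further below maps
#   1003.2 -> trunc(1003.2/10)*10 + 5 = 1005.0
#   1007.9 -> 1005.0
# so both samples carry the same (bin-centre) time stamp and integrate into one point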
self.timebin_fn = functional.identity
if avgTime!=AVG.None:
if solint is None:
# Ah. Hmm. Have to integrate different time ranges
# Let's transform our timerng list of (start, end) intervals into
# a list of (start, end, mid) such that we can easily map
# all time stamps [start, end] to mid
# If no time ranges defined at all average everything down to middle of experiment?
# It is important to KNOW that "selection.timeRange" (and thus our
# local copy 'timerng') is a list or sorted, non-overlapping time ranges
timerng = map(lambda (s, e): (s, e, (s+e)/2.0), timerng if timerng is not None else [(mapping.timeRange.start, mapping.timeRange.end)])
if len(timerng)==1:
print "WARNING: averaging all data into one point in time!"
print " This is because no solint was set or no time"
print " ranges were selected to average. Your plot"
print " may contain less useful info than expected"
# try to be a bit optimized in time stamp replacement - filter the
# list of time ranges to those applying to the time stamps we're
# replacing
def do_it(x):
mi,ma = numpy.min(x), numpy.max(x)
ranges = filter(lambda tr: not (tr[0]>ma or tr[1]<mi), timerng)
return reduce(lambda acc, (s, e, m): numpy.put(acc, numpy.where((acc>=s) & (acc<=e)), m) or acc, ranges, x)
self.timebin_fn = do_it
else:
# Check if solint isn't too small
ti = mapping.timeRange.inttm[0]
if solint<=ti:
raise RuntimeError("solint value {0:.3f} is less than integration time {1:.3f}".format(solint, ti))
self.timebin_fn = lambda x: (numpy.trunc(x/solint)*solint) + solint/2.0
# chansel now is Ellipsis (all channels) or a list of some selected channels
self.chanidx = list()
self.vectorAvg = functional.identity
self.scalarAvg = functional.identity
self.tmVectAvg = functional.identity
self.tmScalAvg = functional.identity
if avgChannel==AVG.None:
# No channel averaging - each selected channel goes into self.chanidx
self.chanidx = list(enumerate(range(n_chan) if chansel is Ellipsis else chansel))
# The vector average step will be misused to just apply the channel selection such that all selected channels
# are mapped to 0..n-1. This is only necessary in case not all channels were selected
if chansel is not Ellipsis:
self.vectorAvg = lambda x: x[:,chansel,:]
else:
# ok channel averaging requested
chbin_fn = None
if solchan is None:
# average all selected channels down to one
# data array 'x' has shape (n_int, n_chan, n_pol)
#self.chbin_fn = lambda x: normalize_ch(1)(numpy.ma.mean(x[:,avg_over,:], axis=1, keepdims=True))
# average the selected channels according the requested averaging method
chbin_fn = lambda x: avgchan_fn(1)(x[:,chansel,:])
self.chanidx = [(0, '*')]
else:
# average bins of solchan channels down to one
if solchan > n_chan:
raise RuntimeError("request to average channels in bins of {0} channels but only {1} are available".format(solchan, n_chan))
# Create a mask which is the complement of the selected channels
# (remember: chansel == Ellipsis => all channels)
ch_mask = (numpy.zeros if chansel is Ellipsis else numpy.ones)(n_chan, dtype=numpy.bool)
# only in case chansel != everything we must modify the mask
if chansel is not Ellipsis:
ch_mask[chansel] = False
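# Illustrative sketch (made-up numbers): with n_chan=8 and chansel=[2, 3]
# the complement mask leaves only the selected channels unmasked:
#   ch_mask = numpy.ones(8, dtype=numpy.bool)
#   ch_mask[[2, 3]] = False    # -> [ True  True False False  True  True  True  True]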
# Since we're going to zap masked values (replace by 0) we can usefully use
# reduceat! So all we then need is an index array, informing reduceat what the
# reduction boundaries are!
# First up: the actual bin numbers we're interested in, we compute the actual
# start + end indices from that
bins = numpy.unique((numpy.array(chansel) if chansel is not Ellipsis else numpy.arange(0, n_chan, solchan))//solchan)
bins.sort()
# we're going to apply channel binning so we must replace 'chansel'
# by 'bins' in order for downstream accounting of how many "channels" there will
# be in the data
chansel = bins
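# Illustrative sketch (made-up numbers): with solchan=4 and an original
# channel selection of [0, 1, 2, 9, 10] the occupied bins work out as
#   numpy.unique(numpy.array([0, 1, 2, 9, 10])//4)     # -> [0, 2]
# i.e. only bins 0 and 2 will show up in the output.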
# Did timings on comparing simplistic 'loop over list of slices' and numpy.add.reduceat based approaches.
# Results: grab bag - depending on problem set size:
# - simplistic approach between 2.5-6x faster (!) when averaging small number of
# channels in small number of bins (say <= 5 bins of ~5 channels)
# - reduceat approach slightly more than 2x faster when binning
# large number of channels in large-ish amount of bins (say 32 bins
# of 4 channels)
# Update: the reduceat also only wins if all bins are adjacent.
# the way <operator>.reduceat works is, given a list of indices [i,j,k]
# and applied to an array A, it will produce the following outputs:
# [ <operator>(A[i:j]), <operator>(A[j:k]), <operator>(A[k:]) ]
# (see https://docs.scipy.org/doc/numpy/reference/generated/numpy.ufunc.reduceat.html)
#
# Basically we can use this to efficiently bin the i:j, j:k, ..., z:end ranges
# If our bins (or, in the future, arbitrary channel ranges) are NOT adjacent, then we must
# feed these indices to <operator>.reduceat:
# [ start0, end0, start1, end1, ..., startN, endN ]
# with the start, end indices of channel ranges 0..N
# will produce the following outputs:
# [ <operator>( A[start0:end0] ), <operator>( A[end0:start1] ), <operator>( A[start1:end1] ), ... ]
# so we'd have to throw out every second entry in the output.
# In numpy that's simple enough but it also means that <operator>.reduceat() does twice as much work
# for no apparent reason.
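# Illustrative sketch of the adjacent case (made-up numbers):
#   A = numpy.arange(12)
#   numpy.add.reduceat(A, [0, 4, 8])     # -> [ 6, 22, 38]
# i.e. the sums of A[0:4], A[4:8] and A[8:12] in a single call.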
# Detect if the bins are adjacent
adjacent_bins = (len(set(bins[1:] - bins[:-1])) == 1) if len(bins)>1 else False
chbin_fn = None
if adjacent_bins:
# we're going to use reduceat() which means it's good enough
# to generate [bin0*solchan, bin1*solchan, ..., bin<nbin-1>*solchan]
indices = CP(bins)
# generate the channel index labels for correct labelling
self.chanidx = list()
for (ch_idx, start) in enumerate(indices):
self.chanidx.append( (ch_idx, start) )
#self.chanidx.append( (ch_idx, "{0}*".format(start)) )
# need to carefully check last entry in there; if 'last bin' < 'n_chan//solchan'
# we must add an extra final boundary or else reduceat() will add up to the end
# of the number of channels instead of until the end of the bin ...
if bins[-1]<((n_chan-1)//solchan):
#print "Must add one more bin limit; bins=",bins
# add one more bin limit, set slice to keep only n-1 bins
keepbins = slice(0, len(indices))
indices = numpy.r_[indices, [indices[-1]+1]]
#print " indices now = ",indices
else:
keepbins = Ellipsis
# indices are in units of solchan bins so for reduceat must
# scale them back to actual channels
indices *= solchan
#print " indices now = ",indices
# This is where the magic happens
transpose_ch = operator.methodcaller('transpose', (0, 2, 1))
def use_reduceat(x):
# (n_int, n_ch, n_pol) => (n_int, n_pol, n_ch)
tmpx = transpose_ch(x)
# also we must reshape it to a 2-D array of ((n_int * n_pol), n_chan) shape or else
# reduceat() won't work [https://docs.scipy.org/doc/numpy/reference/generated/numpy.ufunc.reduceat.html]
# remember the dimensions for later
n_int,n_pol = tmpx.shape[:-1]
tmpx = tmpx.reshape( (n_int*n_pol, -1) )
# mask out channels that we don't want averaged, joining it
# with the mask that excludes flagged data (by the user) and/or
# whatever was weight-thresholded ...
#print "====> previous mask shape=", tmpx.mask.shape,"/",tmpx.mask[-1]
#print " channel mask shape", ch_mask.shape,"/",ch_mask
#print " indices=",indices
#print " data=",tmpx.data[-1]
tmpx.mask = LOGICAL_OR(tmpx.mask, ch_mask)
#print " final mask shape", tmpx.mask.shape,"/",tmpx.mask[-1]
#if tmpx.mask.all():
# print " NO UNMASKED DATA"
# set all masked values to 0 such that they don't ever count towards *anything*
# e.g. suppose all channels in a bin are masked then the average should be NaN or something
# unrepresentable because there was no valid data at all
tmpx.data[tmpx.mask] = 0
#print " data after masking=",tmpx.data[-1]
# do the summation.
result = numpy.add.reduceat(tmpx.data, indices, axis=1)[:,keepbins]
#print " RESULT=",result
#tmp = numpy.add.reduceat(tmpx.data, indices, axis=1)
#result = tmp[:,keepbins]
# also count the number of unmasked values that went into each point
# we may use it for averaging, definitely be using it to create the mask
counts = numpy.add.reduceat(~tmpx.mask, indices, axis=1)[:,keepbins]
# pre-create the mask based on places where the count of unflagged points == 0
# these values have to be removed in the output (and also we can prevent
# divide-by-zero errors)
mask = ARRAY(counts == 0, dtype=numpy.bool)
#print " COUNTS=",counts
# Because we do things differently here than in the ordinary averaging,
# we must look at what was requested in order to mimic that behaviour
if avgchan_fn is avg_vectornorm:
# ok need to find the maximum complex number in each bin to scale it by
# take proper care of flagged/inf data
tmpx.data[tmpx.mask] = -numpy.inf
result /= (numpy.maximum.reduceat(numpy.abs(tmpx.data), indices, axis=1)[:,keepbins])
elif avgchan_fn in [avg_sum, avg_none]:
# either vector or scalar sum or no averaging, don't do anything
pass
else:
# ordinary arithmetic mean
# set counts = 1 where counts == 0 so we don't divide by 0
counts[ mask ] = 1
result /= counts
# set entries where counts==0 to NaN to make it explicit
# that, mathematically speaking, there is nothing there
result[mask] = numpy.nan
# unshape + untranspose from 2-d ((n_int * n_pol), n_output_channels)
# into 3-d (n_int, n_pol, n_output_channels)
return transpose_ch(numpy.ma.array(result.reshape((n_int, n_pol, -1)), mask=mask.reshape((n_int, n_pol, -1))))
# set chbin_fn to use reduceat()
chbin_fn = use_reduceat
else:
# not going to use reduceat(), just brute-force over a list of slices()
# do some extra pre-processing for the simplistic approach
# it uses slice() indexing so we pre-create the slice objects for it
# for each range of channels to average we compute src and dst slice
indices = map(lambda s: (s*solchan, min((s+1)*solchan, n_chan)), bins)
slices = [(slice(i, i+1), slice(rng[0], rng[1]+1)) for i, rng in enumerate(indices)]
# for display + loopindexing create list of (array_index, "CH label") tuples
self.chanidx = list()
for (ch_idx, start_end) in enumerate(indices):
self.chanidx.append( (ch_idx, start_end[0]) )
#self.chanidx.append( (ch_idx, "{0}*".format(start_end[0])) )
n_slices = len(slices)
# this is the simplistic approach
def use_dumbass_method(x):
# get an output array
n_int,_,n_pol = x.shape
result = numpy.ma.empty((n_int, n_slices, n_pol), dtype=x.dtype)
for (dst_idx, src_idx) in slices:
result[:,dst_idx,:] = numpy.ma.mean(x[:,src_idx,:], axis=1, keepdims=True)
result.mask[:,dst_idx,:] = (numpy.sum(x.mask[:,src_idx,:], axis=1, keepdims=True) == 0)
return result
# and set the channel bin function to use to this'un
chbin_fn = use_dumbass_method
# Some channel averaging is to be applied so chbin_fn must not be None
if chbin_fn is None:
raise RuntimeError("chbin_fn is None whilst some channel averaging requested. Please yell at H. Verkouter (verkouter@jive.eu)")
# depending on which kind of channel averaging, we apply it to the complex data before
# producing quantities or on the scalars after computing the quantities
if avgChannel == AVG.Scalar:
self.scalarAvg = chbin_fn
else:
self.vectorAvg = chbin_fn
# Now we must come up with a strategy for organizing the data processing chain
#
# If avgTime is Vector-like then we can only compute the quantities after all data's been
# read & averaged. We've already ruled out that avgChannel == Scalar (for that makes no sense)
#
# So we may have to postpone computing of the quantities until after having collected + integrated all data
post_quantities = lambda tp, x: [(tp, x)]
org_quantities = None
if postpone:
# create data sets based on the averaged data in a dataset
org_quantities = CP(self.quantities)
post_quantities = lambda _, x: map(lambda q: (q.quantity_name, q.quantity_fn(x)), org_quantities)
self.quantities = [Quantity('raw', functional.identity)]
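# Illustrative sketch (hypothetical quantity names): without postponing,
# post_quantities just echoes what it is handed,
#   post_quantities('amplitude', ys)     # -> [('amplitude', ys)]
# whereas with postponing the integrated 'raw' data is expanded into the
# originally requested quantities, e.g. for amplitude+phase:
#   post_quantities('raw', ys)           # -> [('amplitude', ...), ('phase', ...)]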
if len(self.chanidx)==1:
# post_channel doesn't do anything, self.chanidx remains a list of length 1
post_channel = lambda ch, x: [(ch, x)]
else:
# here the post_channel yields a list of extracted channels from the data 'x' coupled
# with the assigned label from self.chanidx
org_chanidx = CP(self.chanidx)
post_channel = lambda _, x: map(lambda chi: (chi[1], x[:,chi[0]]), org_chanidx)
# replace self.chanidx with a single entry which captures all channels and sets the
# associated label to None - which we could use as a sentinel, if needed
self.chanidx = [(Ellipsis, None)]
# Let's keep the channels together as long as possible. If only one channel remains then we can do it
# in our inner loop
#dataset_proto = dataset_list if avgTime == AVG.None else dataset_solint_array
if avgTime == AVG.None:
dataset_proto = dataset_list
else:
# depending on whether we need to solint one or more channels in one go
# loop over the current self.chanidx and count nr of channels
nChannel = reduce(lambda acc, chi: acc + ((n_chan if chansel is Ellipsis else len(chansel)) if chi[0] is Ellipsis else 1), self.chanidx, 0)
dataset_proto = dataset_solint_array if nChannel>1 else dataset_solint_scalar
## Now we can start the reduction of the table
# Note: there will /always/ be WEIGHT+FLAGCOL - either read from the table or invented
# 0 1 2 3 4 5 6
fields = [AX.TYPE, AX.BL, AX.FQ, AX.SB, AX.SRC, AX.P, AX.CH]
columns = ["ANTENNA1", "ANTENNA2", "TIME", "DATA_DESC_ID", "FIELD_ID", "WEIGHTCOL", "FLAG_ROW", "FLAG", self.datacol]
pts = ms2util.reducems2(self, self.table, collections.defaultdict(dataset_proto), columns, verbose=True, slicers=self.slicers, chunksize=5000)
# after the reduction's done we can put back our quantities if we did remove them before
if org_quantities is not None:
self.quantities = org_quantities
rv = {}
for (label, dataset) in pts.iteritems():
dl = list(label)
dataset.average( avgTime )
# convert x,y to numarrays
dataset.as_numarray()
for qn,qd in post_quantities(label[0], dataset.y):
for chn,chd in post_channel(label[6], qd):
dl[0] = qn
dl[6] = chn
rv[ self.MKLAB(fields, dl) ] = dataset_fixed(dataset.x, chd)
return rv
## Here we make the plots
def __call__(self, acc, a1, a2, tm, dd, fld, weight, flag_row, flag, data):
#print "************************************************"
#print "* __call__ data.shape=",data.shape
#print "************************************************"
# Create masked array from the data with invalid data already masked off
data = numpy.ma.masked_invalid(data)
# now we can easily add in flag information;
# flags either has shape of data or it's a single bool False
data.mask = numpy.logical_or(data.mask, self.untranspose_flag(numpy.logical_or(self.transpose_flag(flag), flag_row)))
# weight handling. It's been set up such that whatever the weight was
# (WEIGHT, WEIGHT_SPECTRUM, no weight thresholding) the following sequence
# always works
data = self.transpose(data)
data.mask = numpy.logical_or(data.mask, weight<self.threshold)
data = self.transpose(data)
# possibly vector-average the data
data = self.vectorAvg(data)
# Now create the quantity data - map the quantity functions over the (vector averaged)
# data and, if needed, scalar average them
qd = map(lambda q: (q.quantity_name, self.scalarAvg(q.quantity_fn(data))), self.quantities)
# Transform the time stamps, if necessary
tm = self.timebin_fn(tm)
# Now we can loop over all the rows in the data
dds = self.ddSelection
ci = self.chanidx
# We don't have to test *IF* the current data description id is
# selected; the fact that we see it here means that it WAS selected!
# The only interesting bit is selecting the correct products
for row in range(data.shape[0]):
(fq, sb, plist) = dds[ dd[row] ]
for (chi, chn) in ci:
for (pidx, pname) in plist:
l = ["", (a1[row], a2[row]), fq, sb, fld[row], pname, chn]
for (qnm, qval) in qd:
l[0] = qnm
acc[tuple(l)].append(tm[row], qval.data[row, chi, pidx], qval.mask[row, chi, pidx])
return acc
## This plotter will iterate over "DATA" or "LAG_DATA"
## and produce a number of quantities per frequency, possibly averaging over time and/or channels
class data_quantity_chan(plotbase):
# should set up a choice table based on the combination of averaging methods
# key into the lookup is '(avgChannelMethod, avgTimeMethod)'
# Also return whether the quantities must be postponed
_averaging = {
# no averaging at all, no need to postpone computing the quantity(ies)
(AVG.None, AVG.None): (avg_none, avg_none, False),
(AVG.None, AVG.Sum): (avg_none, avg_sum, False),
(AVG.None, AVG.Vectorsum): (avg_none, avg_sum, True),
# only time averaging requested
# scalar in time means we can collect the quantities themselves
(AVG.None, AVG.Scalar): (avg_none, avg_arithmetic, False),
# when vector(norm) averaging we must first collect all time data
# before we can compute the quantities, i.e. their computation must be postponed
(AVG.None, AVG.Vector): (avg_none, avg_arithmetic, True),
(AVG.None, AVG.Vectornorm): (avg_none, avg_vectornorm, True),
# When scalar averaging the channels no vector averaging in time possible
# Also no need to postpone computing the quantities
(AVG.Scalar, AVG.None): (avg_arithmetic, avg_none, False),
(AVG.Scalar, AVG.Sum): (avg_arithmetic, avg_sum, False),
(AVG.Scalar, AVG.Scalar): (avg_arithmetic, avg_arithmetic, False),
# When vector averaging the channels, the time averaging governs
# the choice of when to compute the quantity(ies)
(AVG.Vector, AVG.None): (avg_arithmetic, avg_none, False),
(AVG.Vector, AVG.Sum): (avg_arithmetic, avg_sum, False),
(AVG.Vector, AVG.Vectorsum): (avg_arithmetic, avg_sum, True),
(AVG.Vector, AVG.Scalar): (avg_arithmetic, avg_arithmetic, False),
# when doing vector in both dims we must first add up all the complex numbers
# for each channel(selection) and then in time and THEN compute the quantity(ies)
(AVG.Vector, AVG.Vector): (avg_arithmetic, avg_arithmetic, True),
(AVG.Vector, AVG.Vectornorm): (avg_arithmetic, avg_vectornorm, True),
# vectornorm averaging over the channels, see what's requested in time
(AVG.Vectornorm, AVG.None): (avg_vectornorm, avg_none, False),
(AVG.Vectornorm, AVG.Scalar): (avg_vectornorm, avg_arithmetic, False),
(AVG.Vectornorm, AVG.Vector): (avg_vectornorm, avg_arithmetic, True),
(AVG.Vectornorm, AVG.Vectornorm): (avg_vectornorm, avg_vectornorm, True),
(AVG.Vectornorm, AVG.Sum): (avg_vectornorm, avg_sum, False),
(AVG.Vectornorm, AVG.Vectorsum): (avg_vectornorm, avg_sum, True),
(AVG.Sum, AVG.None): (avg_sum, avg_none, False),
(AVG.Sum, AVG.Sum): (avg_sum, avg_sum, False),
(AVG.Sum, AVG.Scalar): (avg_sum, avg_arithmetic, False),
(AVG.Vectorsum, AVG.None): (avg_sum, avg_none, False),
(AVG.Vectorsum, AVG.Scalar): (avg_sum, avg_arithmetic, False),
(AVG.Vectorsum, AVG.Sum): (avg_sum, avg_sum, False),
(AVG.Vectorsum, AVG.Vectorsum): (avg_sum, avg_sum, True),
(AVG.Vectorsum, AVG.Vectornorm): (avg_sum, avg_vectornorm, True),
}
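# Illustrative sketch (hypothetical settings): makePlots() below looks up and
# unpacks an entry from this table, e.g. vector averaging the channels whilst
# scalar averaging in time resolves to:
#   (avgchan_fn, avgtime_fn, postpone) = data_quantity_chan._averaging[(AVG.Vector, AVG.Scalar)]
#   # -> (avg_arithmetic, avg_arithmetic, False)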
## our construct0r
## qlist = [ (quantity_name, quantity_fn), ... ]
##
def __init__(self, qlist, **kwargs):
self.quantities = list(itertools.starmap(Quantity, qlist))
self.byFrequency = kwargs.get('byFrequency', False)
def makePlots(self, msname, selection, mapping, **kwargs):
# Deal with channel averaging
# Scalar => average the derived quantity
# Vector => compute average cplx number, then the quantity
avgChannel = CP(selection.averageChannel)
avgTime = CP(selection.averageTime)
solchan = CP(selection.solchan)
solint = CP(selection.solint)
timerng = CP(selection.timeRange)
# some sanity checks
if solchan is not None and avgChannel==AVG.None:
raise RuntimeError("nchav value was set without specifiying a channel averaging method; please tell me how you want them averaged")
if solint is not None and avgTime==AVG.None:
raise RuntimeError("solint value was set without specifiying a time averaging method; please tell me how you want your time range(s) averaged")
## initialize the base class
super(data_quantity_chan, self).__init__(msname, selection, mapping, **kwargs)
# channel selection+averaging schemes; support averaging over channels (or chunks of channels)
chansel = Ellipsis #None
n_chan = self.table[0][self.datacol].shape[0]
if selection.chanSel:
channels = list(sorted(set(CP(selection.chanSel))))
max_chan = max(channels)
# if any of the indexed channels > n_chan that's an error
if max_chan>=n_chan:
raise RuntimeError("At least one selected channel ({0}) > largest channel index ({1})".format(max_chan, n_chan-1))
# also <0 is not quite acceptable
if min(channels)<0:
raise RuntimeError("Negative channel number {0} is not acceptable".format(min(channels)))
# if the user selected all channels (by selecting
# 'ch 0:<last>' instead of 'ch none') we don't
# override the default channel selection (which is more efficient)
if channels!=range(n_chan):
chansel = channels
# ignore channel averaging if only one channel specified
if (n_chan if chansel is Ellipsis else len(chansel))==1 and avgChannel != AVG.None:
print "WARNING: channel averaging method {0} ignored because only one channel selected".format( avgChannel )
avgChannel = AVG.None
# Test if the selected combination of averaging settings makes sense
setup = data_quantity_time._averaging.get((avgChannel, avgTime), None)
if setup is None:
raise RuntimeError("the combination of {0} channel + {1} time averaging is not supported".format(avgChannel, avgTime))
(avgchan_fn, avgtime_fn, postpone) = setup
# How integration/averaging actually is implemented is by modifying the
# time stamp. By massaging the time stamp into buckets of size
# 'solint', we influence the label of the TIME field, which will make
# all data points with the same TIME stamp be integrated into the same
# data set
self.timebin_fn = functional.identity
if avgTime!=AVG.None:
if solint is None:
# Ah. Hmm. Have to integrate different time ranges
# Let's transform our timerng list of (start, end) intervals into
# a list of (start, end, mid) such that we can easily map
# all time stamps [start, end] to mid
# If no time ranges defined at all average everything down to middle of experiment?
# It is important to KNOW that "selection.timeRange" (and thus our
# local copy 'timerng') is a list of sorted, non-overlapping time ranges
timerng = map(lambda (s, e): (s, e, (s+e)/2.0), timerng if timerng is not None else [(mapping.timeRange.start, mapping.timeRange.end)])
# try to be a bit optimized in time stamp replacement - filter the
# list of time ranges to those applying to the time stamps we're
# replacing
def do_it(x):
mi,ma = numpy.min(x), numpy.max(x)
ranges = filter(lambda tr: not (tr[0]>ma or tr[1]<mi), timerng)
return reduce(lambda acc, (s, e, m): numpy.put(acc, numpy.where((acc>=s) & (acc<=e)), m) or acc, ranges, x)
self.timebin_fn = do_it
else:
# Check if solint isn't too small
ti = mapping.timeRange.inttm[0]
if solint<=ti:
raise RuntimeError("solint value {0:.3f} is less than integration time {1:.3f}".format(solint, ti))
self.timebin_fn = lambda x: (numpy.trunc(x/solint)*solint) + solint/2.0
# chansel now is Ellipsis (all channels) or a list of some selected channels
self.chanidx = list()
self.chanidx_fn= None
self.vectorAvg = functional.identity
self.scalarAvg = functional.identity
self.tmVectAvg = functional.identity
self.tmScalAvg = functional.identity
# if the x-axis is frequency ... *gulp*
self.freq_of_dd=CP(self.ddFreqs)
if avgChannel==AVG.None:
# No channel averaging - the new x-axis will be the indices of the selected channels
self.chanidx = range(n_chan) if chansel is Ellipsis else chansel #list(enumerate(range(n_chan) if chansel is Ellipsis else chansel))
# The vector average step will be misused to just apply the channel selection such that all selected channels
# are mapped to 0..n-1. This is only necessary in case not all channels were selected
if chansel is not Ellipsis:
self.vectorAvg = lambda x: x[:,chansel,:]
if self.byFrequency:
self.chanidx_fn = lambda dd: self.freq_of_dd[dd][self.chanidx]
else:
self.chanidx_fn = functional.const(self.chanidx)
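# Illustrative sketch (made-up numbers): for a data description id whose
# frequencies are [1.0e9, 1.1e9, 1.2e9] and a chanidx of [0, 2], the x-axis
# values become the selected frequencies when plotting by frequency,
# otherwise simply the channel numbers themselves:
#   freq_of_dd = {0: numpy.array([1.0e9, 1.1e9, 1.2e9])}
#   freq_of_dd[0][[0, 2]]     # -> [ 1.0e+09, 1.2e+09]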
else:
# ok channel averaging requested
chbin_fn = None
if solchan is None:
# average all selected channels down to one
# data array 'x' has shape (n_int, n_chan, n_pol)
#self.chbin_fn = lambda x: normalize_ch(1)(numpy.ma.mean(x[:,avg_over,:], axis=1, keepdims=True))
# average the selected channels according to the requested averaging method
chbin_fn = lambda x: avgchan_fn(1)(x[:,chansel,:])
# compute average channel number - honouring Ellipsis if necessary #[(0, '*')]
self.chanidx = [numpy.mean(list(functional.range_(n_chan) if chansel is Ellipsis else chansel))]
# transform all frequencies to an average frequency
for dd in self.freq_of_dd.keys():
self.freq_of_dd[dd] = [numpy.mean(self.freq_of_dd[dd][chansel])]
if self.byFrequency:
self.chanidx_fn = lambda dd: self.freq_of_dd[dd]
else:
self.chanidx_fn = functional.const(self.chanidx)
else:
# average bins of solchan channels down to one
if solchan > n_chan:
raise RuntimeError("request to average channels in bins of {0} channels but only {1} are available".format(solchan, n_chan))
# Create a mask which is the complement of the selected channels
# (remember: chansel == Ellipsis => all channels)
ch_mask = (numpy.zeros if chansel is Ellipsis else numpy.ones)(n_chan, dtype=numpy.bool)
# only in case chansel != everything we must modify the mask
if chansel is not Ellipsis:
ch_mask[chansel] = False
# Since we're going to zap masked values (replace by 0) we can usefully use
# reduceat! So all we then need is an index array, informing reduceat what the
# reduction boundaries are!
# First up: the actual bin numbers we're interested in, we compute the actual
# start + end indices from that
bins = numpy.unique((numpy.array(chansel) if chansel is not Ellipsis else numpy.arange(0, n_chan, solchan))//solchan)
bins.sort()
# we're going to apply channel binning so we must replace 'chansel'
# by 'bins' in order for downstream accounting of how many "channels" there will
# be in the data
chansel = bins
# Did timings on comparing simplistic 'loop over list of slices' and numpy.add.reduceat based approaches.
# Results: grab bag - depending on problem set size:
# - simplistic approach between 2.5-6x faster (!) when averaging small number of
# channels in small number of bins (say <= 5 bins of ~5 channels)
# - reduceat approach slightly more than 2x faster when binning
# large number of channels in large-ish amount of bins (say 32 bins
# of 4 channels)
# Update: the reduceat also only wins if all bins are adjacent.
# the way <operator>.reduceat works is, given a list of indices [i,j,k]
# and applied to an array A, it will produce the following outputs:
# [ <operator>(A[i:j]), <operator>(A[j:k]), <operator>(A[k:]) ]
# (see https://docs.scipy.org/doc/numpy/reference/generated/numpy.ufunc.reduceat.html)
#
# Basically we can use this to efficiently bin the i:j, j:k, ..., z:end ranges
# If our bins (or, in the future, arbitrary channel ranges) are NOT adjacent, then we must
# feed these indices to <operator>.reduceat:
# [ start0, end0, start1, end1, ..., startN, endN ]
# with the start, end indices of channel ranges 0..N
# will produce the following outputs:
# [ <operator>( A[start0:end0] ), <operator>( A[end0:start1] ), <operator>( A[start1:end1] ), ... ]
# so we'd have to throw out every second entry in the output.
# In numpy that's simple enough but it also means that <operator>.reduceat() does twice as much work
# for no apparent reason.
# Detect if the bins are adjacent
adjacent_bins = (len(set(bins[1:] - bins[:-1])) == 1) if len(bins)>1 else False
chbin_fn = None
if adjacent_bins:
# we're going to use reduceat() which means it's good enough
# to generate [bin0*solchan, bin1*solchan, ..., bin<nbin-1>*solchan]
indices = CP(bins)
# generate the channel index labels for correct labelling
self.chanidx = CP(indices) #list()
# need to carefully check last entry in there; if 'last bin' < 'n_chan//solchan'
# we must add an extra final boundary or else reduceat() will add up to the end
# of the number of channels instead of until the end of the bin ...
if bins[-1]<((n_chan-1)//solchan):
# add one more bin limit, set slice to keep only n-1 bins
keepbins = slice(0, len(indices))
indices = numpy.r_[indices, [indices[-1]+1]]
else:
keepbins = Ellipsis
# indices are in units of solchan bins so for reduceat must
# scale them back to actual channels
indices *= solchan
# This is where the magic happens
transpose_ch = operator.methodcaller('transpose', (0, 2, 1))
def use_reduceat(x):
# (n_int, n_ch, n_pol) => (n_int, n_pol, n_ch)
tmpx = transpose_ch(x)
# also we must reshape it to a 2-D array of ((n_int * n_pol), n_chan) shape or else
# reduceat() won't work [https://docs.scipy.org/doc/numpy/reference/generated/numpy.ufunc.reduceat.html]
# remember the dimensions for later
n_int,n_pol = tmpx.shape[:-1]
tmpx = tmpx.reshape( (n_int*n_pol, -1) )
# mask out channels that we don't want averaged, joining it
# with the mask that excludes flagged data (by the user) and/or
# whatever was weight-thresholded ...
tmpx.mask = numpy.logical_or(tmpx.mask, ch_mask)
# set all masked values to 0 such that they don't ever count towards *anything*
# e.g. suppose all channels in a bin are masked then the average should be NaN or something
# unrepresentable because there was no valid data at all
tmpx.data[tmpx.mask] = 0
# do the summation.
#print "###############################################################"
#print " use_reduceat: x.shape=",x.shape
#print " tmpx.data.shape=",tmpx.data.shape
#print " indices=",indices
#print " keepbins=",keepbins
#print "###############################################################"
result = numpy.add.reduceat(tmpx.data, indices, axis=1)[:,keepbins]
# also count the number of unmasked values that went into each point
# we may use it for averaging, definitely be using it to create the mask
counts = numpy.add.reduceat(~tmpx.mask, indices, axis=1)[:,keepbins]
# Because we do things differently here than in the ordinary averaging,
# we must look at what was requested in order to mimic that behaviour
if avgchan_fn is avg_vectornorm:
# ok need to find the maximum complex number in each bin to scale it by
result /= (numpy.maximum.reduceat(numpy.abs(tmpx.data), indices, axis=1)[:,keepbins])
elif avgchan_fn in [avg_sum, avg_none]:
# no averaging/summing boils down to not doing anything
pass
else:
# ordinary arithmetic mean
result /= counts
# return masked array - derive the mask from the counts array:
# entries where no unmasked values contributed (counts == 0) become masked
mask = numpy.array(counts == 0, dtype=numpy.bool)
# set entries where counts==0 to NaN to make it explicit
# that, mathematically speaking, there is nothing there
result[mask] = numpy.nan
# unshape + untranspose from 2-d ((n_int * n_pol), n_output_channels)
# into 3-d (n_int, n_pol, n_output_channels)
return transpose_ch(numpy.ma.array(result.reshape((n_int, n_pol, -1)), mask=mask.reshape((n_int, n_pol, -1))))
# set chbin_fn to use reduceat()
chbin_fn = use_reduceat
else:
# not going to use reduceat(), just brute-force over a list of slices()
# do some extra pre-processing for the simplistic approach
# it uses slice() indexing so we pre-create the slice objects for it
# for each range of channels to average we compute src and dst slice
indices = map(lambda s: (s*solchan, min((s+1)*solchan, n_chan)), bins)
slices = [(slice(i, i+1), slice(rng[0], rng[1]+1)) for i, rng in enumerate(indices)]
# for display + loopindexing create list of (array_index, "CH label") tuples
self.chanidx = CP(bins)
n_slices = len(slices)
# this is the simplistic approach
def use_dumbass_method(x):
# get an output array
n_int,_,n_pol = x.shape
result = numpy.ma.empty((n_int, n_slices, n_pol), dtype=x.dtype)
for (dst_idx, src_idx) in slices:
result[:,dst_idx,:] = numpy.ma.mean(x[:,src_idx,:], axis=1, keepdims=True)
result.mask[:,dst_idx,:] = (numpy.sum(x.mask[:,src_idx,:], axis=1, keepdims=True) == 0)
return result
# and set the channel bin function to use to this'un
chbin_fn = use_dumbass_method
# transform all frequencies to an average frequency per bin
indices_l = map(lambda s: (s*solchan, min((s+1)*solchan, n_chan)), bins)
slices_l = [(slice(i, i+1), slice(rng[0], rng[1]+1)) for i, rng in enumerate(indices_l)]
def mk_fbins(x):
result = numpy.empty((len(slices_l)))
for (dst_idx, src_idx) in slices_l:
result[dst_idx] = numpy.mean(x[src_idx])
return result
for dd in self.freq_of_dd.keys():
freqs = ARRAY(self.freq_of_dd[dd])
self.freq_of_dd[dd] = mk_fbins( ARRAY(self.freq_of_dd[dd]) )
if self.byFrequency:
self.chanidx_fn = lambda dd: self.freq_of_dd[dd]
else:
self.chanidx_fn = functional.const(self.chanidx)
# Some channel averaging is to be applied so chbin_fn must not be None
if chbin_fn is None:
raise RuntimeError("chbin_fn is None whilst some channel averaging requested. Please yell at H. Verkouter (verkouter@jive.eu)")
# depending on which kind of channel averaging, we apply it to the complex data before
# producing quantities or on the scalars after computing the quantities
if avgChannel == AVG.Scalar:
self.scalarAvg = chbin_fn
else:
self.vectorAvg = chbin_fn
if self.chanidx_fn is None:
raise RuntimeError("The self.chanidx_fn is still None! Someone (verkouter@jive.eu) forgot something. Go yell at 'im!")
# Now we must come up with a strategy for organizing the data processing chain
#
# If avgTime is Vector-like then we can only compute the quantities after all data's been
# read & averaged. We've already ruled out that avgChannel == Scalar (for that makes no sense)
#
# So we may have to postpone computing of the quantities until after having collected + integrated all data
post_quantities = lambda tp, x: [(tp, x)]
org_quantities = None
if postpone:
# create data sets based on the averaged data in a dataset
org_quantities = CP(self.quantities)
post_quantities = lambda _, x: map(lambda q: (q.quantity_name, q.quantity_fn(x)), org_quantities)
self.quantities = [Quantity('raw', functional.identity)]
# Now that we've got an idea what our x-axis is going to be ('self.chanidx')
# we can warn if we think it's a tad short
if len(self.chanidx)==1:
print "WARNING: the output will contain only one channel, which might be an odd choice for your x-axis; it will be shorter than you think."
#if len(self.chanidx)==1:
# # post_channel doesn't do nothing, self.chanidx remains a list of length 1
# post_channel = lambda ch, x: [(ch, x)]
#else:
# # here the post_channel yields a list of extracted channels from the data 'x' coupled
# # with the assigned label from self.chanidx
# org_chanidx = CP(self.chanidx)
# post_channel = lambda _, x: map(lambda chi: (chi[1], x[:,chi[0]]), org_chanidx)
# # replace self.chanidx with a single entry which captures all channels and sets the
# # associated label to None - which we could use as a sentinel, if needed
# self.chanidx = [(Ellipsis, None)]
# Let's keep the channels together as long as possible. If only one channel remains then we can do it
# in our inner loop
#dataset_proto = dataset_list if avgTime == AVG.None else dataset_solint_array
#if avgTime == AVG.None:
# # if no time averaging is to be done the (possibly channel averaged) data set IS the data set
# dataset_proto = dataset_list
#else:
# # depending on wether we need to solint one or more channels in one go
# # loop over the current self.chanidx and count nr of channels
# nChannel = reduce(lambda acc, chi: acc + ((n_chan if chansel is Ellipsis else len(chansel)) if chi[0] is Ellipsis else 1), self.chanidx, 0)
# dataset_proto = dataset_solint_array if nChannel>1 else dataset_solint_scalar
## Now we can start the reduction of the table
# Note: there will /always/ be WEIGHT+FLAGCOL - either read from the table or invented
# 0 1 2 3 4 5 6
fields = [AX.TYPE, AX.BL, AX.FQ, AX.SB, AX.SRC, AX.P, AX.TIME]
columns = ["ANTENNA1", "ANTENNA2", "TIME", "DATA_DESC_ID", "FIELD_ID", "WEIGHTCOL", "FLAG_ROW", "FLAG", self.datacol]
pts = ms2util.reducems2(self, self.table, collections.defaultdict(dataset_chan), columns, verbose=True, slicers=self.slicers, chunksize=5000)
# after the reduction's done we can put back our quantities if we did remove them before
if org_quantities is not None:
self.quantities = org_quantities
rv = {}
for (label, dataset) in pts.iteritems():
dl = list(label)
dataset.average( avgTime )
# convert x,y to numarrays
dataset.as_numarray()
for qn,qd in post_quantities(label[0], dataset.y):
dl[0] = qn
rv[ self.MKLAB(fields, dl) ] = dataset_fixed(dataset.x, qd)
return rv
## Here we make the plots
def __call__(self, acc, a1, a2, tm, dd, fld, weight, flag_row, flag, data):
# Create masked array from the data with invalid data already masked off
data = numpy.ma.masked_invalid(data)
# now we can easily add in flag information;
# flags either has shape of data or it's a single bool False
data.mask = numpy.logical_or(data.mask, self.untranspose_flag(numpy.logical_or(self.transpose_flag(flag), flag_row)))
# weight handling. It's been set up such that whatever the weight was
# (WEIGHT, WEIGHT_SPECTRUM, no weight thresholding) the following sequence
# always works
data = self.transpose(data)
data.mask = numpy.logical_or(data.mask, weight<self.threshold)
data = self.transpose(data)
# possibly vector-average the data
data = self.vectorAvg(data)
# Now create the quantity data - map the quantity functions over the (vector averaged)
# data and, if needed, scalar average them
qd = map(lambda q: (q.quantity_name, self.scalarAvg(q.quantity_fn(data))), self.quantities)
# Transform the time stamps, if necessary
tm = self.timebin_fn(tm)
# Now we can loop over all the rows in the data
dds = self.ddSelection
#ci = self.chanidx
cif = self.chanidx_fn
# We don't have to test *IF* the current data description id is
# selected; the fact that we see it here means that it WAS selected!
# The only interesting bit is selecting the correct products
for row in range(data.shape[0]):
ddid = dd[row]
(fq, sb, plist) = dds[ ddid ]
#(fq, sb, plist) = dds[ dd[row] ]
for (pidx, pname) in plist:
l = ["", (a1[row], a2[row]), fq, sb, fld[row], pname, tm[row]]
for (qnm, qval) in qd:
l[0] = qnm
acc[tuple(l)].add_y(cif(ddid), qval.data[row, :, pidx], qval.mask[row, :, pidx])
return acc
### This plotter will iterate over "DATA" or "LAG_DATA"
### and produce a number of quantities per frequency
#class data_quantity_chan_old(plotbase):
#
# ## our construct0r
# ## qlist = [ (quantity_name, quantity_fn), ... ]
# ##
# ## Note that 'time averaging' will be implemented on a per-plot
# ## basis, not at the basic type of plot instance
# def __init__(self, qlist, **kwargs):
# self.quantities = qlist
# self.byFrequency = kwargs.get('byFrequency', False)
#
# def makePlots(self, msname, selection, mapping, **kwargs):
# datacol = CP(mapping.domain.column)
#
# # Deal with time averaging
# # Scalar => average the derived quantity
# # Vector => compute average cplx number, then the quantity
# avgTime = CP(selection.averageTime)
# solint = CP(selection.solint)
# timerng = CP(selection.timeRange)
#
# # need a function that (optionally) transforms the FQ/SB/CH idx to real frequencies
# self.changeXaxis = lambda dd, chanidx: chanidx
# if self.byFrequency:
# if mapping.spectralMap is None:
# raise RuntimeError("Request to plot by frequency but no spectral mapping available")
# self.changeXaxis = lambda dd, chanidx: self.ddFreqs[ dd ][ chanidx ]
#
# # solint must be >0.1 OR must be equal to None
# # solint==None implies "aggregate all data into the selected time ranges in
# # their separate bins"
# if avgTime!=AVG.None and not (selection.solint is None or selection.solint>0.1):
# raise RuntimeError, "time averaging requested but solint is not none or >0.1: {0}".format(selection.solint)
# # If solint is a number and averaging is not set, default to Scalar averaging
# if selection.solint and avgTime==AVG.None:
# avgTime = AVG.Scalar
# print "WARN: solint is set but no averaging method was specified. Defaulting to ",avgTime
#
# if selection.averageChannel!=AVG.None:
# print "WARN: {0} channel averaging ignored for this plot".format(selection.averageChannel)
#
# # If time averaging requested but solint==None and timerange==None, this means we
# # have to set up a time range to integrate. timerange==None => whole data set
# if avgTime!=AVG.None and solint is None and timerng is None:
# timerng = [(mapping.timeRange.start, mapping.timeRange.end)]
#
# ## initialize the base class
# super(data_quantity_chan, self).__init__(msname, selection, mapping, **kwargs)
#
# ## Some variables must be stored in ourselves such
# ## that they can be picked up by the callback function
# slicers = {}
#
# # For data sets with a large number of channels
# # (e.g. UniBoard data, 1024 channels spectral resolution)
# # it makes a big (speed) difference if there is a channel
# # selection to let the casa stuff [the ms column] do
# # the (pre)selection so we do not get *all* the channels
# # into casa
#
# # 1) the channel selection. it is global; ie applies to
# # every data description id.
# # also allows us to create a slicer
# # default: iterate over all channels
# shape = self.table[0][datacol].shape
# self.chunksize = 5000
# self.maskfn = lambda x: numpy.ma.MaskedArray(x, mask=numpy.ma.nomask)
# self.chanidx = numpy.arange(shape[0])
# self.chansel = numpy.arange(shape[0])
#
# # After having read the data, first we apply the masking function
# # which disables the unselected channels
# if selection.chanSel:
# channels = sorted(CP(selection.chanSel))
# indices = map(lambda x: x-channels[0], channels)
# self.chanidx = numpy.array(channels, dtype=numpy.int32)
# self.chansel = numpy.array(indices, dtype=numpy.int32)
# self.maskfn = mk3dmask_fn_mask(self.chunksize, indices, shape[-1])
# slicers[datacol] = ms2util.mk_slicer((channels[0], 0), (channels[-1]+1, shape[-1]))
#
# # This is how we're going to go about dealing with time averaging
# # The model is that, after having read the data, there is a function
# # being called which produces (a list of) data products
# # * with scalar averaging, we produce a list of scalar quantities, the result
# # of calling self.quantities on the data. the .TYPE field in the data set
# # label is the actual quantity type
# # * with vector averaging, we produce nothing but the raw data itself; it is
# # the complex numbers that we must integrate/average. we give these data sets
# # the .TYPE of 'raw'.
# # * with no averaging at all, we also return the 'raw' data
# #
# # Then all the data is accumulated
# # After the whole data set has been processed, we do the averaging and
# # apply another transformation function:
# # * with scalar averaging, we don't have to do anything; the quantities have already
# # been produced
# # * with vector averaging, we take all data sets with type 'raw' and map our
# # quantity producing functions over the averaged data, producing new data sets
# # The raw data can now be deleted
#
# # How integration/averaging actually is implemented is by modifying the
# # time stamp. By massaging the time stamp into buckets of size
# # 'solint', we influence the label of the TIME field, which will make
# # all data points with the same TIME stamp be integrated into the same
# # data set
# self.timebin_fn = lambda x: x
# if avgTime!=AVG.None:
# if solint is None:
# # Ah. Hmm. Have to integrate different time ranges
# # Let's transform our timerng list of (start, end) intervals into
# # a list of (start, end, mid) such that we can easily map
# # all time stamps [start, end] to mid
#
# # It is important to KNOW that "selection.timeRange" (and thus our
# # local copy 'timerng') is a list or sorted, non-overlapping time ranges
# timerng = map(lambda (s, e): (s, e, (s+e)/2.0), timerng)
# self.timebin_fn = lambda x: \
# reduce(lambda acc, (s, e, m): numpy.put(acc, numpy.where((acc>=s) & (acc<=e)), m) or acc, timerng, x)
# else:
# # we have already checked the validity of solint
# self.timebin_fn = lambda x: (numpy.trunc(x/solint)*solint) + solint/2.0
#
# # With no time averaging or with Scalar averaging, we can immediately produce
# # the quantities. Only when doing Vector averaging, we must produce the quantities
# # after having read all the data
# self.preProcess = lambda x: map(lambda (qnm, qfn): (qnm, qfn(x)), self.quantities)
# if avgTime in [AVG.Vector, AVG.Vectornorm]:
# doNormalize = (lambda x: x) if avgTime==AVG.Vector else (lambda x: x/numpy.abs(x))
# self.preProcess = lambda x: [('raw', doNormalize(x))]
#
# fields = [AX.TYPE, AX.BL, AX.FQ, AX.SB, AX.SRC, AX.P, AX.TIME]
#
# # weight filtering
# self.nreject = 0
# self.reject_f = lambda weight: False
# self.threshold = -10000000
# if selection.weightThreshold is not None:
# self.threshold = CP(selection.weightThreshold)
# self.reject_f = lambda weight: weight<self.threshold
#
# ## Now we can start the reduction of the table
# if selection.weightThreshold is None:
# columns = ["ANTENNA1", "ANTENNA2", "TIME", "DATA_DESC_ID", "FIELD_ID", datacol]
# self.actual_fn = self.withoutWeightThresholding
# else:
# columns = ["ANTENNA1", "ANTENNA2", "TIME", "DATA_DESC_ID", "FIELD_ID", "WEIGHT", datacol]
# self.actual_fn = self.withWeightThresholding
# if self.flags:
# columns.append( "FLAGCOL" )
# pts = ms2util.reducems2(self, self.table, {}, columns, verbose=True, slicers=slicers, chunksize=self.chunksize)
#
# if self.nreject:
# print "Rejected ",self.nreject," points because of weight criterion"
#
# ## Excellent. Now start post-processing
# rv = {}
# for (label, ds) in pts.iteritems():
# ds.average()
# if label[0]=='raw':
# dl = list(label)
# for (qnm, qd) in map(lambda (qnm, qfn): (qnm, qfn(ds.y)), self.quantities):
# dl[0] = qnm
# rv[ self.MKLAB(fields, dl) ] = dataset(ds.x, qd, ds.m)
# else:
# rv[ self.MKLAB(fields, label) ] = ds
# #for k in rv.keys():
# # print "Plot:",str(k),"/",map(str, rv[k].keys())
# return rv
#
#
# ## Here we make the plots
# def __call__(self, *args):
# return self.actual_fn(*args)
#
# # This is the one WITHOUT WEIGHT THRESHOLDING
# def withoutWeightThresholding(self, acc, a1, a2, tm, dd, fld, data, *flag):
# # Make really sure we have a 3-D array of data ...
# d3d = m3d(data)
# shp = data.shape
#
# # Good. We have a block of data, shape (nrow, nchan, npol)
# # Step 1: apply the masking, such that any averaging later on
# # will skip the masked data.
# # 'md' is "masked data"
# # Try to use the pre-computed channel mask, if it fits,
# # otherwise create one for this odd-sized block
# # (typically the last block)
# mfn = self.maskfn if shp[0]==self.chunksize else mk3dmask_fn_mask(shp[0], self.chansel, shp[2])
#
# # Now create the quantity data
# # qd will be a list of (quantity_name, quantity_data) tuples
# # original: qd = map(lambda (qnm, qfn): (qnm, qfn(mfn(d3d))), self.quantities)
# qd = self.preProcess( mfn(d3d) )
#
# # Transform the time stamps [rounds time to integer multiples of solint
# # if that is set or the midpoint of the time range if solint was None]
# tm = self.timebin_fn( tm )
# flag = flag[0] if flag else numpy.zeros(data.shape, dtype=numpy.bool)
#
# # Now we can loop over all the rows in the data
#
# # We don't have to test *IF* the current data description id is
# # selected; the fact that we see it here means that it WAS selected!
# # The only interesting bit is selecting the correct products
# dds = self.ddSelection
# cx = self.changeXaxis
# ci = self.chanidx
# cs = self.chansel
# for row in range(shp[0]):
# ddr = dd[row]
# (fq, sb, plist) = dds[ ddr ]
# # we can already precompute most of the label
# # potentially, modify the TIME value to be a time bucket such
# # that we can integrate into it
# l = ["", (a1[row], a2[row]), fq, sb, fld[row], "", tm[row]]
# # we don't iterate over channels, only over polarizations
# for (pidx, pname) in plist:
# l[5] = pname
# for (qnm, qval) in qd:
# l[0] = qnm
# acc.setdefault(tuple(l), dataset()).sumy(cx(ddr, ci), qval[row, cs, pidx], flag[row, cs, pidx])
# return acc
#
# # This is the one WITH WEIGHT THRESHOLDING
# def withWeightThresholding(self, acc, a1, a2, tm, dd, fld, weight, data):
# # Make really sure we have a 3-D array of data ...
# d3d = m3d(data)
# shp = data.shape
#
# # compute weight mask
# w3d = numpy.zeros(shp, dtype=numpy.float)
# for i in xrange(shp[0]):
# # we have weights per polarization but we must
# # expand them to per channel ...
# cw = numpy.vstack( shp[1]*[weight[i]] )
# w3d[i] = cw
# w3m = w3d<self.threshold
# wfn = lambda a: numpy.ma.MaskedArray(a.data, numpy.logical_and(a.mask, w3m))
#
# # Good. We have a block of data, shape (nrow, nchan, npol)
# # Step 1: apply the masking, such that any averaging later on
# # will skip the masked data.
# # 'md' is "masked data"
# # Try to use the pre-computed channel mask, if it fits,
# # otherwise create one for this odd-sized block
# # (typically the last block)
# mfn = self.maskfn if shp[0]==self.chunksize else mk3dmask_fn_mask(shp[0], self.chansel, shp[2])
#
# # Now create the quantity data
# # qd will be a list of (quantity_name, quantity_data) tuples
# # original: qd = map(lambda (qnm, qfn): (qnm, qfn(mfn(d3d))), self.quantities)
# qd = self.preProcess( wfn(mfn(d3d)) )
#
# # Transform the time stamps [rounds time to integer multiples of solint
# # if that is set or the midpoint of the time range if solint was None]
# tm = self.timebin_fn( tm )
# flag = flag[0] if flag else numpy.zeros(data.shape, dtype=numpy.bool)
#
# # Now we can loop over all the rows in the data
# dds = self.ddSelection
# ci = self.chanidx
# cs = self.chansel
# cx = self.changeXaxis
# rf = self.reject_f
# # We don't have to test *IF* the current data description id is
# # selected; the fact that we see it here means that it WAS selected!
# # The only interesting bit is selecting the correct products
# for row in range(shp[0]):
# ddr = dd[row]
# (fq, sb, plist) = dds[ ddr ]
# # we can already precompute most of the label
# # potentially, modify the TIME value to be a time bucket such
# # that we can integrate into it
# l = ["", (a1[row], a2[row]), fq, sb, fld[row], "", tm[row]]
# # we don't iterate over channels, only over polarizations
# for (pidx, pname) in plist:
# if rf(w3d[row, 0, pidx]):
# self.nreject = self.nreject + 1
# continue
# l[5] = pname
# for (qnm, qval) in qd:
# l[0] = qnm
# acc.setdefault(tuple(l), dataset()).sumy(cx(ddr, ci), qval[row, cs, pidx], flag[row, cs, pidx])
# return acc
#
class unflagged(object):
def __getitem__(self, idx):
return False
#class weight_time_old(plotbase):
# def __init__(self):
# # nothing yet ...
# pass
#
# def makePlots(self, msname, selection, mapping, **kwargs):
# ## initialize the base class (opens table, does selection)
# super(weight_time, self).__init__(msname, selection, mapping, **kwargs)
#
# # Support "time averaging" by aggregating data points in time bins of 'solint' length
# solint = CP(selection.solint)
# avgTime = CP(selection.averageTime)
# #solint_fn = solint_none
# self.timebin_fn = functional.identity
# if avgTime!=AVG.None:
# if solint is None:
# # Ah. Hmm. Have to integrate different time ranges
# # Let's transform our timerng list of (start, end) intervals into
# # a list of (start, end, mid) such that we can easily map
# # all time stamps [start, end] to mid
#
# # It is important to KNOW that "selection.timeRange" (and thus our
# # local copy 'timerng') is a list or sorted, non-overlapping time ranges
# timerng = map(lambda (s, e): (s, e, (s+e)/2.0), timerng)
# if len(timerng)==1:
# print "WARNING: averaging all data into one point in time!"
# print " This is because no solint was set. Your plot"
# print " may contain less useful info than expected"
#
# # try to be a bit optimized in time stamp replacement - filter the
# # list of time ranges to those applying to the time stamps we're
# # replacing
# def do_it(x):
# mi,ma = numpy.min(x), numpy.max(x)
# ranges = filter(lambda tr: not (tr[0]>ma or tr[1]<mi), timerng)
# return reduce(lambda acc, (s, e, m): numpy.put(acc, numpy.where((acc>=s) & (acc<=e)), m) or acc, ranges, x)
# self.timebin_fn = do_it
# else:
# # Check if solint isn't too small
# ti = mapping.timeRange.inttm[0]
# if solint<ti:
# raise RuntimeError("solint value {0:.3f} is less than integration time {1:.3f}".format(solint, ti))
# self.timebin_fn = lambda x: (numpy.trunc(x/solint)*solint) + solint/2.0
#
# self.dataset_proto = dataset_list if avgTime == AVG.None else dataset_solint
#
# ## we plot using the WEIGHT column
#
# fields = [AX.TYPE, AX.BL, AX.FQ, AX.SB, AX.SRC, AX.P]
#
# #self.cnt = 0
# #self.ts = set()
# ## Now we can start the reduction of the table
# columns = ["ANTENNA1", "ANTENNA2", "TIME", "DATA_DESC_ID", "FIELD_ID", "WEIGHT"] + ["FLAG_ROW"] if self.flags else []
# pts = ms2util.reducems2(self, self.table, {}, columns, verbose=True, chunksize=5000)
#
# #print "WE SHOULD HAVE ",self.cnt," DATA POINTS"
# #print "ANDALSO ",len(self.ts)," TIME STAMPS"
#
# rv = {}
# #dt = 0.0
# for (label, dataset) in pts.iteritems():
# #dt += solint_fn( dataset )
# dataset.average( avgTime )
# rv[ self.MKLAB(fields, label) ] = dataset
# #if solint:
# # print "SOLINT processing took\t{0:.3f}s".format( dt )
# return rv
#
# def __call__(self, acc, a1, a2, tm, dd, fld, weight, *flag_row):
# #print "__call__: ",a1,a2,tm,dd,fld,weight.shape
# # ok, process all the rows!
# shp = weight.shape
# flags = unflagged() if not flag_row else flag_row[0]
# # single-pol data will have shape (nrow,)
# # but our code really would like it to be (nrow, npol), even if 'npol' == 1. (FFS casacore!)
# tm = self.timebin_fn( tm )
# d2d = m2d(weight)
# for row in range(shp[0]):
# (fq, sb, plist) = self.ddSelection[ dd[row] ]
# # we don't iterate over channels
# for (pidx, pname) in plist:
# acc.setdefault((YTypes.weight, (a1[row], a2[row]), fq, sb, fld[row], pname), self.dataset_proto()).append(tm[row], weight[row, pidx], flags[row])
# return acc
class weight_time(plotbase):
# should set up a choice table based on the combination of averaging methods
# key into the lookup is '(avgChannelMethod, avgTimeMethod)'
# For the weights we can remove a lot of entries:
# no vector averaging applies here; weight IS a scalar.
# Also no point in postponing because the weight IS the quantity;
# it is not a derived quantity.
# Note: we KEEP the postpone mechanism and have self.quantities be a list-of-quantities
# (currently 1 entry with the identity transform ...) because that way
# we could in the future have different flavours of this fn
_averaging = {
(AVG.None, AVG.None): (avg_none, avg_none, False),
(AVG.None, AVG.Scalar): (avg_none, avg_arithmetic, False),
(AVG.Scalar, AVG.None): (avg_arithmetic, avg_none, False),
(AVG.Scalar, AVG.Scalar): (avg_arithmetic, avg_arithmetic, False),
(AVG.None, AVG.Sum): (avg_none, avg_sum, False),
(AVG.Scalar, AVG.Sum): (avg_arithmetic, avg_sum, False),
(AVG.Sum, AVG.Sum): (avg_sum, avg_sum, False),
}
## our construct0r
def __init__(self):
# weight IS the quantity :-)
self.quantities = [Quantity('weight', functional.identity)]
def makePlots(self, msname, selection, mapping, **kwargs):
# Deal with channel averaging
# Scalar => average the derived quantity
avgChannel = CP(selection.averageChannel)
avgTime = CP(selection.averageTime)
solchan = CP(selection.solchan)
solint = CP(selection.solint)
timerng = CP(selection.timeRange)
# some sanity checks
if solchan is not None and avgChannel==AVG.None:
raise RuntimeError("nchav value was set without specifiying a channel averaging method; please tell me how you want them averaged")
if solint is not None and avgTime==AVG.None:
raise RuntimeError("solint value was set without specifiying a time averaging method; please tell me how you want your time range(s) averaged")
## initialize the base class
super(weight_time, self).__init__(msname, selection, mapping, **kwargs)
n_chan = self.table[0][self.datacol].shape[0]
colname = 'WEIGHT_SPECTRUM' if 'WEIGHT_SPECTRUM' in self.table.colnames() else 'WEIGHT'
spectrum = (colname=='WEIGHT_SPECTRUM')
# start off with the default channel selection, if there is weight-per-channel anyhoo
chansel = Ellipsis if spectrum else None
if selection.chanSel:
if spectrum:
channels = mk_chansel(selection.chanSel)
max_chan = max(channels)
# if any of the indexed channels > n_chan that's an error
if max_chan>=n_chan:
raise RuntimeError("At least one selected channel ({0}) > largest channel index ({1})".format(max_chan, n_chan-1))
# also <0 is not quite acceptable
if min(channels)<0:
raise RuntimeError("Negative channel number {0} is not acceptable".format(min(channels)))
# if the user selected all channels (by selecting
# 'ch 0:<last>' instead of 'ch none') we don't
# override the default channel selection (which is more efficient)
if channels!=range(n_chan):
chansel = channels
# ignore channel averaging if only one channel specified
if (n_chan if chansel is Ellipsis else len(chansel))==1 and avgChannel != AVG.None:
print "WARNING: channel averaging method {0} ignored because only one channel selected".format( avgChannel )
avgChannel = AVG.None
else:
# channel selection active but only WEIGHT column
print "WARNING: you have selected channels but there is no WEIGHT_SPECTRUM column"
print " your channel selection will be IGNORED"
chansel = None
if avgChannel != AVG.None:
print "WARNING: you specified {0} channel averaging".format(avgChannel)
print " but there is no WEIGHT_SPECTRUM column so IGNORING channel averaging"
avgChannel = AVG.None
else:
# no channels selected by user, check whether we actually *have* channels if averaging requested
if avgChannel != AVG.None and not spectrum:
print "WARNING: you specified {0} channel averaging".format(avgChannel)
print " but there is no WEIGHT_SPECTRUM column so IGNORING channel averaging"
avgChannel = AVG.None
# Test if the selected combination of averaging settings makes sense
setup = weight_time._averaging.get((avgChannel, avgTime), None)
if setup is None:
raise RuntimeError("the combination of {0} channel + {1} time averaging is not supported".format(avgChannel, avgTime))
(avgchan_fn, avgtime_fn, postpone) = setup
# How integration/averaging actually is implemented is by modifying the
# time stamp. By massaging the time stamp into buckets of size
# 'solint', we influence the label of the TIME field, which will make
# all data points with the same TIME stamp be integrated into the same
# data set
self.timebin_fn = functional.identity
if avgTime!=AVG.None:
if solint is None:
# Ah. Hmm. Have to integrate different time ranges
# Let's transform our timerng list of (start, end) intervals into
# a list of (start, end, mid) such that we can easily map
# all time stamps [start, end] to mid
# If no time ranges defined at all average everything down to middle of experiment?
# It is important to KNOW that "selection.timeRange" (and thus our
# local copy 'timerng') is a list of sorted, non-overlapping time ranges
timerng = map(lambda (s, e): (s, e, (s+e)/2.0), timerng if timerng is not None else [(mapping.timeRange.start, mapping.timeRange.end)])
if len(timerng)==1:
print "WARNING: averaging all data into one point in time!"
print " This is because no solint was set or no time"
print " ranges were selected to average. Your plot"
print " may contain less useful info than expected"
# try to be a bit optimized in time stamp replacement - filter the
# list of time ranges to those applying to the time stamps we're
# replacing
def do_it(x):
mi,ma = numpy.min(x), numpy.max(x)
ranges = filter(lambda tr: not (tr[0]>ma or tr[1]<mi), timerng)
return reduce(lambda acc, (s, e, m): numpy.put(acc, numpy.where((acc>=s) & (acc<=e)), m) or acc, ranges, x)
self.timebin_fn = do_it
else:
# Check if solint isn't too small
ti = mapping.timeRange.inttm[0]
if solint<=ti:
raise RuntimeError("solint value {0:.3f} is less than integration time {1:.3f}".format(solint, ti))
self.timebin_fn = lambda x: (numpy.trunc(x/solint)*solint) + solint/2.0
# chansel now is Ellipsis (all channels) or a list of some selected channels
self.chanidx = list()
self.vectorAvg = functional.identity
self.scalarAvg = functional.identity
self.tmVectAvg = functional.identity
self.tmScalAvg = functional.identity
if avgChannel==AVG.None:
if spectrum:
# No channel averaging - each selected channel goes into self.chanidx
self.chanidx = list(enumerate(range(n_chan) if chansel is Ellipsis else chansel))
# The vector average step will be misused to just apply the channel selection such that all selected channels
# are mapped to 0..n-1. This is only necessary in case not all channels were selected
if chansel is not Ellipsis:
self.vectorAvg = lambda x: x[:,chansel,:]
else:
# no channels at all
self.chanidx = None
else:
# ok channel averaging requested
# note that the setup code already verifies that it makes sense to average channels.
# which is to say: if there is only the WEIGHT column then averaging will be disabled
chbin_fn = None
if solchan is None:
# average all selected channels down to one
# weight_spectrum array 'x' has shape (n_int, n_chan, n_pol)
# average the selected channels according the requested averaging method
chbin_fn = lambda x: avgchan_fn(1)(x[:,chansel,:])
self.chanidx = [(0, '*')]
else:
# average bins of solchan channels down to one
if solchan > n_chan:
raise RuntimeError("request to average channels in bins of {0} channels but only {1} are available".format(solchan, n_chan))
# Create a mask which is the complement of the selected channels
# (remember: chansel == Ellipsis => all channels)
ch_mask = (numpy.zeros if chansel is Ellipsis else numpy.ones)(n_chan, dtype=numpy.bool)
# only in case chansel != everything we must modify the mask
if chansel is not Ellipsis:
ch_mask[chansel] = False
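# Example of the mask construction (hypothetical selection): with n_chan = 8
# and chansel = [1, 2, 5], ch_mask starts as all-True and ends up
# [True, False, False, True, True, False, True, True]; True marks channels
# that must NOT contribute to any bin.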
# Since we're going to zap masked values (replace by 0) we can usefully use
# reduceat! So all we then need is an index array, informing reduceat what the
# reduction boundaries are!
# First up: the actual bin numbers we're interested in, we compute the actual
# start + end indices from that
bins = numpy.unique((numpy.array(chansel) if chansel is not Ellipsis else numpy.arange(0, n_chan, solchan))//solchan)
bins.sort()
# we're going to apply channel binning so we must replace 'chansel'
# by 'bins' in order for downstream accounting of how many "channels" there will
# be in the data
chansel = bins
# Did timings on comparing simplistic 'loop over list of slices' and numpy.add.reduceat based approaches.
# Results: grab bag - depending on problem set size:
# - simplistic approach between 2.5-6x faster (!) when averaging small number of
# channels in small number of bins (say <= 5 bins of ~5 channels)
# - reduceat approach slightly more than 2x faster when binning
# large number of channels in large-ish amount of bins (say 32 bins
# of 4 channels)
# Update: the reduceat also only wins if all bins are adjacent.
# the way <operator>.reduceat works is, given a list of indices [i,j,k]
# and applied to an array A, it will produce the following outputs:
# [ <operator>(A[i:j]), <operator>(A[j:k]), <operator>(A[k:-1]) ]
# (see https://docs.scipy.org/doc/numpy/reference/generated/numpy.ufunc.reduceat.html)
#
# Basically we can use this to efficiently bin i:j, j:k, ..., z:-1 ranges
# If our bins (or in the future, arbitrary channels ranges) are NOT adjacent, then we must
# feed these operators to <operator>.reduceat:
# [ start0, end0, start1, end1, ..., startN, endN ]
# with the start, end indices of channel ranges 0..N
# will produce the following outputs:
# [ <operator>( A[start0:end0] ), <operator>( A[end0:start1] ), <operator>( A[start1:end1] ), ... ]
# so we'd have to throw out every second entry in the output.
# In numpy that's simple enough but it also means that <operator>.reduceat() does twice as much work
# for no apparent reason.
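# A small worked example (illustration only) of the adjacent-bin case:
#   numpy.add.reduceat(numpy.array([1, 2, 3, 4, 5, 6]), [0, 2, 4])
#   => array([ 3,  7, 11])        # sums of [0:2], [2:4], [4:]
# and of the start/end-pair case described above, where every second output
# would have to be discarded:
#   numpy.add.reduceat(numpy.array([1, 2, 3, 4, 5, 6]), [0, 2, 3, 5])
#   => array([3, 3, 9, 6])        # keep entries 0 and 2 -> [3, 9]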
# Detect if the bins are adjacent
adjacent_bins = (len(set(bins[1:] - bins[:-1])) == 1) if len(bins)>1 else False
chbin_fn = None
if adjacent_bins:
# we're going to use reduceat() which means it's good enough
# to generate [bin0*solchan, bin1*solchan, ..., bin<nbin-1>*solchan]
indices = CP(bins)
# generate the channel index labels for correct labelling
self.chanidx = list()
for (ch_idx, start) in enumerate(indices):
self.chanidx.append( (ch_idx, start) )
# need to carefully check last entry in there; if 'last bin' < 'n_chan//solchan'
# we must add an extra final boundary or else reduceat() will add up to the end
# of the number of channels instead of until the end of the bin ...
if bins[-1]<((n_chan-1)//solchan):
# add one more bin limit, set slice to keep only n-1 bins
keepbins = slice(0, len(indices))
indices = numpy.r_[indices, [indices[-1]+1]]
else:
keepbins = Ellipsis
# indices are in units of solchan bins so for reduceat must
# scale them back to actual channels
indices *= solchan
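# Example of the index construction (hypothetical sizes): n_chan = 32,
# solchan = 4, chansel = Ellipsis gives bins = [0..7]; the last bin (7)
# equals (n_chan-1)//solchan so keepbins stays Ellipsis, and after scaling
# indices = [0, 4, 8, 12, 16, 20, 24, 28], i.e. the channel index at which
# each averaging bin starts.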
# This is where the magic happens
transpose_ch = operator.methodcaller('transpose', (0, 2, 1))
def use_reduceat(x):
# (n_int, n_ch, n_pol) => (n_int, n_pol, n_ch)
tmpx = transpose_ch(x)
# also we must reshape it to a 2-D array of ((n_int * n_pol), n_chan) shape or else
# reduceat() won't work [https://docs.scipy.org/doc/numpy/reference/generated/numpy.ufunc.reduceat.html]
# remember the dimensions for later
n_int,n_pol = tmpx.shape[:-1]
tmpx = tmpx.reshape( (n_int*n_pol, -1) )
# mask out channels that we don't want averaged, joining it
# with the mask that excludes flagged data (by the user) and/or
# whatever was weight-thresholded ...
tmpx.mask = LOGICAL_OR(tmpx.mask, ch_mask)
# set all masked values to 0 such that they don't ever count towards *anything*
# e.g. suppose all channels in a bin are masked then the average should be NaN or something
# unrepresentable because there was no valid data at all
tmpx.data[tmpx.mask] = 0
# do the summation.
result = numpy.add.reduceat(tmpx.data, indices, axis=1)[:,keepbins]
# also count the number of unmasked values that went into each point
# we may use it for averaging, definitely be using it to create the mask
counts = numpy.add.reduceat(~tmpx.mask, indices, axis=1)[:,keepbins]
# pre-create the mask based on places where the count of unflagged points == 0
# these values have to be removed in the output (and also we can prevent
# divide-by-zero errors)
mask = ARRAY(counts == 0, dtype=numpy.bool)
# ******************************************************************
# No vector* averaging for THIS data
# ******************************************************************
# ordinary arithmetic mean
# sets counts = 1 where counts == 0 so we don't divide by 0
counts[ mask ] = 1
result /= counts
# if avgchan_fn is avg_vectornorm:
# # ok need to find the maximum complex number in each bin to scale it by
# # take proper care of flagged/inf data
# tmpx.data[tmpx.mask] = -numpy.inf
# result /= (numpy.maximum.reduceat(numpy.abs(tmpx.data), indices, axis=1)[:,keepbins])
# else:
# # ordinary arithmetic mean
# # sets counts = 1 where counts == 0 so we don't divide by 0
# counts[ mask ] = 1
# result /= counts
# set entries where counts==0 to NaN to make it explicit
# that, mathematically speaking, there is nothing there
result[mask] = numpy.nan
# unshape + untranspose from 2-d ((n_int * n_pol), n_output_channels)
# into 3-d (n_int, n_pol, n_output_channels)
return transpose_ch(numpy.ma.array(result.reshape((n_int, n_pol, -1)), mask=mask.reshape((n_int, n_pol, -1))))
# set chbin_fn to use reduceat()
chbin_fn = use_reduceat
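# Shape bookkeeping for use_reduceat (hypothetical sizes): an input block of
# shape (n_int=2, n_chan=8, n_pol=4) with solchan=4 is transposed to
# (2, 4, 8), flattened to (8, 8), reduced over indices [0, 4] to (8, 2) and
# finally returned as a masked array of shape (2, 2, 4): two output
# "channels" (bins) per integration and polarization.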
else:
# not going to use reduceat() just bruteforcing over a list of slices()
# do some extra pre-processing for the simplistic approach
# it uses slice() indexing so we pre-create the slice objects for it
# for each range of channels to average we compute src and dst slice
indices = map(lambda s: (s*solchan, min((s+1)*solchan, n_chan)), bins)
slices = [(slice(i, i+1), slice(rng[0], rng[1]+1)) for i, rng in enumerate(indices)]
# for display + loopindexing create list of (array_index, "CH label") tuples
self.chanidx = list()
for (ch_idx, start_end) in enumerate(indices):
self.chanidx.append( (ch_idx, start_end[0]) )
n_slices = len(slices)
# this is the simplistic approach
def use_dumbass_method(x):
# get an output array
n_int,_,n_pol = x.shape
result = numpy.ma.empty((n_int, n_slices, n_pol), dtype=x.dtype)
for (dst_idx, src_idx) in slices:
result[:,dst_idx,:] = numpy.ma.mean(x[:,src_idx,:], axis=1, keepdims=True)
result.mask[:,dst_idx,:] = (numpy.sum(x.mask[:,src_idx,:], axis=1, keepdims=True) == 0)
return result
# and set the channel bin function to use to this'un
chbin_fn = use_dumbass_method
# Some channel averaging is to be applied so chbin_fn must not be None
if chbin_fn is None:
raise RuntimeError("chbin_fn is None whilst some channel averaging requested. Please yell at H. Verkouter (verkouter@jive.eu)")
# depending on which kind of channel averaging, we apply it to the complex data before
# producing quantities or on the scalars after computing the quantities
if avgChannel == AVG.Scalar:
self.scalarAvg = chbin_fn
else:
self.vectorAvg = chbin_fn
# Now we must come up with a strategy for organizing the data processing chain
#
# If avgTime is Vector-like then we can only compute the quantities after all data's been
# read & averaged. We've already ruled out that avgChannel == Scalar (for that makes no sense)
#
# So we may have to postpone computing of the quantities until after having collected + integrated all data
post_quantities = lambda tp, x: [(tp, x)]
org_quantities = None
if postpone:
# create data sets based on the averaged data in a dataset
org_quantities = CP(self.quantities)
post_quantities = lambda _, x: map(lambda q: (q.quantity_name, q.quantity_fn(x)), org_quantities)
self.quantities = [Quantity('raw', functional.identity)]
if self.chanidx is None or len(self.chanidx)==1:
# post_channel doesn't do anything; self.chanidx remains a list of length 1
post_channel = lambda ch, x: [(ch, x)]
else:
# here the post_channel yields a list of extracted channels from the data 'x' coupled
# with the assigned label from self.chanidx
org_chanidx = CP(self.chanidx)
post_channel = lambda _, x: map(lambda chi: (chi[1], x[:,chi[0]]), org_chanidx)
# replace self.chanidx with a single entry which captures all channels and sets the
# associated label to None - which we could use as a sentinel, if needed
self.chanidx = [(Ellipsis, None)]
# Let's keep the channels together as long as possible. If only one channel remains then we can do it
# in our inner loop
if avgTime == AVG.None:
dataset_proto = dataset_list
else:
# depending on whether we need to solint one or more channels in one go
# loop over the current self.chanidx and count nr of channels
if self.chanidx:
nChannel = reduce(lambda acc, chi: acc + ((n_chan if chansel is Ellipsis else len(chansel)) if chi[0] is Ellipsis else 1), self.chanidx, 0)
else:
nChannel = 1
dataset_proto = dataset_solint_array if nChannel>1 else dataset_solint_scalar
# Depending on whether we have channels or not, we choose different processing routines; one
# having per-channel processing and one without
if self.chanidx is None:
# only WEIGHT column
print "WARNING: weight-per-polarization (WEIGHT column) implies only FLAG_ROW column used for flags"
fields = [AX.TYPE, AX.BL, AX.FQ, AX.SB, AX.SRC, AX.P]
columns = ["ANTENNA1", "ANTENNA2", "TIME", "DATA_DESC_ID", "FIELD_ID", "WEIGHT", "FLAG_ROW"]
self.actual_fn = self.process_weight
else:
# Note: there will /always/ be WEIGHT+FLAGCOL - either read from the table or invented
# 0 1 2 3 4 5 6
fields = [AX.TYPE, AX.BL, AX.FQ, AX.SB, AX.SRC, AX.P, AX.CH]
columns = ["ANTENNA1", "ANTENNA2", "TIME", "DATA_DESC_ID", "FIELD_ID", "WEIGHT_SPECTRUM", "FLAG_ROW", "FLAG"]
self.actual_fn = self.process_weight_spectrum
## Now we can start the reduction of the table
pts = ms2util.reducems2(self, self.table, collections.defaultdict(dataset_proto), columns, verbose=True, slicers=self.slicers, chunksize=5000)
# after the reduction's done we can put back our quantities if we did remove them before
if org_quantities is not None:
self.quantities = org_quantities
rv = {}
if self.chanidx:
for (label, dataset) in pts.iteritems():
dl = list(label)
dataset.average( avgTime )
# convert x,y to numarrays
dataset.as_numarray()
for qn,qd in post_quantities(label[0], dataset.y):
for chn,chd in post_channel(label[6], qd):
dl[0] = qn
dl[6] = chn
rv[ self.MKLAB(fields, dl) ] = dataset_fixed(dataset.x, chd)
else:
for (label, dataset) in pts.iteritems():
dl = list(label)
dataset.average( avgTime )
# convert x,y to numarrays
dataset.as_numarray()
for qn,qd in post_quantities(label[0], dataset.y):
dl[0] = qn
rv[ self.MKLAB(fields, dl) ] = dataset_fixed(dataset.x, qd)
return rv
## Here we make the plots
def __call__(self, *args):
return self.actual_fn(*args)
# handle WEIGHT = (n_int, n_pol), no FLAG data
def process_weight(self, acc, a1, a2, tm, dd, fld, weight, flag_row):
# Create masked array from the data with invalid data already masked off
data = numpy.ma.masked_invalid(weight)
# now we can easily add in flag information:
# FLAG_ROW = (n_int) do logical_or with data.mask.T [data.mask.T = (n_int,n_pol).T == (n_pol, n_int)]
# so the FLAG_ROW(n_int) will broadcast nicely across polarizations
data.mask = numpy.logical_or(data.mask.T, flag_row).T
# weight handling. A lot easier for this one :-)
data.mask = numpy.logical_or(data.mask, weight<self.threshold)
# possibly vector-average the data
data = self.vectorAvg(data)
# Now create the quantity data - map the quantity functions over the (vector averaged)
# data and, if needed, scalar average them
qd = map(lambda q: (q.quantity_name, self.scalarAvg(q.quantity_fn(data))), self.quantities)
# Transform the time stamps, if necessary
tm = self.timebin_fn(tm)
# Now we can loop over all the rows in the data
dds = self.ddSelection
# We don't have to test *IF* the current data description id is
# selected; the fact that we see it here means that it WAS selected!
# The only interesting bit is selecting the correct products
for row in range(data.shape[0]):
(fq, sb, plist) = dds[ dd[row] ]
for (pidx, pname) in plist:
l = ["", (a1[row], a2[row]), fq, sb, fld[row], pname]
for (qnm, qval) in qd:
l[0] = qnm
acc[tuple(l)].append(tm[row], qval.data[row, pidx], qval.mask[row, pidx])
return acc
# handle WEIGHT_SPECTRUM = (n_int, n_chan, n_pol)
def process_weight_spectrum(self, acc, a1, a2, tm, dd, fld, weight_spectrum, flag_row, flag):
# Create masked array from the data with invalid data already masked off
data = numpy.ma.masked_invalid(weight_spectrum)
# now we can easily add in flag information;
# flags either has shape of data or it's a single bool False
data.mask = numpy.logical_or(data.mask, self.untranspose_flag(numpy.logical_or(self.transpose_flag(flag), flag_row)))
# weight thresholding - is easier for this one!
data.mask = numpy.logical_or(data.mask, weight_spectrum<self.threshold)
# possibly vector-average the data
data = self.vectorAvg(data)
# Now create the quantity data - map the quantity functions over the (vector averaged)
# data and, if needed, scalar average them
qd = map(lambda q: (q.quantity_name, self.scalarAvg(q.quantity_fn(data))), self.quantities)
# Transform the time stamps, if necessary
tm = self.timebin_fn(tm)
# Now we can loop over all the rows in the data
dds = self.ddSelection
ci = self.chanidx
# We don't have to test *IF* the current data description id is
# selected; the fact that we see it here means that it WAS selected!
# The only interesting bit is selecting the correct products
for row in range(data.shape[0]):
(fq, sb, plist) = dds[ dd[row] ]
for (chi, chn) in ci:
for (pidx, pname) in plist:
l = ["", (a1[row], a2[row]), fq, sb, fld[row], pname, chn]
for (qnm, qval) in qd:
l[0] = qnm
acc[tuple(l)].append(tm[row], qval.data[row, chi, pidx], qval.mask[row, chi, pidx])
return acc
class uv(plotbase):
def __init__(self):
# nothing yet ...
pass
_sep = "\n\t - "
def makePlots(self, msname, selection, mapping, **kwargs):
## initialize the base class (opens table, does selection)
super(uv, self).__init__(msname, selection, mapping, **kwargs)
# warn about any averaging or channel selection
print_if("WARNING: Ignoring the following settings:"+uv._sep + uv._sep.join(
map(lambda tup: tup[0](tup[1]),
filter(lambda tup: tup[1] not in [AVG.None, None],
zip(["your channel selection".format, "solint {0}".format, "{0} avg in time".format, "nchav of {0}".format, "{0} avg in frequency".format, "weight threshold of {0}".format],
[selection.chanSel, selection.solint, selection.averageTime, selection.solchan, selection.averageChannel, selection.weightThreshold])))))
## we plot using the UVW column
## UVW is not a function of POL (it should be a function
## of CH but that would mean we'd have to actually
## do computations - yikes)
fields = [AX.TYPE, AX.BL, AX.FQ, AX.SB, AX.SRC]
## Now we can start the reduction of the table
# The base class will have set up FLAG/FLAG_ROW accessors based on whether the user
# specified reading flags or not. We can just use the transpose/untranspose functions
# and expect them to Do The Right Thing (tm)
columns = ["ANTENNA1", "ANTENNA2", "DATA_DESC_ID", "FIELD_ID", "UVW", "FLAG_ROW"]
pts = ms2util.reducems2(self, self.table, {}, columns, verbose=True, chunksize=5000)
rv = {}
for (label, dataset) in pts.iteritems():
rv[ self.MKLAB(fields, label) ] = dataset
return rv
def __call__(self, acc, a1, a2, dd, fld, uvw, row_flag):
# transform the data into a masked array with inf/nan masked off
uvw = numpy.ma.masked_invalid(uvw)
# uvw = (nrow, 3), flag_row = (nrow)
# so to broadcast row_flag across the mask we use uvw.T [shape = (3, nrow)]
uvw.mask = numpy.logical_or(uvw.mask.T, row_flag).T
# now condense the (nrow, [u,v,w], dtype=bool) array to (nrow, [uflag || vflag]) [shape: (nrow,1)]:
# one flag for the (u,v) data point: if either u or v was nan/inf or the row was
# flagged, flag that datapoint
row_flag = numpy.logical_or(uvw.mask[:,0], uvw.mask[:,1])
u = uvw.data[:,0]
v = uvw.data[:,1]
# ok, process all the rows!
for row in range(uvw.shape[0]):
(fq, sb, _plist) = self.ddSelection[ dd[row] ]
# we don't iterate over channels nor over polarizations
ds = acc.setdefault(('V', (a1[row], a2[row]), fq, sb, fld[row]), dataset_list())
f = row_flag[row]
ds.append( u[row], v[row], f)
ds.append(-u[row], -v[row], f)
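# Both (u,v) and (-u,-v) are appended because the visibility on baseline
# (i,j) is the complex conjugate of that on (j,i); adding the mirrored
# point yields the usual point-symmetric uv-coverage plot.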
return acc
## This plotter will iterate over "DATA" or "LAG_DATA"
## and produce a number of quantities per frequency, possibly averaging channels (no time avg yet)
class data_quantity_uvdist(plotbase):
# should set up a choice table based on the combination of averaging methods
# key into the lookup is '(avgChannelMethod, avgTimeMethod)'
# Also return wether the quantities must be postponed
_averaging = {
# no averaging at all, no need to postpone computing the quantity(ies)
(AVG.None, AVG.None): (avg_none, avg_none, False),
(AVG.Scalar, AVG.None): (avg_arithmetic, avg_none, False),
(AVG.Vector, AVG.None): (avg_arithmetic, avg_none, False),
(AVG.Vectornorm, AVG.None): (avg_vectornorm, avg_none, False),
(AVG.Sum, AVG.None): (avg_sum, avg_none, False),
(AVG.Vectorsum, AVG.None): (avg_sum, avg_none, False),
}
## our construct0r
## qlist = [ (quantity_name, quantity_fn), ... ]
##
def __init__(self, qlist, **kwargs):
self.quantities = list(itertools.starmap(Quantity, qlist))
# UV distance is frequency dependent
self.byFrequency = True
def makePlots(self, msname, selection, mapping, **kwargs):
# Deal with channel averaging
# Scalar => average the derived quantity
# Vector => compute average cplx number, then the quantity
avgChannel = CP(selection.averageChannel)
avgTime = CP(selection.averageTime)
solchan = CP(selection.solchan)
solint = CP(selection.solint)
timerng = CP(selection.timeRange)
## initialize the base class
super(data_quantity_uvdist, self).__init__(msname, selection, mapping, **kwargs)
# channel selection+averaging schemes; support averaging over channels (or chunks of channels)
chansel = Ellipsis #None
n_chan = self.table[0][self.datacol].shape[0]
if selection.chanSel:
channels = list(sorted(set(CP(selection.chanSel))))
max_chan = max(channels)
# if any of the indexed channels > n_chan that's an error
if max_chan>=n_chan:
raise RuntimeError("At least one selected channel ({0}) > largest channel index ({1})".format(max_chan, n_chan-1))
# also <0 is not quite acceptable
if min(channels)<0:
raise RuntimeError("Negative channel number {0} is not acceptable".format(min(channels)))
# if the user selected all channels (by selection
# 'ch 0:<last>' instead of 'ch none') we don't
# override the default channel selection (which is more efficient)
if channels!=range(n_chan):
chansel = channels
# ignore channel averaging if only one channel specified
if (n_chan if chansel is Ellipsis else len(chansel))==1 and avgChannel != AVG.None:
print "WARNING: channel averaging method {0} ignored because only one channel selected".format( avgChannel )
avgChannel = AVG.None
# Test if the selected combination of averaging settings makes sense
setup = data_quantity_time._averaging.get((avgChannel, avgTime), None)
if setup is None:
raise RuntimeError("the combination of {0} channel + {1} time averaging is not supported".format(avgChannel, avgTime))
(avgchan_fn, avgtime_fn, postpone) = setup
# some sanity checks
if solchan is not None and avgChannel==AVG.None:
raise RuntimeError("nchav value was set without specifiying a channel averaging method; please tell me how you want them averaged")
# How integration/averaging actually is implemented is by modifying the
# time stamp. By massaging the time stamp into buckets of size
# 'solint', we influence the label of the TIME field, which will make
# all data points with the same TIME stamp be integrated into the same
# data set
self.timebin_fn = functional.identity
# currently we don't support time averaging!
# if avgTime!=AVG.None:
# if solint is None:
# # Ah. Hmm. Have to integrate different time ranges
# # Let's transform our timerng list of (start, end) intervals into
# # a list of (start, end, mid) such that we can easily map
# # all time stamps [start, end] to mid
# # If no time ranges defined at all average everything down to middle of experiment?
#
# # It is important to KNOW that "selection.timeRange" (and thus our
# # local copy 'timerng') is a list of sorted, non-overlapping time ranges
# timerng = map(lambda (s, e): (s, e, (s+e)/2.0), timerng if timerng is not None else [(mapping.timeRange.start, mapping.timeRange.end)])
#
# # try to be a bit optimized in time stamp replacement - filter the
# # list of time ranges to those applying to the time stamps we're
# # replacing
# def do_it(x):
# mi,ma = numpy.min(x), numpy.max(x)
# ranges = filter(lambda tr: not (tr[0]>ma or tr[1]<mi), timerng)
# return reduce(lambda acc, (s, e, m): numpy.put(acc, numpy.where((acc>=s) & (acc<=e)), m) or acc, ranges, x)
# self.timebin_fn = do_it
# else:
# # Check if solint isn't too small
# ti = mapping.timeRange.inttm[0]
# if solint<=ti:
# raise RuntimeError("solint value {0:.3f} is less than integration time {1:.3f}".format(solint, ti))
# self.timebin_fn = lambda x: (numpy.trunc(x/solint)*solint) + solint/2.0
# chansel now is Ellipsis (all channels) or a list of some selected channels
self.chanidx = list()
self.chanidx_fn= None
self.vectorAvg = functional.identity
self.scalarAvg = functional.identity
self.tmVectAvg = functional.identity
self.tmScalAvg = functional.identity
# the x-axis is frequency ... but we pre-convert to a multiplication factor
# 1/lambda ( nu / c = 1 / lambda) such that going from baseline length in m
# to baseline length in wavelengths is as easy as multiplying by 1 / lambda
self.freq_of_dd=CP(self.ddFreqs)
# transform all channel frequencies to 1 / lambda
# NOTE: frequencies in this mapping are in units of MHz!!!!
for dd in self.freq_of_dd.keys():
self.freq_of_dd[dd] = (ARRAY(self.freq_of_dd[dd]) * 1e6) / 299792458.0
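# Quick sanity check of the conversion (illustrative numbers): a channel at
# 1000 MHz gives (1000 * 1e6) / 299792458.0 ~= 3.336 wavelengths per metre,
# so a 1000 m projected baseline corresponds to roughly 3336 wavelengths.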
if avgChannel==AVG.None:
# No channel averaging - the new x-axis will be the indices of the selected channels
# [0,1,...,n-1] if ellipsis else [3, 6, 12, 128] (selected channels)
self.chanidx = range(n_chan) if chansel is Ellipsis else chansel
# The vector average step will be misused to just apply the channel selection such that all selected channels
# are mapped to 0..n-1. This is only necessary in case not all channels were selected
if chansel is not Ellipsis:
self.vectorAvg = lambda x: x[:,chansel,:] # collapses selection to indices [0, 1, ..., n-1]
self.chanidx_fn = lambda dd: self.freq_of_dd[dd][chansel]
else:
self.chanidx_fn = self.freq_of_dd.__getitem__
else:
# ok channel averaging requested
chbin_fn = None
if solchan is None:
# average all selected channels down to one
# data array 'x' has shape (n_int, n_chan, n_pol)
#self.chbin_fn = lambda x: normalize_ch(1)(numpy.ma.mean(x[:,avg_over,:], axis=1, keepdims=True))
# average the selected channels according the requested averaging method
chbin_fn = lambda x: avgchan_fn(1)(x[:,chansel,:])
self.chanidx = [numpy.mean(range(n_chan) if chansel is Ellipsis else chansel)]
# transform all frequencies to an average frequency
for dd in self.freq_of_dd.keys():
self.freq_of_dd[dd] = [numpy.mean(self.freq_of_dd[dd][chansel])]
self.chanidx_fn = self.freq_of_dd.__getitem__
else:
# average bins of solchan channels down to one
if solchan > n_chan:
raise RuntimeError("request to average channels in bins of {0} channels but only {1} are available".format(solchan, n_chan))
# Create a mask which is the complement of the selected channels
# (remember: chansel == Ellipsis => all channels)
ch_mask = (numpy.zeros if chansel is Ellipsis else numpy.ones)(n_chan, dtype=numpy.bool)
# only in case chansel != everything we must modify the mask
if chansel is not Ellipsis:
ch_mask[chansel] = False
# Since we're going to zap masked values (replace by 0) we can usefully use
# reduceat! So all we then need is an index array, informing reduceat what the
# reduction boundaries are!
# First up: the actual bin numbers we're interested in, we compute the actual
# start + end indices from that
bins = numpy.unique((numpy.array(chansel) if chansel is not Ellipsis else numpy.arange(0, n_chan, solchan))//solchan)
bins.sort()
# we're going to apply channel binning so we must replace 'chansel'
# by 'bins' in order for downstream accounting of how many "channels" there will
# be in the data
chansel = bins
# Detect if the bins are adjacent
adjacent_bins = (len(set(bins[1:] - bins[:-1])) == 1) if len(bins)>1 else False
chbin_fn = None
if adjacent_bins:
# we're going to use reduceat() which means it's good enough
# to generate [bin0*solchan, bin1*solchan, ..., bin<nbin-1>*solchan]
indices = CP(bins)
# generate the channel index labels for correct labelling
self.chanidx = CP(indices) #list()
# need to carefully check last entry in there; if 'last bin' < 'n_chan//solchan'
# we must add an extra final boundary or else reduceat() will add up to the end
# of the number of channels instead of until the end of the bin ...
if bins[-1]<((n_chan-1)//solchan):
# add one more bin limit, set slice to keep only n-1 bins
keepbins = slice(0, len(indices))
indices = numpy.r_[indices, [indices[-1]+1]]
else:
keepbins = Ellipsis
# indices are in units of solchan bins so for reduceat must
# scale them back to actual channels
indices *= solchan
# This is where the magic happens
transpose_ch = operator.methodcaller('transpose', (0, 2, 1))
def use_reduceat(x):
# (n_int, n_ch, n_pol) => (n_int, n_pol, n_ch)
tmpx = transpose_ch(x)
# also we must reshape it to a 2-D array of ((n_int * n_pol), n_chan) shape or else
# reduceat() won't work [https://docs.scipy.org/doc/numpy/reference/generated/numpy.ufunc.reduceat.html]
# remember the dimensions for later
n_int,n_pol = tmpx.shape[:-1]
tmpx = tmpx.reshape( (n_int*n_pol, -1) )
# mask out channels that we don't want averaged, joining it
# with the mask that excludes flagged data (by the user) and/or
# whatever was weight-thresholded ...
tmpx.mask = numpy.logical_or(tmpx.mask, ch_mask)
# set all masked values to 0 such that they don't ever count towards *anything*
# e.g. suppose all channels in a bin are masked then the average should be NaN or something
# unrepresentable because there was no valid data at all
tmpx.data[tmpx.mask] = 0
# do the summation.
#print "###############################################################"
#print " use_reduceat: x.shape=",x.shape
#print " tmpx.data.shape=",tmpx.data.shape
#print " indices=",indices
#print " keepbins=",keepbins
#print "###############################################################"
result = numpy.add.reduceat(tmpx.data, indices, axis=1)[:,keepbins]
# also count the number of unmasked values that went into each point
# we may use it for averaging, definitely be using it to create the mask
counts = numpy.add.reduceat(~tmpx.mask, indices, axis=1)[:,keepbins]
# Because we do things different here than in the ordinary averaging,
# we must look at what was requested in order to mimic that behaviour
if avgchan_fn is avg_vectornorm:
# ok need to find the maximum complex number in each bin to scale it by
result /= (numpy.maximum.reduceat(numpy.abs(tmpx.data), indices, axis=1)[:,keepbins])
elif avgchan_fn in [avg_sum, avg_none]:
# no averaging/summing boils down to not doing anything
pass
else:
# ordinary arithmetic mean
result /= counts
# return masked array - can reuse the counts array by converting them
# to bool and inverting: no counts => False
mask = numpy.array(counts == 0, dtype=numpy.bool)
# set entries where counts==0 to NaN to make it explicit
# that, mathematically speaking, there is nothing there
result[mask] = numpy.nan
# unshape + untranspose from 2-d ((n_int * n_pol), n_output_channels)
# into 3-d (n_int, n_pol, n_output_channels)
return transpose_ch(numpy.ma.array(result.reshape((n_int, n_pol, -1)), mask=mask.reshape((n_int, n_pol, -1))))
# set chbin_fn to use reduceat()
chbin_fn = use_reduceat
else:
# not going to use reduceat() just bruteforcing over a list of slices()
# do some extra pre-processing for the simplistic approach
# it uses slice() indexing so we pre-create the slice objects for it
# for each range of channels to average we compute src and dst slice
indices = map(lambda s: (s*solchan, min((s+1)*solchan, n_chan)), bins)
slices = [(slice(i, i+1), slice(rng[0], rng[1]+1)) for i, rng in enumerate(indices)]
# for display + loopindexing create list of (array_index, "CH label") tuples
self.chanidx = CP(bins)
n_slices = len(slices)
# this is the simplistic approach
def use_dumbass_method(x):
# get an output array
n_int,_,n_pol = x.shape
result = numpy.ma.empty((n_int, n_slices, n_pol), dtype=x.dtype)
for (dst_idx, src_idx) in slices:
result[:,dst_idx,:] = numpy.ma.mean(x[:,src_idx,:], axis=1, keepdims=True)
result.mask[:,dst_idx,:] = (numpy.sum(x.mask[:,src_idx,:], axis=1, keepdims=True) == 0)
return result
# and set the channel bin function to use to this'un
chbin_fn = use_dumbass_method
# transform all frequencies to an average frequency per bin
indices_l = map(lambda s: (s*solchan, min((s+1)*solchan, n_chan)), bins)
slices_l = [(slice(i, i+1), slice(rng[0], rng[1]+1)) for i, rng in enumerate(indices_l)]
def mk_fbins(x):
result = numpy.empty((len(slices_l)))
for (dst_idx, src_idx) in slices_l:
result[dst_idx] = numpy.mean(x[src_idx])
return result
for dd in self.freq_of_dd.keys():
self.freq_of_dd[dd] = mk_fbins( ARRAY(self.freq_of_dd[dd]) )
self.chanidx_fn = self.freq_of_dd.__getitem__
# Some channel averaging is to be applied so chbin_fn must not be None
if chbin_fn is None:
raise RuntimeError("chbin_fn is None whilst some channel averaging requested. Please yell at H. Verkouter (verkouter@jive.eu)")
# depending on which kind of channel averaging, we apply it to the complex data before
# producing quantities or on the scalars after computing the quantities
if avgChannel == AVG.Scalar:
self.scalarAvg = chbin_fn
else:
self.vectorAvg = chbin_fn
if self.chanidx_fn is None:
raise RuntimeError("The self.chanidx_fn is still None! Someone (verkouter@jive.eu) forgot something. Go yell at 'im!")
# Now we must come up with a strategy for organizing the data processing chain
#
# If avgTime is Vector-like then we can only compute the quantities after all data's been
# read & averaged. We've already ruled out that avgChannel == Scalar (for that makes no sense)
#
# So we may have to postpone computing of the quantities until after having collected + integrated all data
post_quantities = lambda tp, x: [(tp, x)]
org_quantities = None
if postpone:
# create data sets based on the averaged data in a dataset
org_quantities = CP(self.quantities)
post_quantities = lambda _, x: map(lambda q: (q.quantity_name, q.quantity_fn(x)), org_quantities)
self.quantities = [Quantity('raw', functional.identity)]
## Now we can start the reduction of the table
# Note: there will /always/ be WEIGHT+FLAGCOL - either read from the table or invented
# 0 1 2 3 4 5 6
#fields = [AX.TYPE, AX.BL, AX.FQ, AX.SB, AX.SRC, AX.P, AX.CH]
fields = [AX.TYPE, AX.BL, AX.FQ, AX.SB, AX.SRC, AX.P]
columns = ["ANTENNA1", "ANTENNA2", "UVW", "DATA_DESC_ID", "FIELD_ID", "WEIGHTCOL", "FLAG_ROW", "FLAG", self.datacol]
pts = ms2util.reducems2(self, self.table, collections.defaultdict(dataset_list), columns, verbose=True, slicers=self.slicers, chunksize=5000)
# after the reduction's done we can put back our quantities if we did remove them before
if org_quantities is not None:
self.quantities = org_quantities
rv = {}
for (label, dataset) in pts.iteritems():
dl = list(label)
#dataset.average( avgTime )
# convert x,y to numarrays
dataset.as_numarray()
for qn,qd in post_quantities(label[0], dataset.y):
dl[0] = qn
rv[ self.MKLAB(fields, dl) ] = dataset_fixed(dataset.x, qd)
return rv
## Here we make the plots
def __call__(self, acc, a1, a2, uvw, dd, fld, weight, flag_row, flag, data):
# Create masked array from the data with invalid data already masked off
data = numpy.ma.masked_invalid(data)
# now we can easily add in flag information;
# flags either has shape of data or it's a single bool False
data.mask = numpy.logical_or(data.mask, self.untranspose_flag(numpy.logical_or(self.transpose_flag(flag), flag_row)))
# weight handling. It's been set up such that whatever the weight was
# (WEIGHT, WEIGHT_SPECTRUM, no weight thresholding) the following sequence
# always works
data = self.transpose(data)
data.mask = numpy.logical_or(data.mask, weight<self.threshold)
data = self.transpose(data)
# possibly vector-average the data
data = self.vectorAvg(data)
# Now create the quantity data - map the quantity functions over the (vector averaged)
# data and, if needed, scalar average them
qd = map(lambda q: (q.quantity_name, self.scalarAvg(q.quantity_fn(data))), self.quantities)
# Transform uvw column into uvw distance. Apparently ...
# older numpy's have a numpy.linalg.norm() that does NOT take an 'axis' argument
# so we have to write the distance computation out ourselves. #GVD
uvw = SQRT( ADD( SQUARE(uvw[:,0]), SQUARE(uvw[:,1]) ) )
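# E.g. (made-up row) uvw = [3.0, 4.0, 12.0] gives a projected baseline
# length of sqrt(3^2 + 4^2) = 5.0; the w term is deliberately ignored.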
# Now we can loop over all the rows in the data
dds = self.ddSelection
#ci = range(len(self.chanidx))
cif = self.chanidx_fn
# We don't have to test *IF* the current data description id is
# selected; the fact that we see it here means that it WAS selected!
# The only interesting bit is selecting the correct products
for row in range(data.shape[0]):
ddid = dd[row]
(fq, sb, plist) = dds[ ddid ]
for (pidx, pname) in plist:
l = ["", (a1[row], a2[row]), fq, sb, fld[row], pname, ""]
# transform projected baseline length to wavelength for the current spectral window
uvd = uvw[row] * cif(ddid)
for (qnm, qval) in qd:
l[0] = qnm
acc[tuple(l)].extend(uvd, qval.data[row,:,pidx], qval.mask[row,:,pidx])
# loop over the channels; self.chanidx = [ChX, ChY, ChZ]
#for i in ci:
# acc[tuple(l)].append(uvd[i], qval.data[row, i, pidx], qval.mask[row, i, pidx])
return acc
### This plotter will iterate over "DATA" or "LAG_DATA"
### and produce a number of quantities per data point
#class data_quantity_uvdist_old(plotbase):
#
# ## our construct0r
# ## qlist = [ (quantity_name, quantity_fn), ... ]
# ##
# ## Note that 'channel averaging' will be implemented on a per-plot
# ## basis, not at the basic type of plot instance
# def __init__(self, qlist):
# self.quantities = qlist
#
# def makePlots(self, msname, selection, mapping, **kwargs):
# datacol = CP(mapping.domain.column)
#
# # Deal with channel averaging
# # Scalar => average the derived quantity
# # Vector => compute average cplx number, then the quantity
# avgChannel = CP(selection.averageChannel)
#
# if selection.averageTime!=AVG.None:
# print "Warning: {0} time averaging ignored for this plot".format(selection.averageTime)
#
# ## initialize the base class
# super(data_quantity_uvdist, self).__init__(msname, selection, mapping, **kwargs)
#
# ## Some variables must be stored in ourselves such
# ## that they can be picked up by the callback function
# slicers = {}
#
# # For data sets with a large number of channels
# # (e.g. UniBoard data, 1024 channels spectral resolution)
# # it makes a big (speed) difference if there is a channel
# # selection to let the casa stuff [the ms column] do
# # the (pre)selection so we do not get *all* the channels
# # into casa
#
# # 1) the channel selection. it is global; ie applies to
# # every data description id.
# # also allows us to create a slicer
# # default: iterate over all channels
# shape = self.table[0][datacol].shape
# self.chunksize = 5000
# self.chanidx = zip(range(shape[0]), range(shape[0]))
# self.maskfn = lambda x: numpy.ma.MaskedArray(x, mask=numpy.ma.nomask)
# self.chansel = range(shape[0])
#
# # We must translate the selected channels to a frequency (or wavelength) - such that we can
# # compute the uvdist in wavelengths
# _spwMap = mapping.spectralMap
# ddids = _spwMap.datadescriptionIDs()
#
# # preallocate an array of dimension (nDDID, nCHAN) such that we can put
# # the frequencies of DDID #i at row i - makes for easy selectin'
# self.factors = numpy.zeros((max(ddids)+1, shape[0]))
# for ddid in ddids:
# fqobj = _spwMap.unmap( ddid )
# self.factors[ ddid ] = _spwMap.frequenciesOfFREQ_SB(fqobj.FREQID, fqobj.SUBBAND)
#
# # After having read the data, first we apply the masking function
# # which disables the unselected channels
# if selection.chanSel:
# channels = sorted(CP(selection.chanSel))
# indices = map(lambda x: x-channels[0], channels)
# self.chanidx = zip(indices, channels)
# self.chansel = indices
# # select only the selected channels
# self.factors = self.factors[:, channels]
# #print "channels=",channels," indices=",indices," self.chanidx=",self.chanidx
# self.maskfn = mk3dmask_fn_mask(self.chunksize, indices, shape[-1])
# slicers[datacol] = ms2util.mk_slicer((channels[0], 0), (channels[-1]+1, shape[-1]))
#
# # right - factors now contain *frequency*
# # divide by speed of lite to get the multiplication factor
# # to go from UV distance in meters to UV dist in lambda
# self.factors /= 299792458.0
# # older numpy's have a numpy.linalg.norm() that does NOT take an 'axis' argument
# # so we have to write the distance computation out ourselves. #GVD
# self.uvdist_f = lambda uvw: numpy.sqrt( numpy.square(uvw[...,0]) + numpy.square(uvw[...,1]) )
#
# # If there is vector averaging to be done, this is done in the step after reading the data
# # (default: none)
# self.vectorAvg = lambda x: x
#
# if avgChannel==AVG.Vector:
# self.vectorAvg = lambda x: numpy.average(x, axis=1).reshape( (x.shape[0], 1, x.shape[2]) )
# self.chanidx = [(0, '*')]
#
# # Scalar averaging is done after the quantities have been computed
# self.scalarAvg = lambda x: x
#
# if avgChannel==AVG.Scalar:
# self.scalarAvg = lambda x: numpy.average(x, axis=1).reshape( (x.shape[0], 1, x.shape[2]) )
# self.chanidx = [(0, '*')]
#
# if avgChannel!=AVG.None:
# self.factors = numpy.average(self.factors[:,self.chansel], axis=1)
#
# fields = [AX.TYPE, AX.BL, AX.FQ, AX.SB, AX.SRC, AX.P, AX.CH]
#
# # weight filtering
# self.nreject = 0
# self.reject_f = lambda weight: False
# self.threshold = -10000000
# if selection.weightThreshold is not None:
# self.threshold = CP(selection.weightThreshold)
# self.reject_f = lambda weight: weight<self.threshold
#
# ## Now we can start the reduction of the table
# ## INCORPORATE THE WEIGHT COLUMN
# if selection.weightThreshold is None:
# columns = ["ANTENNA1", "ANTENNA2", "UVW", "DATA_DESC_ID", "FIELD_ID", datacol]
# self.actual_fn = self.withoutWeightThresholding
# else:
# columns = ["ANTENNA1", "ANTENNA2", "UVW", "DATA_DESC_ID", "FIELD_ID", "WEIGHT", datacol]
# self.actual_fn = self.withWeightThresholding
# if self.flags:
# columns.append( "FLAGCOL" )
# pts = ms2util.reducems2(self, self.table, {}, columns, verbose=True, slicers=slicers, chunksize=self.chunksize)
#
# if self.nreject:
# print "Rejected ",self.nreject," points because of weight criterion"
#
# rv = {}
# for (label, dataset) in pts.iteritems():
# rv[ self.MKLAB(fields, label) ] = dataset
# #for k in rv.keys():
# # print "Plot:",str(k),"/",map(str, rv[k].keys())
# return rv
#
# ## Here we make the plots
# def __call__(self, *args):
# return self.actual_fn(*args)
#
# #### This is the version WITHOUT WEIGHT THRESHOLDING
# def withoutWeightThresholding(self, acc, a1, a2, uvw, dd, fld, data, *flag):
# #print "__call__: ",a1,a2,tm,dd,fld,data.shape
# # Make really sure we have a 3-D array of data ...
# d3d = m3d(data)
# shp = data.shape
# flg = unflagged() if not flag else flag[0]
# # Good. We have a block of data, shape (nrow, nchan, npol)
# # Step 1: apply the masking + vector averaging
# # 'vamd' = vector averaged masked data
# # Try to use the pre-computed channel mask, if it fits,
# # otherwise create one for this odd-sized block
# # (typically the last block)
# mfn = self.maskfn if shp[0]==self.chunksize else mk3dmask_fn_mask(shp[0], self.chansel, shp[2])
# vamd = self.vectorAvg( mfn(d3d) )
#
# # Now create the quantity data - map the quantity functions over the
# # (potentially) vector averaged data and (potentially) scalar
# # average them
# qd = map(lambda (qnm, qfn): (qnm, self.scalarAvg(qfn(vamd))), self.quantities)
#
# # we can compute the uv distances of all spectral points in units of lambda
# # because we have the UVW's now and the nu/speed-of-lite for all spectral points
# uvd = numpy.atleast_2d( self.factors[dd].T * self.uvdist_f(uvw) )
#
# # Now we can loop over all the rows in the data
#
# # We don't have to test *IF* the current data description id is
# # selected; the fact that we see it here means that it WAS selected!
# # The only interesting bit is selecting the correct products
# for row in range(shp[0]):
# (fq, sb, plist) = self.ddSelection[ dd[row] ]
# for (chi, chn) in self.chanidx:
# for (pidx, pname) in plist:
# l = ["", (a1[row], a2[row]), fq, sb, fld[row], pname, chn]
# for (qnm, qval) in qd:
# l[0] = qnm
# acc.setdefault(tuple(l), dataset()).append(uvd[chi, row], qval[row, chi, pidx], flg[row, chi, pidx])
# return acc
#
# #### This is the version WITH WEIGHT THRESHOLDING
# def withWeightThresholding(self, acc, a1, a2, uvw, dd, fld, weight, data, *flag):
# #print "__call__: ",a1,a2,tm,dd,fld,data.shape
# # Make really sure we have a 3-D array of data ...
# d3d = m3d(data)
# shp = data.shape
# flg = unflagged() if not flag else flag[0]
# # compute weight mask
# w3d = numpy.zeros(shp, dtype=numpy.float)
# for i in xrange(shp[0]):
# # we have weights per polarization but we must
# # expand them to per channel ...
# cw = numpy.vstack( shp[1]*[weight[i]] )
# w3d[i] = cw
# w3m = w3d<self.threshold
# wfn = lambda a: numpy.ma.MaskedArray(a.data, numpy.logical_and(a.mask, w3m))
# # Good. We have a block of data, shape (nrow, nchan, npol)
# # Step 1: apply the masking + vector averaging
# # 'vamd' = vector averaged masked data
# # Try to use the pre-computed channel mask, if it fits,
# # otherwise create one for this odd-sized block
# # (typically the last block)
# mfn = self.maskfn if shp[0]==self.chunksize else mk3dmask_fn_mask(shp[0], self.chansel, shp[2])
# vamd = self.vectorAvg( wfn(mfn(d3d)) )
#
# # Now create the quantity data - map the quantity functions over the
# # (potentially) vector averaged data and (potentially) scalar
# # average them
# qd = map(lambda (qnm, qfn): (qnm, self.scalarAvg(qfn(vamd))), self.quantities)
#
# # compute uv distances
# uvd = self.uvdist_f(uvw)
# #for (qn, qv) in qd:
# # print qn,": shape=",qv.shape
#
# # Now we can loop over all the rows in the data
#
# # We don't have to test *IF* the current data description id is
# # selected; the fact that we see it here means that it WAS selected!
# # The only interesting bit is selecting the correct products
# for row in range(shp[0]):
# (fq, sb, plist) = self.ddSelection[ dd[row] ]
# for (chi, chn) in self.chanidx:
# for (pidx, pname) in plist:
# if self.reject_f(w3d[row, chi, pidx]):
# self.nreject = self.nreject + 1
# continue
# l = ["", (a1[row], a2[row]), fq, sb, fld[row], pname, chn]
# for (qnm, qval) in qd:
# l[0] = qnm
# #pi = self.plot_idx(l)
# #di = self.ds_idx(l)
# #print "row #",row,"/l=",l," => pi=",pi," di=",di," qval.shape=",qval.shape
# acc.setdefault(tuple(l), dataset()).append(tm[row], qval[row, chi, pidx], flag[row, chi, pidx])
# return acc
Iterators = {
'amptime' : data_quantity_time([(YTypes.amplitude, numpy.ma.abs)]),
'phatime' : data_quantity_time([(YTypes.phase, lambda x: numpy.ma.angle(x, True))]),
'anptime' : data_quantity_time([(YTypes.amplitude, numpy.ma.abs), (YTypes.phase, lambda x: numpy.ma.angle(x, True))]),
'retime' : data_quantity_time([(YTypes.real, numpy.real)]),
'imtime' : data_quantity_time([(YTypes.imag, numpy.imag)]),
'rnitime' : data_quantity_time([(YTypes.real, numpy.real), (YTypes.imag, numpy.imag)]),
'ampchan' : data_quantity_chan([(YTypes.amplitude, numpy.ma.abs)]),
'ampfreq' : data_quantity_chan([(YTypes.amplitude, numpy.ma.abs)], byFrequency=True),
'phachan' : data_quantity_chan([(YTypes.phase, lambda x: numpy.ma.angle(x, True))]),
'phafreq' : data_quantity_chan([(YTypes.phase, lambda x: numpy.ma.angle(x, True))], byFrequency=True),
'anpchan' : data_quantity_chan([(YTypes.amplitude, numpy.ma.abs), (YTypes.phase, lambda x: numpy.ma.angle(x, True))]),
'anpfreq' : data_quantity_chan([(YTypes.amplitude, numpy.ma.abs), (YTypes.phase, lambda x: numpy.ma.angle(x, True))], byFrequency=True),
'rechan' : data_quantity_chan([(YTypes.real, numpy.real)]),
'imchan' : data_quantity_chan([(YTypes.imag, numpy.imag)]),
'rnichan' : data_quantity_chan([(YTypes.real, numpy.real), (YTypes.imag, numpy.imag)]),
'wt' : weight_time(),
'uv' : uv(),
'ampuv' : data_quantity_uvdist([(YTypes.amplitude, numpy.ma.abs)])
}
|
haavee/jiveplot
|
plotiterator.py
|
Python
|
gpl-3.0
| 196,516
|
[
"GULP"
] |
0b71803d5a1c8c4d053e7ab95ba8cbcf4e3526bd22e60f06404f819ab488394c
|
# Copyright (C) 2016
# Max Planck Institute for Polymer Research & JGU Mainz
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*******************
espressopp.Particle
*******************
.. function:: espressopp.Particle(pid, storage)
:param pid: id of the particle
:param storage: the storage object the particle lives in
:type pid: int
:type storage: storage object
"""
import _espressopp
import esutil
import pmi
from espressopp import toReal3DFromVector, toInt3DFromVector
import mpi4py.MPI as MPI
from espressopp.Exceptions import ParticleDoesNotExistHere
# Controller Particle:
# * requests are directly forwarded
# Parallel Particle:
# * throw exception if particle does not exist locally
# * otherwise do it
# Parallel Ghost Particle:
# * throw exception if particle does not exist locally
# * can not be written
# * should throw exception if data is not available
# The class _TmpParticle wraps the C++ internal pointer to a particle
# _TmpParticle should not be used, as it might die easily and will
# cause a SegFault when used after it has died.
class ParticleLocal(object):
"""The local particle.
Throws an exception:
* when the particle does not exist locally
TODO: Should throw an exception:
* when a ghost particle is to be written
* when data is to be read from a ghost that is not available
"""
def __init__(self, pid, storage):
self.pid = pid
self.storage = storage
def __getTmp(self):
return self.storage.lookupRealParticle(self.pid)
#if tmp is None:
# TODO: Exception
# raise ParticleDoesNotExistHere('pid='+str(self.pid)+' rank='+str(pmi.rank) )
#else:
# return tmp
# Defining __getattr__ will make sure that you can use any
# property defined in _TmpParticle
def __getattr__(self, key):
return getattr(self.__getTmp(), key)
# def __setattr__(self, key, value):
# return setattr(self.__getTmp(), key, value)
# The following properties are modified between Python and C++
@property
def f(self): return self.__getTmp().f
@f.setter
def f(self, val): self.__getTmp().f = toReal3DFromVector(val)
@property
def v(self): return self.__getTmp().v
@v.setter
def v(self, val): self.__getTmp().v = toReal3DFromVector(val)
@property
def pos(self): return self.__getTmp().pos
@pos.setter
def pos(self, val): self.__getTmp().pos = toReal3DFromVector(val)
@property
def type(self): return self.__getTmp().type
@type.setter
def type(self, val): self.__getTmp().type = val
@property
def mass(self): return self.__getTmp().mass
@mass.setter
def mass(self, val): self.__getTmp().mass = val
@property
def q(self): return self.__getTmp().q
@q.setter
def q(self, val): self.__getTmp().q = val
@property
def radius(self): return self.__getTmp().radius
@radius.setter
def radius(self, val): self.__getTmp().radius = val
@property
def fradius(self): return self.__getTmp().fradius
@fradius.setter
def fradius(self, val): self.__getTmp().fradius = val
@property
def vradius(self): return self.__getTmp().vradius
@vradius.setter
def vradius(self, val): self.__getTmp().vradius = val
@property
def imageBox(self): return self.__getTmp().imageBox
@imageBox.setter
def imageBox(self, val): self.__getTmp().imageBox = toInt3DFromVector(val)
@property
def isGhost(self): return self.__getTmp().isGhost
@isGhost.setter
def isGhost(self, val): self.__getTmp().isGhost = val
@property
def lambda_adr(self): return self.__getTmp().lambda_adr
@lambda_adr.setter
def lambda_adr(self, val): self.__getTmp().lambda_adr = val
@property
def drift_f(self): return self.__getTmp().drift_f
@drift_f.setter
def drift_f(self, val): self.__getTmp().drift_f = val
@property
def lambda_adrd(self): return self.__getTmp().lambda_adrd
@lambda_adrd.setter
def lambda_adrd(self, val): self.__getTmp().lambda_adrd = val
@property
def extVar(self): return self.__getTmp().extVar
@extVar.setter
def extVar(self, val): self.__getTmp().extVar = val
@property
def state(self): return self.__getTmp().state
@state.setter
def state(self, val): self.__getTmp().state = val
def getLocalData(self, key):
tmp = self.storage.lookupRealParticle(self.pid)
if tmp is not None:
return getattr(tmp, key)
else:
return None
def locateParticle(self):
tmp = self.storage.lookupRealParticle(self.pid)
return (tmp is not None)
if pmi.isController:
class Particle(object):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.ParticleLocal',
pmiproperty = [ "id", "storage" ]
)
@property
def node(self):
value, node = pmi.reduce(pmi.MAXLOC, self, 'locateParticle')
return node
def __getattr__(self, key):
value = pmi.reduce(pmi.MAX, self, 'getLocalData', key)
return value
|
fedepad/espressopp
|
src/Particle.py
|
Python
|
gpl-3.0
| 5,901
|
[
"ESPResSo"
] |
69024fa25c3dba28ada3401938dcc5675f6ff5c9f8392a1cb62ec7cecdbe8bb3
|
import Avogadro
import unittest
from numpy import *
class TestMolecule(unittest.TestCase):
def setUp(self):
self.molecule = Avogadro.molecules.addMolecule()
def test_fileName(self):
self.molecule.fileName = "somefile.ext"
self.assertEqual(self.molecule.fileName, "somefile.ext")
def test_addAtom(self):
# add 5 atoms
for i in range(5):
# test addAtom()
atom = self.molecule.addAtom()
self.assertEqual(atom.index, i)
self.assertEqual(atom.id, i)
self.assertEqual(atom.type, Avogadro.PrimitiveType.AtomType)
# take atom with index 2
atom = self.molecule.atom(2)
# store the id
id = atom.id
# delete it
self.molecule.removeAtom(id)
# test addAtom(id)
atom = self.molecule.addAtom(id)
self.assertEqual(atom.id, id)
def test_removeAtom(self):
# add 2 atoms
atom1 = self.molecule.addAtom()
atom2 = self.molecule.addAtom()
self.assertEqual(self.molecule.numAtoms, 2)
# test removeAtom(Atom)
self.molecule.removeAtom(atom1)
self.assertEqual(self.molecule.numAtoms, 1)
# test removeAtom(id)
self.molecule.removeAtom(atom2.id)
self.assertEqual(self.molecule.numAtoms, 0)
def test_addBond(self):
# add 5 bonds
for i in range(5):
# test addBond()
bond = self.molecule.addBond()
self.assertEqual(bond.index, i)
self.assertEqual(bond.id, i)
self.assertEqual(bond.type, Avogadro.PrimitiveType.BondType)
# take bond with index 2
bond = self.molecule.bond(2)
# store the id
id = bond.id
# delete it
self.molecule.removeBond(id)
# test addBond(id)
bond = self.molecule.addBond(id)
self.assertEqual(bond.id, id)
def test_removeBond(self):
# add 2 bonds
bond1 = self.molecule.addBond()
bond2 = self.molecule.addBond()
self.assertEqual(self.molecule.numBonds, 2)
# test removeBond(Bond)
self.molecule.removeBond(bond1)
self.assertEqual(self.molecule.numBonds, 1)
# test removeBond(id)
self.molecule.removeBond(bond2.id)
self.assertEqual(self.molecule.numBonds, 0)
def test_addHydrogens(self):
atom = self.molecule.addAtom()
atom.atomicNumber = 6
self.molecule.addHydrogens(atom)
self.assertEqual(self.molecule.numAtoms, 5)
def test_removeHydrogens(self):
atom = self.molecule.addAtom()
atom.atomicNumber = 6
self.molecule.addHydrogens(atom)
self.assertEqual(self.molecule.numAtoms, 5)
self.molecule.removeHydrogens(atom)
self.assertEqual(self.molecule.numAtoms, 1)
def test_calculatePartialCharges(self):
print("FIXME: implement test_calculate_partialCharges")
def test_addCube(self):
for i in range(5):
cube = self.molecule.addCube()
self.assertEqual(cube.index, i)
self.assertEqual(cube.id, i)
self.assertEqual(cube.type, Avogadro.PrimitiveType.CubeType)
def test_removeCube(self):
# add 2 cubes
cube1 = self.molecule.addCube()
cube2 = self.molecule.addCube()
self.assertEqual(self.molecule.numCubes, 2)
# test removeCube(Cube)
self.molecule.removeCube(cube1)
self.assertEqual(self.molecule.numCubes, 1)
# test removeCube(id)
self.molecule.removeCube(cube2.id)
self.assertEqual(self.molecule.numCubes, 0)
def test_addResidue(self):
for i in range(5):
residue = self.molecule.addResidue()
self.assertEqual(residue.index, i)
self.assertEqual(residue.id, i)
self.assertEqual(residue.type, Avogadro.PrimitiveType.ResidueType)
def test_removeResidue(self):
# add 2 residues
residue1 = self.molecule.addResidue()
residue2 = self.molecule.addResidue()
self.assertEqual(self.molecule.numResidues, 2)
# test removeResidue(Residue)
self.molecule.removeResidue(residue1)
self.assertEqual(self.molecule.numResidues, 1)
# test removeResidue(id)
self.molecule.removeResidue(residue2.id)
self.assertEqual(self.molecule.numResidues, 0)
def test_addRing(self):
for i in range(5):
ring = self.molecule.addRing()
self.assertEqual(ring.index, i)
self.assertEqual(ring.id, i)
self.assertEqual(ring.type, Avogadro.PrimitiveType.FragmentType)
def test_removeRing(self):
# add 2 rings
ring1 = self.molecule.addRing()
ring2 = self.molecule.addRing()
self.assertEqual(self.molecule.numRings, 2)
# test removeRing(Ring)
self.molecule.removeRing(ring1)
self.assertEqual(self.molecule.numRings, 1)
# test removeRing(id)
self.molecule.removeRing(ring2.id)
self.assertEqual(self.molecule.numRings, 0)
def test_numXXX(self):
for i in range(5):
self.molecule.addAtom()
self.molecule.addBond()
self.molecule.addResidue()
self.molecule.addCube()
self.molecule.addRing()
self.assertEqual(self.molecule.numAtoms, 5)
self.assertEqual(self.molecule.numBonds, 5)
self.assertEqual(self.molecule.numResidues, 5)
self.assertEqual(self.molecule.numCubes, 5)
self.assertEqual(self.molecule.numRings, 5)
def test_atom(self):
# add 3 atoms
for i in range(3):
self.molecule.addAtom()
# delete the 2nd
self.molecule.removeAtom(1)
# atom with index 1 should now have id 2
self.assertEqual(self.molecule.atom(1).id, 2)
def test_atomById(self):
# add 3 atoms
for i in range(3):
self.molecule.addAtom()
# delete the 2nd
self.molecule.removeAtom(1)
# atom with id 2 should now have index 1
self.assertEqual(self.molecule.atomById(2).index, 1)
def test_bond(self):
# add 3 bonds
for i in range(3):
self.molecule.addBond()
# delete the 2nd
self.molecule.removeBond(1)
# bond with index 1 should now have id 2
self.assertEqual(self.molecule.bond(1).id, 2)
def test_bond_id(self):
# add 10 atoms & bonds
for i in range(10):
self.molecule.addAtom()
self.molecule.addBond()
# add a bonds
bond = self.molecule.addBond()
# set the atoms to id 4 & 5, order 1
bond.setAtoms(4, 5, 1)
# test molecule.bond(id, id)
self.assertEqual(self.molecule.bond(4, 5).id, bond.id)
def test_bond_Bond(self):
# add 10 atoms & bonds
for i in range(10):
self.molecule.addAtom()
self.molecule.addBond()
# get atom with id 4 & 5
atom4 = self.molecule.atomById(4)
atom5 = self.molecule.atomById(5)
# add a bond
bond = self.molecule.addBond()
# set the atoms to id 4 & 5, order 1
bond.setAtoms(atom4.id, atom5.id, 1)
# test molecule.bond(Atom, Atom)
self.assertEqual(self.molecule.bond(atom4, atom5).id, bond.id)
def test_bondById(self):
# add 3 bonds
for i in range(3):
self.molecule.addBond()
# delete the 2nd
self.molecule.removeBond(1)
# bond with id 2 should now have index 1
self.assertEqual(self.molecule.bondById(2).index, 1)
def test_residue(self):
# add 3 residues
for i in range(3):
self.molecule.addResidue()
# delete the 2nd
self.molecule.removeResidue(1)
# residue with index 1 should now have id 2
self.assertEqual(self.molecule.residue(1).id, 2)
def test_residueById(self):
# add 3 residues
for i in range(3):
self.molecule.addResidue()
# delete the 2nd
self.molecule.removeResidue(1)
# residue with id 2 should now have index 1
self.assertEqual(self.molecule.residueById(2).index, 1)
def test_atoms(self):
# add 10 atoms
for i in range(10):
self.molecule.addAtom()
i = 0
for atom in self.molecule.atoms:
self.assertEqual(atom.type, Avogadro.PrimitiveType.AtomType)
self.assertEqual(atom.id, i)
i += 1
def test_bonds(self):
# add 10 bonds
for i in range(10):
self.molecule.addBond()
i = 0
for bond in self.molecule.bonds:
self.assertEqual(bond.type, Avogadro.PrimitiveType.BondType)
self.assertEqual(bond.id, i)
i += 1
def test_cubes(self):
# add 10 cubes
for i in range(10):
self.molecule.addCube()
i = 0
for cube in self.molecule.cubes:
self.assertEqual(cube.type, Avogadro.PrimitiveType.CubeType)
self.assertEqual(cube.id, i)
i += 1
def test_residues(self):
# add 10 residues
for i in range(10):
self.molecule.addResidue()
i = 0
for residue in self.molecule.residues:
self.assertEqual(residue.type, Avogadro.PrimitiveType.ResidueType)
self.assertEqual(residue.id, i)
i += 1
def test_rings(self):
# add 10 rings
for i in range(10):
self.molecule.addRing()
i = 0
for ring in self.molecule.rings:
self.assertEqual(ring.type, Avogadro.PrimitiveType.FragmentType)
self.assertEqual(ring.id, i)
i += 1
def test_clear(self):
for i in range(5):
self.molecule.addAtom()
self.molecule.addBond()
self.molecule.addResidue()
self.molecule.addCube()
self.molecule.addRing()
self.molecule.clear()
self.assertEqual(self.molecule.numAtoms, 0)
self.assertEqual(self.molecule.numBonds, 0)
self.assertEqual(self.molecule.numResidues, 0)
self.assertEqual(self.molecule.numCubes, 0)
self.assertEqual(self.molecule.numRings, 0)
def test_center(self):
atom1 = self.molecule.addAtom()
atom2 = self.molecule.addAtom()
atom1.pos = array([1.0, 2.0, 3.0])
atom2.pos = array([4.0, 5.0, 6.0])
# compute the center
center = (atom1.pos + atom2.pos) / 2.0
self.assertEqual(self.molecule.center[0], center[0])
self.assertEqual(self.molecule.center[1], center[1])
self.assertEqual(self.molecule.center[2], center[2])
def test_normalVector(self):
atom1 = self.molecule.addAtom()
atom2 = self.molecule.addAtom()
atom1.pos = array([1.0, 2.0, 3.0])
atom2.pos = array([4.0, 5.0, 6.0])
n = self.molecule.normalVector
# just check we got an array with size 3
self.assertEqual(len(n), 3)
def test_radius(self):
# just check the method is there
self.molecule.radius
def test_farthestAtom(self):
# just check the method is there
self.molecule.farthestAtom
def test_translate(self):
print("FIXME: Molecule::translate(Eigen::Vector3d isn't implemented)")
# just check the method is there and accepts the array
atom = self.molecule.addAtom()
vec = array([1., 2., 3.])
self.molecule.translate(vec)
if __name__ == "__main__":
unittest.main()
|
rcplane/periodicdisplay
|
reference/avogadro/libavogadro/src/python/unittest/molecule.py
|
Python
|
gpl-2.0
| 10,475
|
[
"Avogadro"
] |
d73e105372494c913ce3430d327decdb9d8dacf05070eaad00d69f5e33395aee
|
#
# Copyright (C) 2007, Mark Lee
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# $Revision: 473 $
# $Date: 2009-01-29 22:50:12 -0500 (Thu, 29 Jan 2009) $
# $Author: brian@tannerpages.com $
# $HeadURL: http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/src/rlglue/agent/AgentLoader.py $
import sys
import os
import rlglue.network.Network as Network
from ClientAgent import ClientAgent
from rlglue.versions import get_svn_codec_version
from rlglue.versions import get_codec_version
def loadAgent(theAgent):
theSVNVersion=get_svn_codec_version()
theCodecVersion=get_codec_version()
client = ClientAgent(theAgent)
host = Network.kLocalHost
port = Network.kDefaultPort
hostString = os.getenv("RLGLUE_HOST")
portString = os.getenv("RLGLUE_PORT")
if (hostString != None):
host = hostString
try:
port = int(portString)
except TypeError:
port = Network.kDefaultPort
print "RL-Glue Python Agent Codec Version: "+theCodecVersion+" (Build "+theSVNVersion+")"
print "\tConnecting to " + host + " on port " + str(port) + "..."
sys.stdout.flush()
client.connect(host, port, Network.kRetryTimeout)
print "\t Agent Codec Connected"
client.runAgentEventLoop()
client.close()
def loadAgentLikeScript():
agentModule = __import__(sys.argv[1])
agentClass = getattr(agentModule,sys.argv[1])
agent = agentClass()
client = ClientAgent(agent)
loadAgent(agent)
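# Illustrative usage sketch (not part of the original module): loadAgentLikeScript()
# expects the agent's module and class to share a name, e.g. a hypothetical
# SampleAgent.py defining class SampleAgent, and would typically be driven as:
#   RLGLUE_HOST=localhost RLGLUE_PORT=4096 python AgentLoader.py SampleAgent
# The RLGLUE_HOST/RLGLUE_PORT environment variables are optional; the Network
# defaults are used when they are unset.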
|
shiwalimohan/RLInfiniteMario
|
system/codecs/Python/src/rlglue/agent/AgentLoader.py
|
Python
|
gpl-2.0
| 1,942
|
[
"Brian"
] |
4e3b828370ef1a88d134d3b75594bdf94c3c30304156bfbbf19f6cb91e75644d
|
"""
Copyright 2021 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""Helper methods to locate asset files."""
import collections
import logging
import os
import shutil
import tempfile
from typing import Callable, Dict, Optional
_MODULE_DIR = os.path.join(os.path.dirname(__file__), '..', '..')
_SCENES_DIR_COMPONENT = 'adept_envs'
def get_asset_path(path: str):
"""Returns the absolute path of the given fully-qualified resource path.
Example:
>>> get_asset_path('adept_envs/dclaw/assets/')
Args:
path: The path to the resource, with components separated by slashes.
"""
if path.startswith('adept_models'):
asset_path = os.path.join(_MODULE_DIR, _SCENES_DIR_COMPONENT, path)
elif path.startswith('adept_envs'):
asset_path = os.path.join(_MODULE_DIR, path)
else:
raise ValueError('Unknown path root: ' + path)
asset_path = os.path.normpath(asset_path)
return asset_path
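# Illustrative note (sketch; concrete paths are hypothetical): a path rooted at
# 'adept_models' is resolved under _MODULE_DIR/adept_envs/, e.g.
#   get_asset_path('adept_models/scenes/scene.xml')
# while a path rooted at 'adept_envs' is resolved under _MODULE_DIR directly,
# e.g. get_asset_path('adept_envs/dclaw/assets/'); any other root raises ValueError.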
def get_resource(path: str, mode: str = 'rb'):
"""Returns the contents of the given resource file path."""
# GOOGLE3-ONLY from google3.pyglib import resources
# GOOGLE3-ONLY return resources.GetResource(path)
# GOOGLE3-ONLY # Ignore below - used outside google3.
with open(path, mode=mode) as f:
return f.read()
class AssetBundle:
"""Represents a bundle of assets files"""
def __init__(self,
dest_path: Optional[str] = None,
resource_fn: Callable[[str], bytes] = get_resource,
dry_run: bool = False,
verbose: bool = False):
"""Creates a new asset bundle.
Args:
dest_path: The destination directory to copy the bundle to.
resource_fn: The function used to get the contents of the file.
dry_run: If True, does not write files to the destination.
verbose: If True, logs copied files.
"""
self._resource_fn = resource_fn
self._dry_run = dry_run
self._verbose = verbose
self._copied_resources = collections.OrderedDict()
self._needs_cleanup = False
if dest_path is None and not dry_run:
dest_path = tempfile.mkdtemp()
self._needs_cleanup = True
self._dest_path = dest_path or ''
@property
def copied_paths(self) -> Dict[str, str]:
"""Returns the copied resource paths."""
return self._copied_resources
def cleanup(self):
"""Removes the temporary directory."""
if self._needs_cleanup and self._dest_path:
shutil.rmtree(self._dest_path)
self._needs_cleanup = False
def add_mujoco(self, main_path: str) -> str:
"""Adds the given MuJoCo XML file to the bundle."""
from xml.etree import ElementTree as etree
main_path = os.path.normpath(main_path)
main_dir = os.path.dirname(main_path)
directory_context = {
'mesh': main_dir,
'texture': main_dir,
}
# Traverse the XML tree depth-first.
node_stack = []
node_stack.append((directory_context, main_path))
while node_stack:
directories, file_path = node_stack.pop()
base_dir = os.path.dirname(file_path)
xml_contents = self._copy_asset(file_path)
node = etree.fromstring(xml_contents)
children = []
# Update the directories if a compiler tag is present.
for child in node.iter('compiler'):
if 'meshdir' in child.attrib:
directories['mesh'] = os.path.join(base_dir,
child.attrib['meshdir'])
if 'texturedir' in child.attrib:
directories['texture'] = os.path.join(
base_dir, child.attrib['texturedir'])
for child in node.iter():
# Resolve mesh and texture children with file tags.
if child.tag in directories:
if 'file' in child.attrib:
asset_path = os.path.join(directories[child.tag],
child.attrib['file'])
if asset_path not in self._copied_resources:
self._copy_asset(asset_path)
# Traverse includes.
elif child.tag == 'include':
child_path = os.path.join(base_dir, child.attrib['file'])
children.append((directories.copy(), child_path))
# Traverse children in visit order.
node_stack.extend(reversed(children))
return self._copied_resources[main_path]
def _copy_asset(self, asset_path: str) -> bytes:
"""Copies an asset and returns its contents."""
assert not asset_path.startswith('/'), asset_path
asset_path = os.path.normpath(asset_path)
if self._verbose:
logging.info('Found asset: %s', asset_path)
contents = self._resource_fn(asset_path)
# Copy the asset to the destination.
if asset_path not in self._copied_resources:
copy_path = os.path.join(self._dest_path, asset_path)
if not self._dry_run:
self._write_asset(copy_path, contents)
self._copied_resources[asset_path] = copy_path
return contents
def _write_asset(self, write_path: str, contents: bytes):
"""Writes the contents to the given path."""
copy_dir = os.path.dirname(write_path)
if not os.path.isdir(copy_dir):
os.makedirs(copy_dir)
with open(write_path, 'wb') as f:
f.write(contents)
def __enter__(self):
"""Enables use as a context manager."""
return self
def __exit__(self, *args):
"""Enables use as a context manager."""
self.cleanup()
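# Illustrative usage sketch (the file path below is hypothetical):
#
#   with AssetBundle(dry_run=True) as bundle:
#       main_copy = bundle.add_mujoco('adept_envs/dclaw/assets/dclaw.xml')
#       print(bundle.copied_paths)
#
# With dry_run=True the referenced XML, mesh and texture files are still read
# (to discover dependencies) but nothing is written to the destination.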
|
google-research/DBAP-simulation
|
adept_envs/utils/resources.py
|
Python
|
apache-2.0
| 6,407
|
[
"VisIt"
] |
3fe1cce580a038f6acfadfde6a69b23859388f1cb7dd41353165f0c554b1bf87
|
# .. coding: utf-8
# $Id: __init__.py 7971 2016-09-13 19:11:48Z milde $
# Author: Engelbert Gruber, Günter Milde
# Maintainer: docutils-develop@lists.sourceforge.net
# Copyright: This module has been placed in the public domain.
"""LaTeX2e document tree Writer."""
__docformat__ = 'reStructuredText'
# code contributions from several people included, thanks to all.
# some named: David Abrahams, Julien Letessier, Lele Gaifax, and others.
#
# convention deactivate code by two # i.e. ##.
import sys
import os
import time
import re
import string
import urllib
try:
import roman
except ImportError:
import docutils.utils.roman as roman
from docutils import frontend, nodes, languages, writers, utils, io
from docutils.utils.error_reporting import SafeString
from docutils.transforms import writer_aux
from docutils.utils.math import pick_math_environment, unichar2tex
class Writer(writers.Writer):
supported = ('latex','latex2e')
"""Formats this writer supports."""
default_template = 'default.tex'
default_template_path = os.path.dirname(os.path.abspath(__file__))
default_preamble = '\n'.join([r'% PDF Standard Fonts',
r'\usepackage{mathptmx} % Times',
r'\usepackage[scaled=.90]{helvet}',
r'\usepackage{courier}'])
table_style_values = ('standard', 'booktabs','nolines', 'borderless',
'colwidths-auto', 'colwidths-given')
settings_spec = (
'LaTeX-Specific Options',
None,
(('Specify documentclass. Default is "article".',
['--documentclass'],
{'default': 'article', }),
('Specify document options. Multiple options can be given, '
'separated by commas. Default is "a4paper".',
['--documentoptions'],
{'default': 'a4paper', }),
('Footnotes with numbers/symbols by Docutils. (default)',
['--docutils-footnotes'],
{'default': True, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Format for footnote references: one of "superscript" or '
'"brackets". Default is "superscript".',
['--footnote-references'],
{'choices': ['superscript', 'brackets'], 'default': 'superscript',
'metavar': '<format>',
'overrides': 'trim_footnote_reference_space'}),
('Use \\cite command for citations. ',
['--use-latex-citations'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Use figure floats for citations '
'(might get mixed with real figures). (default)',
['--figure-citations'],
{'dest': 'use_latex_citations', 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Format for block quote attributions: one of "dash" (em-dash '
'prefix), "parentheses"/"parens", or "none". Default is "dash".',
['--attribution'],
{'choices': ['dash', 'parentheses', 'parens', 'none'],
'default': 'dash', 'metavar': '<format>'}),
('Specify LaTeX packages/stylesheets. '
' A style is referenced with \\usepackage if extension is '
'".sty" or omitted and with \\input else. '
' Overrides previous --stylesheet and --stylesheet-path settings.',
['--stylesheet'],
{'default': '', 'metavar': '<file[,file,...]>',
'overrides': 'stylesheet_path',
'validator': frontend.validate_comma_separated_list}),
('Comma separated list of LaTeX packages/stylesheets. '
'Relative paths are expanded if a matching file is found in '
'the --stylesheet-dirs. With --link-stylesheet, '
'the path is rewritten relative to the output *.tex file. ',
['--stylesheet-path'],
{'metavar': '<file[,file,...]>', 'overrides': 'stylesheet',
'validator': frontend.validate_comma_separated_list}),
('Link to the stylesheet(s) in the output file. (default)',
['--link-stylesheet'],
{'dest': 'embed_stylesheet', 'action': 'store_false'}),
('Embed the stylesheet(s) in the output file. '
'Stylesheets must be accessible during processing. ',
['--embed-stylesheet'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Comma-separated list of directories where stylesheets are found. '
'Used by --stylesheet-path when expanding relative path arguments. '
'Default: "."',
['--stylesheet-dirs'],
{'metavar': '<dir[,dir,...]>',
'validator': frontend.validate_comma_separated_list,
'default': ['.']}),
('Customization by LaTeX code in the preamble. '
'Default: select PDF standard fonts (Times, Helvetica, Courier).',
['--latex-preamble'],
{'default': default_preamble}),
('Specify the template file. Default: "%s".' % default_template,
['--template'],
{'default': default_template, 'metavar': '<file>'}),
('Table of contents by LaTeX. (default) ',
['--use-latex-toc'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Table of contents by Docutils (without page numbers). ',
['--use-docutils-toc'],
{'dest': 'use_latex_toc', 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Add parts on top of the section hierarchy.',
['--use-part-section'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Attach author and date to the document info table. (default) ',
['--use-docutils-docinfo'],
{'dest': 'use_latex_docinfo', 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Attach author and date to the document title.',
['--use-latex-docinfo'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
("Typeset abstract as topic. (default)",
['--topic-abstract'],
{'dest': 'use_latex_abstract', 'action': 'store_false',
'validator': frontend.validate_boolean}),
("Use LaTeX abstract environment for the document's abstract. ",
['--use-latex-abstract'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Color of any hyperlinks embedded in text '
'(default: "blue", "false" to disable).',
['--hyperlink-color'], {'default': 'blue'}),
('Additional options to the "hyperref" package '
'(default: "").',
['--hyperref-options'], {'default': ''}),
('Enable compound enumerators for nested enumerated lists '
'(e.g. "1.2.a.ii"). Default: disabled.',
['--compound-enumerators'],
{'default': None, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compound enumerators for nested enumerated lists. '
'This is the default.',
['--no-compound-enumerators'],
{'action': 'store_false', 'dest': 'compound_enumerators'}),
('Enable section ("." subsection ...) prefixes for compound '
'enumerators. This has no effect without --compound-enumerators. '
'Default: disabled.',
['--section-prefix-for-enumerators'],
{'default': None, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable section prefixes for compound enumerators. '
'This is the default.',
['--no-section-prefix-for-enumerators'],
{'action': 'store_false', 'dest': 'section_prefix_for_enumerators'}),
('Set the separator between section number and enumerator '
'for compound enumerated lists. Default is "-".',
['--section-enumerator-separator'],
{'default': '-', 'metavar': '<char>'}),
('When possible, use the specified environment for literal-blocks. '
'Default is quoting of whitespace and special chars.',
['--literal-block-env'],
{'default': ''}),
('When possible, use verbatim for literal-blocks. '
'Compatibility alias for "--literal-block-env=verbatim".',
['--use-verbatim-when-possible'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Table style. "standard" with horizontal and vertical lines, '
'"booktabs" (LaTeX booktabs style) only horizontal lines '
'above and below the table and below the header or "borderless". '
'Default: "standard"',
['--table-style'],
{'default': ['standard'],
'metavar': '<format>',
'action': 'append',
'validator': frontend.validate_comma_separated_list,
'choices': table_style_values}),
('LaTeX graphicx package option. '
'Possible values are "dvips", "pdftex". "auto" includes LaTeX code '
'to use "pdftex" if processing with pdf(la)tex and dvips otherwise. '
'Default is no option.',
['--graphicx-option'],
{'default': ''}),
('LaTeX font encoding. '
'Possible values are "", "T1" (default), "OT1", "LGR,T1" or '
'any other combination of options to the `fontenc` package. ',
['--font-encoding'],
{'default': 'T1'}),
('By default the latex-writer puts the reference title into '
'hyperreferences. Specify "ref*" or "pageref*" to get the section '
'number or the page number.',
['--reference-label'],
{'default': None, }),
('Specify style and database for bibtex, for example '
'"--use-bibtex=mystyle,mydb1,mydb2".',
['--use-bibtex'],
{'default': None, }),
),)
settings_defaults = {'sectnum_depth': 0 # updated by SectNum transform
}
config_section = 'latex2e writer'
config_section_dependencies = ('writers',)
head_parts = ('head_prefix', 'requirements', 'latex_preamble',
'stylesheet', 'fallbacks', 'pdfsetup',
'title', 'subtitle', 'titledata')
visitor_attributes = head_parts + ('body_pre_docinfo', 'docinfo',
'dedication', 'abstract', 'body')
output = None
"""Final translated form of `document`."""
def __init__(self):
writers.Writer.__init__(self)
self.translator_class = LaTeXTranslator
# Override parent method to add latex-specific transforms
def get_transforms(self):
return writers.Writer.get_transforms(self) + [
# Convert specific admonitions to generic one
writer_aux.Admonitions,
# TODO: footnote collection transform
]
def translate(self):
visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
# copy parts
for part in self.visitor_attributes:
setattr(self, part, getattr(visitor, part))
# get template string from file
try:
template_file = open(self.document.settings.template, 'rb')
except IOError:
template_file = open(os.path.join(self.default_template_path,
self.document.settings.template), 'rb')
template = string.Template(unicode(template_file.read(), 'utf-8'))
template_file.close()
# fill template
self.assemble_parts() # create dictionary of parts
self.output = template.substitute(self.parts)
def assemble_parts(self):
"""Assemble the `self.parts` dictionary of output fragments."""
writers.Writer.assemble_parts(self)
for part in self.visitor_attributes:
lines = getattr(self, part)
if part in self.head_parts:
if lines:
lines.append('') # to get a trailing newline
self.parts[part] = '\n'.join(lines)
else:
# body contains inline elements, so join without newline
self.parts[part] = ''.join(lines)
class Babel(object):
"""Language specifics for LaTeX."""
# TeX (babel) language names:
# ! not all of these are supported by Docutils!
#
# based on LyX' languages file with adaptations to `BCP 47`_
# (http://www.rfc-editor.org/rfc/bcp/bcp47.txt) and
# http://www.tug.org/TUGboat/Articles/tb29-3/tb93miklavec.pdf
# * the key without subtags is the default
# * case is ignored
# cf. http://docutils.sourceforge.net/docs/howto/i18n.html
# http://www.w3.org/International/articles/language-tags/
# and http://www.iana.org/assignments/language-subtag-registry
language_codes = {
# code TeX/Babel-name comment
'af': 'afrikaans',
'ar': 'arabic',
# 'be': 'belarusian',
'bg': 'bulgarian',
'br': 'breton',
'ca': 'catalan',
# 'cop': 'coptic',
'cs': 'czech',
'cy': 'welsh',
'da': 'danish',
'de': 'ngerman', # new spelling (de_1996)
'de-1901': 'german', # old spelling
'de-AT': 'naustrian',
'de-AT-1901': 'austrian',
'dsb': 'lowersorbian',
'el': 'greek', # monotonic (el-monoton)
'el-polyton': 'polutonikogreek',
'en': 'english', # TeX' default language
'en-AU': 'australian',
'en-CA': 'canadian',
'en-GB': 'british',
'en-NZ': 'newzealand',
'en-US': 'american',
'eo': 'esperanto',
'es': 'spanish',
'et': 'estonian',
'eu': 'basque',
# 'fa': 'farsi',
'fi': 'finnish',
'fr': 'french',
'fr-CA': 'canadien',
'ga': 'irish', # Irish Gaelic
# 'grc': # Ancient Greek
'grc-ibycus': 'ibycus', # Ibycus encoding
'gl': 'galician',
'he': 'hebrew',
'hr': 'croatian',
'hsb': 'uppersorbian',
'hu': 'magyar',
'ia': 'interlingua',
'id': 'bahasai', # Bahasa (Indonesian)
'is': 'icelandic',
'it': 'italian',
'ja': 'japanese',
'kk': 'kazakh',
'la': 'latin',
'lt': 'lithuanian',
'lv': 'latvian',
'mn': 'mongolian', # Mongolian, Cyrillic script (mn-cyrl)
'ms': 'bahasam', # Bahasa (Malay)
'nb': 'norsk', # Norwegian Bokmal
'nl': 'dutch',
'nn': 'nynorsk', # Norwegian Nynorsk
'no': 'norsk', # Norwegian (Bokmal)
'pl': 'polish',
'pt': 'portuges',
'pt-BR': 'brazil',
'ro': 'romanian',
'ru': 'russian',
'se': 'samin', # North Sami
'sh-Cyrl': 'serbianc', # Serbo-Croatian, Cyrillic script
'sh-Latn': 'serbian', # Serbo-Croatian, Latin script see also 'hr'
'sk': 'slovak',
'sl': 'slovene',
'sq': 'albanian',
'sr': 'serbianc', # Serbian, Cyrillic script (contributed)
'sr-Latn': 'serbian', # Serbian, Latin script
'sv': 'swedish',
# 'th': 'thai',
'tr': 'turkish',
'uk': 'ukrainian',
'vi': 'vietnam',
# zh-Latn: Chinese Pinyin
}
# normalize (downcase) keys
language_codes = dict([(k.lower(), v) for (k,v) in language_codes.items()])
warn_msg = 'Language "%s" not supported by LaTeX (babel)'
# "Active characters" are shortcuts that start a LaTeX macro and may need
# escaping for literal use. Characters that prevent literal use (e.g.
# starting accent macros like "a -> ä) will be deactivated if one of the
# defining languages is used in the document.
# Special cases:
# ~ (tilde) -- used in estonian, basque, galician, and old versions of
# spanish -- cannot be deactivated as it denotes a no-break space macro,
# " (straight quote) -- used in albanian, austrian, basque
# brazil, bulgarian, catalan, czech, danish, dutch, estonian,
# finnish, galician, german, icelandic, italian, latin, naustrian,
# ngerman, norsk, nynorsk, polish, portuges, russian, serbian, slovak,
# slovene, spanish, swedish, ukrainian, and uppersorbian --
# is escaped as ``\textquotedbl``.
active_chars = {# TeX/Babel-name: active characters to deactivate
# 'breton': ':;!?' # ensure whitespace
# 'esperanto': '^',
# 'estonian': '~"`',
# 'french': ':;!?' # ensure whitespace
'galician': '.<>', # also '~"'
# 'magyar': '`', # for special hyphenation cases
'spanish': '.<>', # old versions also '~'
# 'turkish': ':!=' # ensure whitespace
}
def __init__(self, language_code, reporter=None):
self.reporter = reporter
self.language = self.language_name(language_code)
self.otherlanguages = {}
def __call__(self):
"""Return the babel call with correct options and settings"""
languages = sorted(self.otherlanguages.keys())
languages.append(self.language or 'english')
self.setup = [r'\usepackage[%s]{babel}' % ','.join(languages)]
# Deactivate "active characters"
shorthands = []
for c in ''.join([self.active_chars.get(l, '') for l in languages]):
if c not in shorthands:
shorthands.append(c)
if shorthands:
self.setup.append(r'\AtBeginDocument{\shorthandoff{%s}}'
% ''.join(shorthands))
# Including '~' in shorthandoff prevents its use as no-break space
if 'galician' in languages:
self.setup.append(r'\deactivatetilden % restore ~ in Galician')
if 'estonian' in languages:
self.setup.extend([r'\makeatletter',
r' \addto\extrasestonian{\bbl@deactivate{~}}',
r'\makeatother'])
if 'basque' in languages:
self.setup.extend([r'\makeatletter',
r' \addto\extrasbasque{\bbl@deactivate{~}}',
r'\makeatother'])
if (languages[-1] == 'english' and
'french' in self.otherlanguages.keys()):
self.setup += ['% Prevent side-effects if French hyphenation '
'patterns are not loaded:',
r'\frenchbsetup{StandardLayout}',
r'\AtBeginDocument{\selectlanguage{%s}'
r'\noextrasfrench}' % self.language]
return '\n'.join(self.setup)
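# Illustrative example (sketch): with language_code 'de' and no other
# languages, __call__ would emit just
#   \usepackage[ngerman]{babel}
# whereas e.g. 'es' additionally appends
#   \AtBeginDocument{\shorthandoff{.<>}}
# to deactivate the Spanish shorthand characters listed in active_chars.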
def language_name(self, language_code):
"""Return TeX language name for `language_code`"""
for tag in utils.normalize_language_tag(language_code):
try:
return self.language_codes[tag]
except KeyError:
pass
if self.reporter is not None:
self.reporter.warning(self.warn_msg % language_code)
return ''
def get_language(self):
# Obsolete, kept for backwards compatibility with Sphinx
return self.language
# Building blocks for the latex preamble
# --------------------------------------
class SortableDict(dict):
"""Dictionary with additional sorting methods
Tip: use a key starting with '_' for sorting before small letters
and with '~' for sorting after small letters.
"""
def sortedkeys(self):
"""Return sorted list of keys"""
keys = self.keys()
keys.sort()
return keys
def sortedvalues(self):
"""Return list of values sorted by keys"""
return [self[key] for key in self.sortedkeys()]
# PreambleCmds
# `````````````
# A container for LaTeX code snippets that can be
# inserted into the preamble if required in the document.
#
# .. The package 'makecmds' would enable shorter definitions using the
# \providelength and \provideenvironment commands.
# However, it is pretty non-standard (texlive-latex-extra).
class PreambleCmds(object):
"""Building blocks for the latex preamble."""
PreambleCmds.abstract = r"""
% abstract title
\providecommand*{\DUtitleabstract}[1]{\centering\textbf{#1}}"""
PreambleCmds.admonition = r"""
% admonition (specially marked topic)
\providecommand{\DUadmonition}[2][class-arg]{%
% try \DUadmonition#1{#2}:
\ifcsname DUadmonition#1\endcsname%
\csname DUadmonition#1\endcsname{#2}%
\else
\begin{center}
\fbox{\parbox{0.9\linewidth}{#2}}
\end{center}
\fi
}"""
PreambleCmds.align_center = r"""
\makeatletter
\@namedef{DUrolealign-center}{\centering}
\makeatother
"""
## PreambleCmds.caption = r"""% configure caption layout
## \usepackage{caption}
## \captionsetup{singlelinecheck=false}% no exceptions for one-liners"""
PreambleCmds.color = r"""\usepackage{color}"""
PreambleCmds.docinfo = r"""
% docinfo (width of docinfo table)
\DUprovidelength{\DUdocinfowidth}{0.9\linewidth}"""
# PreambleCmds.docinfo._depends = 'providelength'
PreambleCmds.dedication = r"""
% dedication topic
\providecommand{\DUtopicdedication}[1]{\begin{center}#1\end{center}}"""
PreambleCmds.error = r"""
% error admonition title
\providecommand*{\DUtitleerror}[1]{\DUtitle{\color{red}#1}}"""
# PreambleCmds.errortitle._depends = 'color'
PreambleCmds.fieldlist = r"""
% fieldlist environment
\ifthenelse{\isundefined{\DUfieldlist}}{
\newenvironment{DUfieldlist}%
{\quote\description}
{\enddescription\endquote}
}{}"""
PreambleCmds.float_settings = r"""\usepackage{float} % float configuration
\floatplacement{figure}{H} % place figures here definitely"""
PreambleCmds.footnotes = r"""% numeric or symbol footnotes with hyperlinks
\providecommand*{\DUfootnotemark}[3]{%
\raisebox{1em}{\hypertarget{#1}{}}%
\hyperlink{#2}{\textsuperscript{#3}}%
}
\providecommand{\DUfootnotetext}[4]{%
\begingroup%
\renewcommand{\thefootnote}{%
\protect\raisebox{1em}{\protect\hypertarget{#1}{}}%
\protect\hyperlink{#2}{#3}}%
\footnotetext{#4}%
\endgroup%
}"""
PreambleCmds.graphicx_auto = r"""% Check output format
\ifx\pdftexversion\undefined
\usepackage{graphicx}
\else
\usepackage[pdftex]{graphicx}
\fi"""
PreambleCmds.highlight_rules = r"""% basic code highlight:
\providecommand*\DUrolecomment[1]{\textcolor[rgb]{0.40,0.40,0.40}{#1}}
\providecommand*\DUroledeleted[1]{\textcolor[rgb]{0.40,0.40,0.40}{#1}}
\providecommand*\DUrolekeyword[1]{\textbf{#1}}
\providecommand*\DUrolestring[1]{\textit{#1}}"""
PreambleCmds.inline = r"""
% inline markup (custom roles)
% \DUrole{#1}{#2} tries \DUrole#1{#2}
\providecommand*{\DUrole}[2]{%
\ifcsname DUrole#1\endcsname%
\csname DUrole#1\endcsname{#2}%
\else% backwards compatibility: try \docutilsrole#1{#2}
\ifcsname docutilsrole#1\endcsname%
\csname docutilsrole#1\endcsname{#2}%
\else%
#2%
\fi%
\fi%
}"""
PreambleCmds.legend = r"""
% legend environment
\ifthenelse{\isundefined{\DUlegend}}{
\newenvironment{DUlegend}{\small}{}
}{}"""
PreambleCmds.lineblock = r"""
% lineblock environment
\DUprovidelength{\DUlineblockindent}{2.5em}
\ifthenelse{\isundefined{\DUlineblock}}{
\newenvironment{DUlineblock}[1]{%
\list{}{\setlength{\partopsep}{\parskip}
\addtolength{\partopsep}{\baselineskip}
\setlength{\topsep}{0pt}
\setlength{\itemsep}{0.15\baselineskip}
\setlength{\parsep}{0pt}
\setlength{\leftmargin}{#1}}
\raggedright
}
{\endlist}
}{}"""
# PreambleCmds.lineblock._depends = 'providelength'
PreambleCmds.linking = r"""
%% hyperlinks:
\ifthenelse{\isundefined{\hypersetup}}{
\usepackage[%s]{hyperref}
\usepackage{bookmark}
\urlstyle{same} %% normal text font (alternatives: tt, rm, sf)
}{}"""
PreambleCmds.minitoc = r"""%% local table of contents
\usepackage{minitoc}"""
PreambleCmds.optionlist = r"""
% optionlist environment
\providecommand*{\DUoptionlistlabel}[1]{\bf #1 \hfill}
\DUprovidelength{\DUoptionlistindent}{3cm}
\ifthenelse{\isundefined{\DUoptionlist}}{
\newenvironment{DUoptionlist}{%
\list{}{\setlength{\labelwidth}{\DUoptionlistindent}
\setlength{\rightmargin}{1cm}
\setlength{\leftmargin}{\rightmargin}
\addtolength{\leftmargin}{\labelwidth}
\addtolength{\leftmargin}{\labelsep}
\renewcommand{\makelabel}{\DUoptionlistlabel}}
}
{\endlist}
}{}"""
# PreambleCmds.optionlist._depends = 'providelength'
PreambleCmds.providelength = r"""
% providelength (provide a length variable and set default, if it is new)
\providecommand*{\DUprovidelength}[2]{
\ifthenelse{\isundefined{#1}}{\newlength{#1}\setlength{#1}{#2}}{}
}"""
PreambleCmds.rubric = r"""
% rubric (informal heading)
\providecommand*{\DUrubric}[2][class-arg]{%
\subsubsection*{\centering\textit{\textmd{#2}}}}"""
PreambleCmds.sidebar = r"""
% sidebar (text outside the main text flow)
\providecommand{\DUsidebar}[2][class-arg]{%
\begin{center}
\colorbox[gray]{0.80}{\parbox{0.9\linewidth}{#2}}
\end{center}
}"""
PreambleCmds.subtitle = r"""
% subtitle (for topic/sidebar)
\providecommand*{\DUsubtitle}[2][class-arg]{\par\emph{#2}\smallskip}"""
PreambleCmds.documentsubtitle = r"""
% subtitle (in document title)
\providecommand*{\DUdocumentsubtitle}[1]{{\large #1}}"""
PreambleCmds.table = r"""\usepackage{longtable,ltcaption,array}
\setlength{\extrarowheight}{2pt}
\newlength{\DUtablewidth} % internal use in tables"""
# Options [force,almostfull] prevent spurious error messages, see
# de.comp.text.tex/2005-12/msg01855
PreambleCmds.textcomp = """\
\\usepackage{textcomp} % text symbol macros"""
PreambleCmds.titlereference = r"""
% titlereference role
\providecommand*{\DUroletitlereference}[1]{\textsl{#1}}"""
PreambleCmds.title = r"""
% title for topics, admonitions, unsupported section levels, and sidebar
\providecommand*{\DUtitle}[2][class-arg]{%
% call \DUtitle#1{#2} if it exists:
\ifcsname DUtitle#1\endcsname%
\csname DUtitle#1\endcsname{#2}%
\else
\smallskip\noindent\textbf{#2}\smallskip%
\fi
}"""
PreambleCmds.topic = r"""
% topic (quote with heading)
\providecommand{\DUtopic}[2][class-arg]{%
\ifcsname DUtopic#1\endcsname%
\csname DUtopic#1\endcsname{#2}%
\else
\begin{quote}#2\end{quote}
\fi
}"""
PreambleCmds.transition = r"""
% transition (break, fancybreak, anonymous section)
\providecommand*{\DUtransition}[1][class-arg]{%
\hspace*{\fill}\hrulefill\hspace*{\fill}
\vskip 0.5\baselineskip
}"""
# LaTeX encoding maps
# -------------------
# ::
class CharMaps(object):
"""LaTeX representations for active and Unicode characters."""
# characters that need escaping even in `alltt` environments:
alltt = {
ord('\\'): ur'\textbackslash{}',
ord('{'): ur'\{',
ord('}'): ur'\}',
}
# characters that normally need escaping:
special = {
ord('#'): ur'\#',
ord('$'): ur'\$',
ord('%'): ur'\%',
ord('&'): ur'\&',
ord('~'): ur'\textasciitilde{}',
ord('_'): ur'\_',
ord('^'): ur'\textasciicircum{}',
# straight double quotes are 'active' in many languages
ord('"'): ur'\textquotedbl{}',
# Square brackets are ordinary chars and cannot be escaped with '\',
# so we put them in a group '{[}'. (Alternative: ensure that all
# macros with optional arguments are terminated with {} and text
# inside any optional argument is put in a group ``[{text}]``).
# Commands with optional args inside an optional arg must be put in a
# group, e.g. ``\item[{\hyperref[label]{text}}]``.
ord('['): ur'{[}',
ord(']'): ur'{]}',
# the soft hyphen is unknown in 8-bit text
# and not properly handled by XeTeX
0x00AD: ur'\-', # SOFT HYPHEN
}
# Unicode chars that are not recognized by LaTeX's utf8 encoding
unsupported_unicode = {
0x00A0: ur'~', # NO-BREAK SPACE
# TODO: ensure white space also at the beginning of a line?
# 0x00A0: ur'\leavevmode\nobreak\vadjust{}~'
0x2008: ur'\,', # PUNCTUATION SPACE
0x2011: ur'\hbox{-}', # NON-BREAKING HYPHEN
0x202F: ur'\,', # NARROW NO-BREAK SPACE
0x21d4: ur'$\Leftrightarrow$',
# Docutils footnote symbols:
0x2660: ur'$\spadesuit$',
0x2663: ur'$\clubsuit$',
}
# Unicode chars that are recognized by LaTeX's utf8 encoding
utf8_supported_unicode = {
0x00AB: ur'\guillemotleft{}', # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bb: ur'\guillemotright{}', # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x200C: ur'\textcompwordmark{}', # ZERO WIDTH NON-JOINER
0x2013: ur'\textendash{}',
0x2014: ur'\textemdash{}',
0x2018: ur'\textquoteleft{}',
0x2019: ur'\textquoteright{}',
0x201A: ur'\quotesinglbase{}', # SINGLE LOW-9 QUOTATION MARK
0x201C: ur'\textquotedblleft{}',
0x201D: ur'\textquotedblright{}',
0x201E: ur'\quotedblbase{}', # DOUBLE LOW-9 QUOTATION MARK
0x2030: ur'\textperthousand{}', # PER MILLE SIGN
0x2031: ur'\textpertenthousand{}', # PER TEN THOUSAND SIGN
0x2039: ur'\guilsinglleft{}',
0x203A: ur'\guilsinglright{}',
0x2423: ur'\textvisiblespace{}', # OPEN BOX
0x2020: ur'\dag{}',
0x2021: ur'\ddag{}',
0x2026: ur'\dots{}',
0x2122: ur'\texttrademark{}',
}
# recognized with 'utf8', if textcomp is loaded
textcomp = {
# Latin-1 Supplement
0x00a2: ur'\textcent{}', # ¢ CENT SIGN
0x00a4: ur'\textcurrency{}', # ¤ CURRENCY SYMBOL
0x00a5: ur'\textyen{}', # ¥ YEN SIGN
0x00a6: ur'\textbrokenbar{}', # ¦ BROKEN BAR
0x00a7: ur'\textsection{}', # § SECTION SIGN
0x00a8: ur'\textasciidieresis{}', # ¨ DIAERESIS
0x00a9: ur'\textcopyright{}', # © COPYRIGHT SIGN
0x00aa: ur'\textordfeminine{}', # ª FEMININE ORDINAL INDICATOR
0x00ac: ur'\textlnot{}', # ¬ NOT SIGN
0x00ae: ur'\textregistered{}', # ® REGISTERED SIGN
0x00af: ur'\textasciimacron{}', # ¯ MACRON
0x00b0: ur'\textdegree{}', # ° DEGREE SIGN
0x00b1: ur'\textpm{}', # ± PLUS-MINUS SIGN
0x00b2: ur'\texttwosuperior{}', # ² SUPERSCRIPT TWO
0x00b3: ur'\textthreesuperior{}', # ³ SUPERSCRIPT THREE
0x00b4: ur'\textasciiacute{}', # ´ ACUTE ACCENT
0x00b5: ur'\textmu{}', # µ MICRO SIGN
0x00b6: ur'\textparagraph{}', # ¶ PILCROW SIGN # != \textpilcrow
0x00b9: ur'\textonesuperior{}', # ¹ SUPERSCRIPT ONE
0x00ba: ur'\textordmasculine{}', # º MASCULINE ORDINAL INDICATOR
0x00bc: ur'\textonequarter{}', # 1/4 FRACTION
0x00bd: ur'\textonehalf{}', # 1/2 FRACTION
0x00be: ur'\textthreequarters{}', # 3/4 FRACTION
0x00d7: ur'\texttimes{}', # × MULTIPLICATION SIGN
0x00f7: ur'\textdiv{}', # ÷ DIVISION SIGN
# others
0x0192: ur'\textflorin{}', # LATIN SMALL LETTER F WITH HOOK
0x02b9: ur'\textasciiacute{}', # MODIFIER LETTER PRIME
0x02ba: ur'\textacutedbl{}', # MODIFIER LETTER DOUBLE PRIME
0x2016: ur'\textbardbl{}', # DOUBLE VERTICAL LINE
0x2022: ur'\textbullet{}', # BULLET
0x2032: ur'\textasciiacute{}', # PRIME
0x2033: ur'\textacutedbl{}', # DOUBLE PRIME
0x2035: ur'\textasciigrave{}', # REVERSED PRIME
0x2036: ur'\textgravedbl{}', # REVERSED DOUBLE PRIME
0x203b: ur'\textreferencemark{}', # REFERENCE MARK
0x203d: ur'\textinterrobang{}', # INTERROBANG
0x2044: ur'\textfractionsolidus{}', # FRACTION SLASH
0x2045: ur'\textlquill{}', # LEFT SQUARE BRACKET WITH QUILL
0x2046: ur'\textrquill{}', # RIGHT SQUARE BRACKET WITH QUILL
0x2052: ur'\textdiscount{}', # COMMERCIAL MINUS SIGN
0x20a1: ur'\textcolonmonetary{}', # COLON SIGN
0x20a3: ur'\textfrenchfranc{}', # FRENCH FRANC SIGN
0x20a4: ur'\textlira{}', # LIRA SIGN
0x20a6: ur'\textnaira{}', # NAIRA SIGN
0x20a9: ur'\textwon{}', # WON SIGN
0x20ab: ur'\textdong{}', # DONG SIGN
0x20ac: ur'\texteuro{}', # EURO SIGN
0x20b1: ur'\textpeso{}', # PESO SIGN
0x20b2: ur'\textguarani{}', # GUARANI SIGN
0x2103: ur'\textcelsius{}', # DEGREE CELSIUS
0x2116: ur'\textnumero{}', # NUMERO SIGN
0x2117: ur'\textcircledP{}', # SOUND RECORDING COPYRIGHT
0x211e: ur'\textrecipe{}', # PRESCRIPTION TAKE
0x2120: ur'\textservicemark{}', # SERVICE MARK
0x2122: ur'\texttrademark{}', # TRADE MARK SIGN
0x2126: ur'\textohm{}', # OHM SIGN
0x2127: ur'\textmho{}', # INVERTED OHM SIGN
0x212e: ur'\textestimated{}', # ESTIMATED SYMBOL
0x2190: ur'\textleftarrow{}', # LEFTWARDS ARROW
0x2191: ur'\textuparrow{}', # UPWARDS ARROW
0x2192: ur'\textrightarrow{}', # RIGHTWARDS ARROW
0x2193: ur'\textdownarrow{}', # DOWNWARDS ARROW
0x2212: ur'\textminus{}', # MINUS SIGN
0x2217: ur'\textasteriskcentered{}', # ASTERISK OPERATOR
0x221a: ur'\textsurd{}', # SQUARE ROOT
0x2422: ur'\textblank{}', # BLANK SYMBOL
0x25e6: ur'\textopenbullet{}', # WHITE BULLET
0x25ef: ur'\textbigcircle{}', # LARGE CIRCLE
0x266a: ur'\textmusicalnote{}', # EIGHTH NOTE
0x26ad: ur'\textmarried{}', # MARRIAGE SYMBOL
0x26ae: ur'\textdivorced{}', # DIVORCE SYMBOL
0x27e8: ur'\textlangle{}', # MATHEMATICAL LEFT ANGLE BRACKET
0x27e9: ur'\textrangle{}', # MATHEMATICAL RIGHT ANGLE BRACKET
}
# Unicode chars that require a feature/package to render
pifont = {
0x2665: ur'\ding{170}', # black heartsuit
0x2666: ur'\ding{169}', # black diamondsuit
0x2713: ur'\ding{51}', # check mark
0x2717: ur'\ding{55}', # ballot X (cross mark)
}
# TODO: greek alphabet ... ?
# see also LaTeX codec
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252124
# and unimap.py from TeXML
class DocumentClass(object):
"""Details of a LaTeX document class."""
def __init__(self, document_class, with_part=False):
self.document_class = document_class
self._with_part = with_part
self.sections = ['section', 'subsection', 'subsubsection',
'paragraph', 'subparagraph']
if self.document_class in ('book', 'memoir', 'report',
'scrbook', 'scrreprt'):
self.sections.insert(0, 'chapter')
if self._with_part:
self.sections.insert(0, 'part')
def section(self, level):
"""Return the LaTeX section name for section `level`.
The name depends on the specific document class.
Level is 1,2,3..., as level 0 is the title.
"""
if level <= len(self.sections):
return self.sections[level-1]
else: # unsupported levels
return 'DUtitle[section%s]' % roman.toRoman(level)
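# Illustrative mapping (sketch): DocumentClass('book').section(1) returns
# 'chapter' and section(2) returns 'section', while an out-of-range level
# such as section(7) falls back to 'DUtitle[sectionVII]'.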
class Table(object):
"""Manage a table while traversing.
Maybe change to a mixin defining the visit/departs, but then
class Table internal variables are in the Translator.
Table style might be
:standard: horizontal and vertical lines
:booktabs: only horizontal lines (requires "booktabs" LaTeX package)
:borderless: no borders around table cells
:nolines: alias for borderless
:colwidths-auto: column widths determined by LaTeX
:colwidths-given: use column widths from rST source
"""
def __init__(self, translator, latex_type):
self._translator = translator
self._latex_type = latex_type
self._open = False
# miscellaneous attributes
self._attrs = {}
self._col_width = []
self._rowspan = []
self.stubs = []
self.colwidths_auto = False
self._in_thead = 0
def open(self):
self._open = True
self._col_specs = []
self.caption = []
self._attrs = {}
self._in_head = False # maybe context with search
def close(self):
self._open = False
self._col_specs = None
self.caption = []
self._attrs = {}
self.stubs = []
self.colwidths_auto = False
def is_open(self):
return self._open
def set_table_style(self, table_style, classes):
borders = [cls.replace('nolines', 'borderless')
for cls in table_style+classes
if cls in ('standard','booktabs','borderless', 'nolines')]
try:
self.borders = borders[-1]
except IndexError:
self.borders = 'standard'
self.colwidths_auto = (('colwidths-auto' in classes
and 'colwidths-given' not in table_style)
or ('colwidths-auto' in table_style
and ('colwidths-given' not in classes)))
def get_latex_type(self):
if self._latex_type == 'longtable' and not self.caption:
# do not advance the "table" counter (requires "ltcaption" package)
return('longtable*')
return self._latex_type
def set(self,attr,value):
self._attrs[attr] = value
def get(self,attr):
if attr in self._attrs:
return self._attrs[attr]
return None
def get_vertical_bar(self):
if self.borders == 'standard':
return '|'
return ''
# horizontal lines are drawn below a row,
def get_opening(self):
align_map = {'left': 'l',
'center': 'c',
'right': 'r'}
align = align_map.get(self.get('align') or 'center')
opening = [r'\begin{%s}[%s]' % (self.get_latex_type(), align)]
if not self.colwidths_auto:
opening.insert(0, r'\setlength{\DUtablewidth}{\linewidth}')
return '\n'.join(opening)
def get_closing(self):
closing = []
if self.borders == 'booktabs':
closing.append(r'\bottomrule')
# elif self.borders == 'standard':
# closing.append(r'\hline')
closing.append(r'\end{%s}' % self.get_latex_type())
return '\n'.join(closing)
def visit_colspec(self, node):
self._col_specs.append(node)
# "stubs" list is an attribute of the tgroup element:
self.stubs.append(node.attributes.get('stub'))
def get_colspecs(self, node):
"""Return column specification for longtable.
Assumes a reST line length of 80 characters.
Table width is hairy.
=== ===
ABC DEF
=== ===
usually gets too narrow, therefore we add 1 (fiddle factor).
"""
bar = self.get_vertical_bar()
self._rowspan= [0] * len(self._col_specs)
self._col_width = []
if self.colwidths_auto:
latex_table_spec = (bar+'l')*len(self._col_specs)
return latex_table_spec+bar
width = 80
total_width = 0.0
# first see if we get too wide.
for node in self._col_specs:
colwidth = float(node['colwidth']+1) / width
total_width += colwidth
# do not make it full linewidth
factor = 0.93
if total_width > 1.0:
factor /= total_width
latex_table_spec = ''
for node in self._col_specs:
colwidth = factor * float(node['colwidth']+1) / width
self._col_width.append(colwidth+0.005)
latex_table_spec += '%sp{%.3f\\DUtablewidth}' % (bar, colwidth+0.005)
return latex_table_spec+bar
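# Worked example (sketch): for two colspecs with 'colwidth' 8 and 24 and
# standard borders, the relative widths are 0.93*(8+1)/80 ~ 0.105 and
# 0.93*(24+1)/80 ~ 0.291, so the returned spec is roughly
#   |p{0.110\DUtablewidth}|p{0.296\DUtablewidth}|
# (each width gets an extra 0.005 of padding).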
def get_column_width(self):
"""Return columnwidth for current cell (not multicell)."""
try:
return '%.2f\\DUtablewidth' % self._col_width[self._cell_in_row]
except IndexError:
return '*'
def get_multicolumn_width(self, start, len_):
"""Return sum of columnwidths for multicell."""
try:
mc_width = sum([width
for width in ([self._col_width[start + co]
for co in range (len_)])])
return 'p{%.2f\\DUtablewidth}' % mc_width
except IndexError:
return 'l'
def get_caption(self):
if not self.caption:
return ''
caption = ''.join(self.caption)
if 1 == self._translator.thead_depth():
return r'\caption{%s}\\' '\n' % caption
return r'\caption[]{%s (... continued)}\\' '\n' % caption
def need_recurse(self):
if self._latex_type == 'longtable':
return 1 == self._translator.thead_depth()
return 0
def visit_thead(self):
self._in_thead += 1
if self.borders == 'standard':
return ['\\hline\n']
elif self.borders == 'booktabs':
return ['\\toprule\n']
return []
def depart_thead(self):
a = []
#if self.borders == 'standard':
# a.append('\\hline\n')
if self.borders == 'booktabs':
a.append('\\midrule\n')
if self._latex_type == 'longtable':
if 1 == self._translator.thead_depth():
a.append('\\endfirsthead\n')
else:
a.append('\\endhead\n')
a.append(r'\multicolumn{%d}{c}' % len(self._col_specs) +
r'{\hfill ... continued on next page} \\')
a.append('\n\\endfoot\n\\endlastfoot\n')
# for longtable one could add firsthead, foot and lastfoot
self._in_thead -= 1
return a
def visit_row(self):
self._cell_in_row = 0
def depart_row(self):
res = [' \\\\\n']
self._cell_in_row = None # remove cell counter
for i in range(len(self._rowspan)):
if (self._rowspan[i]>0):
self._rowspan[i] -= 1
if self.borders == 'standard':
rowspans = [i+1 for i in range(len(self._rowspan))
if (self._rowspan[i]<=0)]
if len(rowspans)==len(self._rowspan):
res.append('\\hline\n')
else:
cline = ''
rowspans.reverse()
# TODO merge clines
while True:
try:
c_start = rowspans.pop()
except:
break
cline += '\\cline{%d-%d}\n' % (c_start,c_start)
res.append(cline)
return res
def set_rowspan(self,cell,value):
try:
self._rowspan[cell] = value
except:
pass
def get_rowspan(self,cell):
try:
return self._rowspan[cell]
except:
return 0
def get_entry_number(self):
return self._cell_in_row
def visit_entry(self):
self._cell_in_row += 1
def is_stub_column(self):
if len(self.stubs) >= self._cell_in_row:
return self.stubs[self._cell_in_row]
return False
class LaTeXTranslator(nodes.NodeVisitor):
# When options are given to the documentclass, latex will pass them
# to other packages, as done with babel.
# Dummy settings might be taken from document settings
# Write code for typesetting with 8-bit tex/pdftex (vs. xetex/luatex) engine
# overwritten by the XeTeX writer
is_xetex = False
# Config setting defaults
# -----------------------
# TODO: use mixins for different implementations.
# list environment for docinfo. else tabularx
## use_optionlist_for_docinfo = False # TODO: NOT YET IN USE
# Use compound enumerations (1.A.1.)
compound_enumerators = False
# If using compound enumerations, include section information.
section_prefix_for_enumerators = False
# This is the character that separates the section ("." subsection ...)
# prefix from the regular list enumerator.
section_enumerator_separator = '-'
# Auxiliary variables
# -------------------
has_latex_toc = False # is there a toc in the doc? (needed by minitoc)
is_toc_list = False # is the current bullet_list a ToC?
section_level = 0
# Flags to encode():
# inside citation reference labels underscores don't need to be escaped
inside_citation_reference_label = False
verbatim = False # do not encode
insert_non_breaking_blanks = False # replace blanks by "~"
insert_newline = False # add latex newline commands
literal = False # literal text (block or inline)
alltt = False # inside `alltt` environment
def __init__(self, document, babel_class=Babel):
nodes.NodeVisitor.__init__(self, document)
# Reporter
# ~~~~~~~~
self.warn = self.document.reporter.warning
self.error = self.document.reporter.error
# Settings
# ~~~~~~~~
self.settings = settings = document.settings
self.latex_encoding = self.to_latex_encoding(settings.output_encoding)
self.use_latex_toc = settings.use_latex_toc
self.use_latex_docinfo = settings.use_latex_docinfo
self._use_latex_citations = settings.use_latex_citations
self._reference_label = settings.reference_label
self.hyperlink_color = settings.hyperlink_color
self.compound_enumerators = settings.compound_enumerators
self.font_encoding = getattr(settings, 'font_encoding', '')
self.section_prefix_for_enumerators = (
settings.section_prefix_for_enumerators)
self.section_enumerator_separator = (
settings.section_enumerator_separator.replace('_', r'\_'))
# literal blocks:
self.literal_block_env = 'alltt'
self.literal_block_options = ''
if settings.literal_block_env != '':
(none,
self.literal_block_env,
self.literal_block_options,
none ) = re.split('(\w+)(.*)', settings.literal_block_env)
elif settings.use_verbatim_when_possible:
self.literal_block_env = 'verbatim'
#
if self.settings.use_bibtex:
self.bibtex = self.settings.use_bibtex.split(',',1)
# TODO avoid errors on not declared citations.
else:
self.bibtex = None
# language module for Docutils-generated text
# (labels, bibliographic_fields, and author_separators)
self.language_module = languages.get_language(settings.language_code,
document.reporter)
self.babel = babel_class(settings.language_code, document.reporter)
self.author_separator = self.language_module.author_separators[0]
d_options = [self.settings.documentoptions]
if self.babel.language not in ('english', ''):
d_options.append(self.babel.language)
self.documentoptions = ','.join(filter(None, d_options))
self.d_class = DocumentClass(settings.documentclass,
settings.use_part_section)
# graphic package options:
if self.settings.graphicx_option == '':
self.graphicx_package = r'\usepackage{graphicx}'
elif self.settings.graphicx_option.lower() == 'auto':
self.graphicx_package = PreambleCmds.graphicx_auto
else:
self.graphicx_package = (r'\usepackage[%s]{graphicx}' %
self.settings.graphicx_option)
# footnotes:
self.docutils_footnotes = settings.docutils_footnotes
# @@ table_style: list of values from fixed set: warn?
# for s in self.settings.table_style:
# if s not in Writer.table_style_values:
# self.warn('Ignoring value "%s" in "table-style" setting.' %s)
# Output collection stacks
# ~~~~~~~~~~~~~~~~~~~~~~~~
# Document parts
self.head_prefix = [r'\documentclass[%s]{%s}' %
(self.documentoptions, self.settings.documentclass)]
self.requirements = SortableDict() # made a list in depart_document()
self.requirements['__static'] = r'\usepackage{ifthen}'
self.latex_preamble = [settings.latex_preamble]
self.fallbacks = SortableDict() # made a list in depart_document()
self.pdfsetup = [] # PDF properties (hyperref package)
self.title = []
self.subtitle = []
self.titledata = [] # \title, \author, \date
## self.body_prefix = ['\\begin{document}\n']
self.body_pre_docinfo = [] # \maketitle
self.docinfo = []
self.dedication = []
self.abstract = []
self.body = []
## self.body_suffix = ['\\end{document}\n']
# A heterogeneous stack used in conjunction with the tree traversal.
# Make sure that the pops correspond to the pushes:
self.context = []
# Title metadata:
self.title_labels = []
self.subtitle_labels = []
# (if use_latex_docinfo: collects lists of
# author/organization/contact/address lines)
self.author_stack = []
self.date = []
# PDF properties: pdftitle, pdfauthor
# TODO?: pdfcreator, pdfproducer, pdfsubject, pdfkeywords
self.pdfinfo = []
self.pdfauthor = []
# Stack of section counters so that we don't have to use_latex_toc.
# This will grow and shrink as processing occurs.
# Initialized for potential first-level sections.
self._section_number = [0]
# The current stack of enumerations so that we can expand
# them into a compound enumeration.
self._enumeration_counters = []
# The maximum number of enumeration counters we've used.
# If we go beyond this number, we need to create a new
# counter; otherwise, just reuse an old one.
self._max_enumeration_counters = 0
self._bibitems = []
# object for a table while processing.
self.table_stack = []
self.active_table = Table(self, 'longtable')
# Where to collect the output of visitor methods (default: body)
self.out = self.body
self.out_stack = [] # stack of output collectors
# Process settings
# ~~~~~~~~~~~~~~~~
# Encodings:
# Docutils' output-encoding => TeX input encoding
if self.latex_encoding != 'ascii':
self.requirements['_inputenc'] = (r'\usepackage[%s]{inputenc}'
% self.latex_encoding)
# TeX font encoding
if not self.is_xetex:
if self.font_encoding:
self.requirements['_fontenc'] = (r'\usepackage[%s]{fontenc}' %
self.font_encoding)
# ensure \textquotedbl is defined:
for enc in self.font_encoding.split(','):
enc = enc.strip()
if enc == 'OT1':
self.requirements['_textquotedblOT1'] = (
r'\DeclareTextSymbol{\textquotedbl}{OT1}{`\"}')
elif enc not in ('T1', 'T2A', 'T2B', 'T2C', 'T4', 'T5'):
self.requirements['_textquotedbl'] = (
r'\DeclareTextSymbolDefault{\textquotedbl}{T1}')
# page layout with typearea (if there are relevant document options)
if (settings.documentclass.find('scr') == -1 and
(self.documentoptions.find('DIV') != -1 or
self.documentoptions.find('BCOR') != -1)):
self.requirements['typearea'] = r'\usepackage{typearea}'
# Stylesheets
# (the name `self.stylesheet` is singular because only one
# stylesheet was supported before Docutils 0.6).
self.stylesheet = [self.stylesheet_call(path)
for path in utils.get_stylesheet_list(settings)]
# PDF setup
if self.hyperlink_color in ('0', 'false', 'False', ''):
self.hyperref_options = ''
else:
self.hyperref_options = 'colorlinks=true,linkcolor=%s,urlcolor=%s' % (
self.hyperlink_color, self.hyperlink_color)
if settings.hyperref_options:
self.hyperref_options += ',' + settings.hyperref_options
# LaTeX Toc
# include all supported sections in toc and PDF bookmarks
# (or use documentclass-default (as currently))?
## if self.use_latex_toc:
## self.requirements['tocdepth'] = (r'\setcounter{tocdepth}{%d}' %
## len(self.d_class.sections))
# Section numbering
if settings.sectnum_xform: # section numbering by Docutils
PreambleCmds.secnumdepth = r'\setcounter{secnumdepth}{0}'
else: # section numbering by LaTeX:
secnumdepth = settings.sectnum_depth
# Possible values of settings.sectnum_depth:
# None "sectnum" directive without depth arg -> LaTeX default
# 0 no "sectnum" directive -> no section numbers
# >0 value of "depth" argument -> translate to LaTeX levels:
# -1 part (0 with "article" document class)
# 0 chapter (missing in "article" document class)
# 1 section
# 2 subsection
# 3 subsubsection
# 4 paragraph
# 5 subparagraph
if secnumdepth is not None:
# limit to supported levels
secnumdepth = min(secnumdepth, len(self.d_class.sections))
# adjust to document class and use_part_section settings
if 'chapter' in self.d_class.sections:
secnumdepth -= 1
if self.d_class.sections[0] == 'part':
secnumdepth -= 1
PreambleCmds.secnumdepth = \
r'\setcounter{secnumdepth}{%d}' % secnumdepth
# start with specified number:
if (hasattr(settings, 'sectnum_start') and
settings.sectnum_start != 1):
self.requirements['sectnum_start'] = (
r'\setcounter{%s}{%d}' % (self.d_class.sections[0],
settings.sectnum_start-1))
# TODO: currently ignored (configure in a stylesheet):
## settings.sectnum_prefix
## settings.sectnum_suffix
# Auxiliary Methods
# -----------------
def stylesheet_call(self, path):
"""Return code to reference or embed stylesheet file `path`"""
# is it a package (no extension or *.sty) or "normal" tex code:
(base, ext) = os.path.splitext(path)
is_package = ext in ['.sty', '']
# Embed content of style file:
if self.settings.embed_stylesheet:
if is_package:
path = base + '.sty' # ensure extension
try:
content = io.FileInput(source_path=path,
encoding='utf-8').read()
self.settings.record_dependencies.add(path)
except IOError, err:
msg = u"Cannot embed stylesheet '%s':\n %s." % (
path, SafeString(err.strerror))
self.document.reporter.error(msg)
return '% ' + msg.replace('\n', '\n% ')
if is_package:
content = '\n'.join([r'\makeatletter',
content,
r'\makeatother'])
return '%% embedded stylesheet: %s\n%s' % (path, content)
# Link to style file:
if is_package:
path = base # drop extension
cmd = r'\usepackage{%s}'
else:
cmd = r'\input{%s}'
if self.settings.stylesheet_path:
# adapt path relative to output (cf. config.html#stylesheet-path)
path = utils.relative_path(self.settings._destination, path)
return cmd % path
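    # Example (added comment, illustrative only): with ``embed_stylesheet``
    # off and ignoring the ``stylesheet_path`` adjustment,
    # ``stylesheet_call('preamble.sty')`` returns ``\usepackage{preamble}``
    # while ``stylesheet_call('extra.tex')`` returns ``\input{extra.tex}``.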
    def to_latex_encoding(self, docutils_encoding):
        """Translate docutils encoding name into LaTeX's.
        Default method is to remove "-" and "_" chars from docutils_encoding.
        """
tr = { 'iso-8859-1': 'latin1', # west european
'iso-8859-2': 'latin2', # east european
'iso-8859-3': 'latin3', # esperanto, maltese
'iso-8859-4': 'latin4', # north european, scandinavian, baltic
'iso-8859-5': 'iso88595', # cyrillic (ISO)
'iso-8859-9': 'latin5', # turkish
'iso-8859-15': 'latin9', # latin9, update to latin1.
'mac_cyrillic': 'maccyr', # cyrillic (on Mac)
'windows-1251': 'cp1251', # cyrillic (on Windows)
'koi8-r': 'koi8-r', # cyrillic (Russian)
'koi8-u': 'koi8-u', # cyrillic (Ukrainian)
'windows-1250': 'cp1250', #
'windows-1252': 'cp1252', #
'us-ascii': 'ascii', # ASCII (US)
# unmatched encodings
#'': 'applemac',
#'': 'ansinew', # windows 3.1 ansi
#'': 'ascii', # ASCII encoding for the range 32--127.
#'': 'cp437', # dos latin us
#'': 'cp850', # dos latin 1
#'': 'cp852', # dos latin 2
#'': 'decmulti',
#'': 'latin10',
#'iso-8859-6': '' # arabic
#'iso-8859-7': '' # greek
#'iso-8859-8': '' # hebrew
#'iso-8859-10': '' # latin6, more complete iso-8859-4
}
encoding = docutils_encoding.lower()
if encoding in tr:
return tr[encoding]
# drop hyphen or low-line from "latin-1", "latin_1", "utf-8" and similar
encoding = encoding.replace('_', '').replace('-', '')
# strip the error handler
return encoding.split(':')[0]
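    # Examples (added comment, illustrative only):
    #   to_latex_encoding('iso-8859-1')    -> 'latin1'  (table lookup)
    #   to_latex_encoding('utf-8')         -> 'utf8'    (hyphen dropped)
    #   to_latex_encoding('utf-8:replace') -> 'utf8'    (error handler stripped)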
def language_label(self, docutil_label):
return self.language_module.labels[docutil_label]
def encode(self, text):
"""Return text with 'problematic' characters escaped.
* Escape the special printing characters ``# $ % & ~ _ ^ \ { }``,
square brackets ``[ ]``, double quotes and (in OT1) ``< | >``.
* Translate non-supported Unicode characters.
* Separate ``-`` (and more in literal text) to prevent input ligatures.
"""
if self.verbatim:
return text
# Set up the translation table:
table = CharMaps.alltt.copy()
if not self.alltt:
table.update(CharMaps.special)
# keep the underscore in citation references
if self.inside_citation_reference_label:
            del table[ord('_')]
# Workarounds for OT1 font-encoding
if self.font_encoding in ['OT1', ''] and not self.is_xetex:
# * out-of-order characters in cmtt
if self.literal:
# replace underscore by underlined blank,
# because this has correct width.
table[ord('_')] = u'\\underline{~}'
# the backslash doesn't work, so we use a mirrored slash.
# \reflectbox is provided by graphicx:
self.requirements['graphicx'] = self.graphicx_package
table[ord('\\')] = ur'\reflectbox{/}'
# * ``< | >`` come out as different chars (except for cmtt):
else:
table[ord('|')] = ur'\textbar{}'
table[ord('<')] = ur'\textless{}'
table[ord('>')] = ur'\textgreater{}'
if self.insert_non_breaking_blanks:
table[ord(' ')] = ur'~'
# Unicode replacements for 8-bit tex engines (not required with XeTeX/LuaTeX):
if not self.is_xetex:
table.update(CharMaps.unsupported_unicode)
if not self.latex_encoding.startswith('utf8'):
table.update(CharMaps.utf8_supported_unicode)
table.update(CharMaps.textcomp)
table.update(CharMaps.pifont)
# Characters that require a feature/package to render
        if any(ord(ch) in CharMaps.textcomp for ch in text):
            self.requirements['textcomp'] = PreambleCmds.textcomp
        if any(ord(ch) in CharMaps.pifont for ch in text):
            self.requirements['pifont'] = '\\usepackage{pifont}'
text = text.translate(table)
# Break up input ligatures e.g. '--' to '-{}-'.
if not self.is_xetex: # Not required with xetex/luatex
separate_chars = '-'
            # In monospace font, we also separate ',,', '``' and "''" and some
# other characters which can't occur in non-literal text.
if self.literal:
separate_chars += ',`\'"<>'
for char in separate_chars * 2:
# Do it twice ("* 2") because otherwise we would replace
# '---' by '-{}--'.
text = text.replace(char + char, char + '{}' + char)
# Literal line breaks (in address or literal blocks):
if self.insert_newline:
lines = text.split('\n')
# Add a protected space to blank lines (except the last)
# to avoid ``! LaTeX Error: There's no line here to end.``
for i, line in enumerate(lines[:-1]):
if not line.lstrip():
lines[i] += '~'
text = (r'\\' + '\n').join(lines)
if self.literal and not self.insert_non_breaking_blanks:
# preserve runs of spaces but allow wrapping
            text = text.replace('  ', ' ~')
return text
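    # Example (added comment, assuming default settings: T1 font encoding,
    # an 8-bit TeX engine, and not inside literal or verbatim text):
    #   encode('50% & more_') -> '50\% \& more\_'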
def attval(self, text,
whitespace=re.compile('[\n\r\t\v\f]')):
"""Cleanse, encode, and return attribute value text."""
return self.encode(whitespace.sub(' ', text))
# TODO: is this used anywhere? -> update (use template) or delete
## def astext(self):
## """Assemble document parts and return as string."""
## head = '\n'.join(self.head_prefix + self.stylesheet + self.head)
## body = ''.join(self.body_prefix + self.body + self.body_suffix)
## return head + '\n' + body
def is_inline(self, node):
"""Check whether a node represents an inline or block-level element"""
return isinstance(node.parent, nodes.TextElement)
def append_hypertargets(self, node):
"""Append hypertargets for all ids of `node`"""
# hypertarget places the anchor at the target's baseline,
        # so we raise it explicitly
self.out.append('%\n'.join(['\\raisebox{1em}{\\hypertarget{%s}{}}' %
id for id in node['ids']]))
def ids_to_labels(self, node, set_anchor=True):
"""Return list of label definitions for all ids of `node`
If `set_anchor` is True, an anchor is set with \phantomsection.
"""
labels = ['\\label{%s}' % id for id in node.get('ids', [])]
if set_anchor and labels:
labels.insert(0, '\\phantomsection')
return labels
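    # Example (added comment): for a node with ids ['intro', 'sec-1'],
    # ids_to_labels(node) yields the LaTeX commands
    # \phantomsection, \label{intro} and \label{sec-1}.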
def push_output_collector(self, new_out):
self.out_stack.append(self.out)
self.out = new_out
def pop_output_collector(self):
self.out = self.out_stack.pop()
# Visitor methods
# ---------------
def visit_Text(self, node):
self.out.append(self.encode(node.astext()))
def depart_Text(self, node):
pass
def visit_abbreviation(self, node):
node['classes'].insert(0, 'abbreviation')
self.visit_inline(node)
def depart_abbreviation(self, node):
self.depart_inline(node)
def visit_acronym(self, node):
node['classes'].insert(0, 'acronym')
self.visit_inline(node)
def depart_acronym(self, node):
self.depart_inline(node)
def visit_address(self, node):
self.visit_docinfo_item(node, 'address')
def depart_address(self, node):
self.depart_docinfo_item(node)
def visit_admonition(self, node):
self.fallbacks['admonition'] = PreambleCmds.admonition
if 'error' in node['classes']:
self.fallbacks['error'] = PreambleCmds.error
# strip the generic 'admonition' from the list of classes
node['classes'] = [cls for cls in node['classes']
if cls != 'admonition']
self.out.append('\n\\DUadmonition[%s]{\n' % ','.join(node['classes']))
def depart_admonition(self, node=None):
self.out.append('}\n')
def visit_author(self, node):
self.visit_docinfo_item(node, 'author')
def depart_author(self, node):
self.depart_docinfo_item(node)
def visit_authors(self, node):
# not used: visit_author is called anyway for each author.
pass
def depart_authors(self, node):
pass
def visit_block_quote(self, node):
self.out.append( '%\n\\begin{quote}\n')
if node['classes']:
self.visit_inline(node)
def depart_block_quote(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append( '\n\\end{quote}\n')
def visit_bullet_list(self, node):
if self.is_toc_list:
self.out.append( '%\n\\begin{list}{}{}\n' )
else:
self.out.append( '%\n\\begin{itemize}\n' )
# if node['classes']:
# self.visit_inline(node)
def depart_bullet_list(self, node):
# if node['classes']:
# self.depart_inline(node)
if self.is_toc_list:
self.out.append( '\n\\end{list}\n' )
else:
self.out.append( '\n\\end{itemize}\n' )
def visit_superscript(self, node):
self.out.append(r'\textsuperscript{')
if node['classes']:
self.visit_inline(node)
def depart_superscript(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
def visit_subscript(self, node):
self.out.append(r'\textsubscript{') # requires `fixltx2e`
if node['classes']:
self.visit_inline(node)
def depart_subscript(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
def visit_caption(self, node):
self.out.append('\n\\caption{')
def depart_caption(self, node):
self.out.append('}\n')
def visit_title_reference(self, node):
self.fallbacks['titlereference'] = PreambleCmds.titlereference
self.out.append(r'\DUroletitlereference{')
if node['classes']:
self.visit_inline(node)
def depart_title_reference(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append( '}' )
def visit_citation(self, node):
# TODO maybe use cite bibitems
if self._use_latex_citations:
self.push_output_collector([])
else:
# TODO: do we need these?
## self.requirements['~fnt_floats'] = PreambleCmds.footnote_floats
self.out.append(r'\begin{figure}[b]')
self.append_hypertargets(node)
def depart_citation(self, node):
if self._use_latex_citations:
label = self.out[0]
text = ''.join(self.out[1:])
self._bibitems.append([label, text])
self.pop_output_collector()
else:
self.out.append('\\end{figure}\n')
def visit_citation_reference(self, node):
if self._use_latex_citations:
if not self.inside_citation_reference_label:
self.out.append(r'\cite{')
self.inside_citation_reference_label = 1
else:
assert self.body[-1] in (' ', '\n'),\
'unexpected non-whitespace while in reference label'
del self.body[-1]
else:
href = ''
if 'refid' in node:
href = node['refid']
elif 'refname' in node:
href = self.document.nameids[node['refname']]
self.out.append('\\hyperlink{%s}{[' % href)
def depart_citation_reference(self, node):
if self._use_latex_citations:
followup_citation = False
# check for a following citation separated by a space or newline
next_siblings = node.traverse(descend=False, siblings=True,
include_self=False)
if len(next_siblings) > 1:
next = next_siblings[0]
if (isinstance(next, nodes.Text) and
next.astext() in (' ', '\n')):
if next_siblings[1].__class__ == node.__class__:
followup_citation = True
if followup_citation:
self.out.append(',')
else:
self.out.append('}')
self.inside_citation_reference_label = False
else:
self.out.append(']}')
def visit_classifier(self, node):
self.out.append( '(\\textbf{' )
def depart_classifier(self, node):
self.out.append( '})\n' )
def visit_colspec(self, node):
self.active_table.visit_colspec(node)
def depart_colspec(self, node):
pass
def visit_comment(self, node):
# Precede every line with a comment sign, wrap in newlines
self.out.append('\n%% %s\n' % node.astext().replace('\n', '\n% '))
raise nodes.SkipNode
def depart_comment(self, node):
pass
def visit_compound(self, node):
pass
def depart_compound(self, node):
pass
def visit_contact(self, node):
self.visit_docinfo_item(node, 'contact')
def depart_contact(self, node):
self.depart_docinfo_item(node)
def visit_container(self, node):
pass
def depart_container(self, node):
pass
def visit_copyright(self, node):
self.visit_docinfo_item(node, 'copyright')
def depart_copyright(self, node):
self.depart_docinfo_item(node)
def visit_date(self, node):
self.visit_docinfo_item(node, 'date')
def depart_date(self, node):
self.depart_docinfo_item(node)
def visit_decoration(self, node):
# header and footer
pass
def depart_decoration(self, node):
pass
def visit_definition(self, node):
pass
def depart_definition(self, node):
self.out.append('\n')
def visit_definition_list(self, node):
self.out.append( '%\n\\begin{description}\n' )
def depart_definition_list(self, node):
self.out.append( '\\end{description}\n' )
def visit_definition_list_item(self, node):
pass
def depart_definition_list_item(self, node):
pass
def visit_description(self, node):
self.out.append(' ')
def depart_description(self, node):
pass
def visit_docinfo(self, node):
self.push_output_collector(self.docinfo)
def depart_docinfo(self, node):
self.pop_output_collector()
        # Some items (e.g. author) end up in other places
if self.docinfo:
# tabularx: automatic width of columns, no page breaks allowed.
self.requirements['tabularx'] = r'\usepackage{tabularx}'
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['docinfo'] = PreambleCmds.docinfo
#
self.docinfo.insert(0, '\n% Docinfo\n'
'\\begin{center}\n'
'\\begin{tabularx}{\\DUdocinfowidth}{lX}\n')
self.docinfo.append('\\end{tabularx}\n'
'\\end{center}\n')
def visit_docinfo_item(self, node, name):
if name == 'author':
self.pdfauthor.append(self.attval(node.astext()))
if self.use_latex_docinfo:
if name in ('author', 'organization', 'contact', 'address'):
# We attach these to the last author. If any of them precedes
# the first author, put them in a separate "author" group
                # (for lack of better semantics).
if name == 'author' or not self.author_stack:
self.author_stack.append([])
if name == 'address': # newlines are meaningful
self.insert_newline = True
text = self.encode(node.astext())
self.insert_newline = False
else:
text = self.attval(node.astext())
self.author_stack[-1].append(text)
raise nodes.SkipNode
elif name == 'date':
self.date.append(self.attval(node.astext()))
raise nodes.SkipNode
self.out.append('\\textbf{%s}: &\n\t' % self.language_label(name))
if name == 'address':
self.insert_newline = True
self.out.append('{\\raggedright\n')
self.context.append(' } \\\\\n')
else:
self.context.append(' \\\\\n')
def depart_docinfo_item(self, node):
self.out.append(self.context.pop())
# for address we did set insert_newline
self.insert_newline = False
def visit_doctest_block(self, node):
self.visit_literal_block(node)
def depart_doctest_block(self, node):
self.depart_literal_block(node)
def visit_document(self, node):
# titled document?
if (self.use_latex_docinfo or len(node) and
isinstance(node[0], nodes.title)):
self.title_labels += self.ids_to_labels(node, set_anchor=False)
def depart_document(self, node):
# Complete header with information gained from walkabout
# * language setup
if (self.babel.otherlanguages or
self.babel.language not in ('', 'english')):
self.requirements['babel'] = self.babel()
# * conditional requirements (before style sheet)
self.requirements = self.requirements.sortedvalues()
        # * conditional fallback definitions (after style sheet)
self.fallbacks = self.fallbacks.sortedvalues()
# * PDF properties
self.pdfsetup.append(PreambleCmds.linking % self.hyperref_options)
if self.pdfauthor:
authors = self.author_separator.join(self.pdfauthor)
self.pdfinfo.append(' pdfauthor={%s}' % authors)
if self.pdfinfo:
self.pdfsetup += [r'\hypersetup{'] + self.pdfinfo + ['}']
# Complete body
# * document title (with "use_latex_docinfo" also
# 'author', 'organization', 'contact', 'address' and 'date')
if self.title or (
self.use_latex_docinfo and (self.author_stack or self.date)):
# with the default template, titledata is written to the preamble
self.titledata.append('%%% Title Data')
# \title (empty \title prevents error with \maketitle)
if self.title:
self.title.insert(0, '\phantomsection%\n ')
title = [''.join(self.title)] + self.title_labels
if self.subtitle:
title += [r'\\ % subtitle',
r'\DUdocumentsubtitle{%s}' % ''.join(self.subtitle)
] + self.subtitle_labels
self.titledata.append(r'\title{%s}' % '%\n '.join(title))
# \author (empty \author prevents warning with \maketitle)
authors = ['\\\\\n'.join(author_entry)
for author_entry in self.author_stack]
self.titledata.append(r'\author{%s}' %
' \\and\n'.join(authors))
# \date (empty \date prevents defaulting to \today)
self.titledata.append(r'\date{%s}' % ', '.join(self.date))
# \maketitle in the body formats title with LaTeX
self.body_pre_docinfo.append('\\maketitle\n')
# * bibliography
# TODO insertion point of bibliography should be configurable.
if self._use_latex_citations and len(self._bibitems)>0:
if not self.bibtex:
widest_label = ''
for bi in self._bibitems:
if len(widest_label)<len(bi[0]):
widest_label = bi[0]
self.out.append('\n\\begin{thebibliography}{%s}\n' %
widest_label)
for bi in self._bibitems:
# cite_key: underscores must not be escaped
cite_key = bi[0].replace(r'\_','_')
self.out.append('\\bibitem[%s]{%s}{%s}\n' %
(bi[0], cite_key, bi[1]))
self.out.append('\\end{thebibliography}\n')
else:
self.out.append('\n\\bibliographystyle{%s}\n' %
self.bibtex[0])
self.out.append('\\bibliography{%s}\n' % self.bibtex[1])
# * make sure to generate a toc file if needed for local contents:
if 'minitoc' in self.requirements and not self.has_latex_toc:
self.out.append('\n\\faketableofcontents % for local ToCs\n')
def visit_emphasis(self, node):
self.out.append('\\emph{')
if node['classes']:
self.visit_inline(node)
def depart_emphasis(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
    # Append column delimiters and advance the column counter
    # if the current cell is a multi-row continuation.
def insert_additional_table_colum_delimiters(self):
while self.active_table.get_rowspan(
self.active_table.get_entry_number()):
self.out.append(' & ')
self.active_table.visit_entry() # increment cell count
def visit_entry(self, node):
# cell separation
if self.active_table.get_entry_number() == 0:
self.insert_additional_table_colum_delimiters()
else:
self.out.append(' & ')
# multirow, multicolumn
if 'morerows' in node and 'morecols' in node:
raise NotImplementedError('Cells that '
'span multiple rows *and* columns currently not supported, sorry.')
# TODO: should be possible with LaTeX, see e.g.
# http://texblog.org/2012/12/21/multi-column-and-multi-row-cells-in-latex-tables/
        # multirow in LaTeX simply enlarges the cell over several rows
        # (the following n rows if n is positive, the preceding ones if negative).
if 'morerows' in node:
self.requirements['multirow'] = r'\usepackage{multirow}'
mrows = node['morerows'] + 1
self.active_table.set_rowspan(
self.active_table.get_entry_number(), mrows)
self.out.append('\\multirow{%d}{%s}{' %
(mrows, self.active_table.get_column_width()))
self.context.append('}')
elif 'morecols' in node:
            # the vertical bar before the column is only written for the
            # first column; the bar after the column is always written.
if self.active_table.get_entry_number() == 0:
bar1 = self.active_table.get_vertical_bar()
else:
bar1 = ''
mcols = node['morecols'] + 1
self.out.append('\\multicolumn{%d}{%s%s%s}{' %
(mcols, bar1,
self.active_table.get_multicolumn_width(
self.active_table.get_entry_number(),
mcols),
self.active_table.get_vertical_bar()))
self.context.append('}')
else:
self.context.append('')
# bold header/stub-column
if len(node) and (isinstance(node.parent.parent, nodes.thead)
or self.active_table.is_stub_column()):
self.out.append('\\textbf{')
self.context.append('}')
else:
self.context.append('')
# if line ends with '{', mask line break to prevent spurious whitespace
if not self.active_table.colwidths_auto and self.out[-1].endswith("{"):
self.out.append("%")
self.active_table.visit_entry() # increment cell count
def depart_entry(self, node):
self.out.append(self.context.pop()) # header / not header
self.out.append(self.context.pop()) # multirow/column
# insert extra "&"s, if following rows are spanned from above:
self.insert_additional_table_colum_delimiters()
def visit_row(self, node):
self.active_table.visit_row()
def depart_row(self, node):
self.out.extend(self.active_table.depart_row())
def visit_enumerated_list(self, node):
# enumeration styles:
types = {'': '',
'arabic':'arabic',
'loweralpha':'alph',
'upperalpha':'Alph',
'lowerroman':'roman',
'upperroman':'Roman'}
        # the 4 default LaTeX enumeration labels: prefix, enumtype, suffix
labels = [('', 'arabic', '.'), # 1.
('(', 'alph', ')'), # (a)
('', 'roman', '.'), # i.
('', 'Alph', '.')] # A.
prefix = ''
if self.compound_enumerators:
if (self.section_prefix_for_enumerators and self.section_level
and not self._enumeration_counters):
prefix = '.'.join([str(n) for n in
self._section_number[:self.section_level]]
) + self.section_enumerator_separator
if self._enumeration_counters:
prefix += self._enumeration_counters[-1]
# TODO: use LaTeX default for unspecified label-type?
# (needs change of parser)
prefix += node.get('prefix', '')
        enumtype = types[node.get('enumtype', '')]
suffix = node.get('suffix', '')
enumeration_level = len(self._enumeration_counters)+1
counter_name = 'enum' + roman.toRoman(enumeration_level).lower()
label = r'%s\%s{%s}%s' % (prefix, enumtype, counter_name, suffix)
self._enumeration_counters.append(label)
if enumeration_level <= 4:
self.out.append('\\begin{enumerate}\n')
if (prefix, enumtype, suffix
) != labels[enumeration_level-1]:
self.out.append('\\renewcommand{\\label%s}{%s}\n' %
(counter_name, label))
else:
self.fallbacks[counter_name] = '\\newcounter{%s}' % counter_name
self.out.append('\\begin{list}')
self.out.append('{%s}' % label)
self.out.append('{\\usecounter{%s}}\n' % counter_name)
if 'start' in node:
self.out.append('\\setcounter{%s}{%d}\n' %
(counter_name,node['start']-1))
# ## set rightmargin equal to leftmargin
# self.out.append('\\setlength{\\rightmargin}{\\leftmargin}\n')
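        # Example (added comment, assuming default settings without compound
        # enumerators): a list written as "(a) ..." has prefix='(',
        # enumtype='loweralpha' and suffix=')', so at the first nesting level
        # the label becomes '(\alph{enumi})' and the writer emits
        #   \renewcommand{\labelenumi}{(\alph{enumi})}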
def depart_enumerated_list(self, node):
if len(self._enumeration_counters) <= 4:
self.out.append('\\end{enumerate}\n')
else:
self.out.append('\\end{list}\n')
self._enumeration_counters.pop()
def visit_field(self, node):
# real output is done in siblings: _argument, _body, _name
pass
def depart_field(self, node):
self.out.append('\n')
##self.out.append('%[depart_field]\n')
def visit_field_argument(self, node):
self.out.append('%[visit_field_argument]\n')
def depart_field_argument(self, node):
self.out.append('%[depart_field_argument]\n')
def visit_field_body(self, node):
pass
def depart_field_body(self, node):
if self.out is self.docinfo:
self.out.append(r'\\')
def visit_field_list(self, node):
if self.out is not self.docinfo:
self.fallbacks['fieldlist'] = PreambleCmds.fieldlist
self.out.append('%\n\\begin{DUfieldlist}\n')
def depart_field_list(self, node):
if self.out is not self.docinfo:
self.out.append('\\end{DUfieldlist}\n')
def visit_field_name(self, node):
if self.out is self.docinfo:
self.out.append('\\textbf{')
else:
# Commands with optional args inside an optional arg must be put
# in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
self.out.append('\\item[{')
def depart_field_name(self, node):
if self.out is self.docinfo:
self.out.append('}: &')
else:
self.out.append(':}]')
def visit_figure(self, node):
self.requirements['float_settings'] = PreambleCmds.float_settings
# The 'align' attribute sets the "outer alignment",
# for "inner alignment" use LaTeX default alignment (similar to HTML)
alignment = node.attributes.get('align', 'center')
if alignment != 'center':
# The LaTeX "figure" environment always uses the full linewidth,
# so "outer alignment" is ignored. Just write a comment.
# TODO: use the wrapfigure environment?
self.out.append('\n\\begin{figure} %% align = "%s"\n' % alignment)
else:
self.out.append('\n\\begin{figure}\n')
if node.get('ids'):
self.out += self.ids_to_labels(node) + ['\n']
def depart_figure(self, node):
self.out.append('\\end{figure}\n')
def visit_footer(self, node):
self.push_output_collector([])
self.out.append(r'\newcommand{\DUfooter}{')
def depart_footer(self, node):
self.out.append('}')
self.requirements['~footer'] = ''.join(self.out)
self.pop_output_collector()
def visit_footnote(self, node):
try:
backref = node['backrefs'][0]
except IndexError:
backref = node['ids'][0] # no backref, use self-ref instead
if self.docutils_footnotes:
self.fallbacks['footnotes'] = PreambleCmds.footnotes
num = node[0].astext()
if self.settings.footnote_references == 'brackets':
num = '[%s]' % num
self.out.append('%%\n\\DUfootnotetext{%s}{%s}{%s}{' %
(node['ids'][0], backref, self.encode(num)))
if node['ids'] == node['names']:
self.out += self.ids_to_labels(node)
# mask newline to prevent spurious whitespace if paragraph follows:
if node[1:] and isinstance(node[1], nodes.paragraph):
self.out.append('%')
## else: # TODO: "real" LaTeX \footnote{}s
def depart_footnote(self, node):
self.out.append('}\n')
def visit_footnote_reference(self, node):
href = ''
if 'refid' in node:
href = node['refid']
elif 'refname' in node:
href = self.document.nameids[node['refname']]
# if not self.docutils_footnotes:
# TODO: insert footnote content at (or near) this place
# print "footnote-ref to", node['refid']
# footnotes = (self.document.footnotes +
# self.document.autofootnotes +
# self.document.symbol_footnotes)
# for footnote in footnotes:
# # print footnote['ids']
# if node.get('refid', '') in footnote['ids']:
# print 'matches', footnote['ids']
format = self.settings.footnote_references
if format == 'brackets':
self.append_hypertargets(node)
self.out.append('\\hyperlink{%s}{[' % href)
self.context.append(']}')
else:
self.fallbacks['footnotes'] = PreambleCmds.footnotes
self.out.append(r'\DUfootnotemark{%s}{%s}{' %
(node['ids'][0], href))
self.context.append('}')
def depart_footnote_reference(self, node):
self.out.append(self.context.pop())
# footnote/citation label
def label_delim(self, node, bracket, superscript):
if isinstance(node.parent, nodes.footnote):
raise nodes.SkipNode
else:
assert isinstance(node.parent, nodes.citation)
if not self._use_latex_citations:
self.out.append(bracket)
def visit_label(self, node):
"""footnote or citation label: in brackets or as superscript"""
self.label_delim(node, '[', '\\textsuperscript{')
def depart_label(self, node):
self.label_delim(node, ']', '}')
# elements generated by the framework e.g. section numbers.
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_header(self, node):
self.push_output_collector([])
self.out.append(r'\newcommand{\DUheader}{')
def depart_header(self, node):
self.out.append('}')
self.requirements['~header'] = ''.join(self.out)
self.pop_output_collector()
def to_latex_length(self, length_str, pxunit=None):
"""Convert `length_str` with rst lenght to LaTeX length
"""
if pxunit is not None:
sys.stderr.write('deprecation warning: LaTeXTranslator.to_latex_length()'
' option `pxunit` will be removed.')
        match = re.match(r'(\d*\.?\d*)\s*(\S*)', length_str)
if not match:
return length_str
value, unit = match.groups()[:2]
# no unit or "DTP" points (called 'bp' in TeX):
if unit in ('', 'pt'):
length_str = '%sbp' % value
# percentage: relate to current line width
elif unit == '%':
length_str = '%.3f\\linewidth' % (float(value)/100.0)
elif self.is_xetex and unit == 'px':
# XeTeX does not know the length unit px.
# Use \pdfpxdimen, the macro to set the value of 1 px in pdftex.
# This way, configuring works the same for pdftex and xetex.
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['px'] = '\n\\DUprovidelength{\\pdfpxdimen}{1bp}\n'
length_str = r'%s\pdfpxdimen' % value
return length_str
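    # Examples (added comment, illustrative only):
    #   to_latex_length('120') -> '120bp'           (no unit/'pt': "DTP" points)
    #   to_latex_length('50%') -> '0.500\linewidth' (relative to line width)
    #   to_latex_length('2cm') -> '2cm'             (other units pass through)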
def visit_image(self, node):
self.requirements['graphicx'] = self.graphicx_package
attrs = node.attributes
# Convert image URI to a local file path
imagepath = urllib.url2pathname(attrs['uri']).replace('\\', '/')
# alignment defaults:
        if 'align' not in attrs:
# Set default align of image in a figure to 'center'
if isinstance(node.parent, nodes.figure):
attrs['align'] = 'center'
# query 'align-*' class argument
for cls in node['classes']:
if cls.startswith('align-'):
attrs['align'] = cls.split('-')[1]
# pre- and postfix (prefix inserted in reverse order)
pre = []
post = []
include_graphics_options = []
align_codes = {
# inline images: by default latex aligns the bottom.
'bottom': ('', ''),
'middle': (r'\raisebox{-0.5\height}{', '}'),
'top': (r'\raisebox{-\height}{', '}'),
# block level images:
'center': (r'\noindent\makebox[\linewidth][c]{', '}'),
'left': (r'\noindent{', r'\hfill}'),
'right': (r'\noindent{\hfill', '}'),}
if 'align' in attrs:
# TODO: warn or ignore non-applicable alignment settings?
try:
align_code = align_codes[attrs['align']]
pre.append(align_code[0])
post.append(align_code[1])
except KeyError:
pass # TODO: warn?
if 'height' in attrs:
include_graphics_options.append('height=%s' %
self.to_latex_length(attrs['height']))
if 'scale' in attrs:
include_graphics_options.append('scale=%f' %
(attrs['scale'] / 100.0))
if 'width' in attrs:
include_graphics_options.append('width=%s' %
self.to_latex_length(attrs['width']))
if not (self.is_inline(node) or
isinstance(node.parent, nodes.figure)):
pre.append('\n')
post.append('\n')
pre.reverse()
self.out.extend(pre)
options = ''
if include_graphics_options:
options = '[%s]' % (','.join(include_graphics_options))
self.out.append('\\includegraphics%s{%s}' % (options, imagepath))
self.out.extend(post)
def depart_image(self, node):
if node.get('ids'):
self.out += self.ids_to_labels(node) + ['\n']
def visit_inline(self, node): # <span>, i.e. custom roles
self.context.append('}' * len(node['classes']))
for cls in node['classes']:
if cls == 'align-center':
self.fallbacks['align-center'] = PreambleCmds.align_center
if cls.startswith('language-'):
language = self.babel.language_name(cls[9:])
if language:
self.babel.otherlanguages[language] = True
self.out.append(r'\foreignlanguage{%s}{' % language)
else:
self.fallbacks['inline'] = PreambleCmds.inline
self.out.append(r'\DUrole{%s}{' % cls)
def depart_inline(self, node):
self.out.append(self.context.pop())
def visit_interpreted(self, node):
# @@@ Incomplete, pending a proper implementation on the
# Parser/Reader end.
self.visit_literal(node)
def depart_interpreted(self, node):
self.depart_literal(node)
def visit_legend(self, node):
self.fallbacks['legend'] = PreambleCmds.legend
self.out.append('\\begin{DUlegend}')
def depart_legend(self, node):
self.out.append('\\end{DUlegend}\n')
def visit_line(self, node):
self.out.append('\item[] ')
def depart_line(self, node):
self.out.append('\n')
def visit_line_block(self, node):
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['lineblock'] = PreambleCmds.lineblock
if isinstance(node.parent, nodes.line_block):
self.out.append('\\item[]\n'
'\\begin{DUlineblock}{\\DUlineblockindent}\n')
else:
self.out.append('\n\\begin{DUlineblock}{0em}\n')
if node['classes']:
self.visit_inline(node)
self.out.append('\n')
def depart_line_block(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('\n')
self.out.append('\\end{DUlineblock}\n')
def visit_list_item(self, node):
self.out.append('\n\\item ')
def depart_list_item(self, node):
pass
def visit_literal(self, node):
self.literal = True
if 'code' in node['classes'] and (
self.settings.syntax_highlight != 'none'):
self.requirements['color'] = PreambleCmds.color
self.fallbacks['code'] = PreambleCmds.highlight_rules
self.out.append('\\texttt{')
if node['classes']:
self.visit_inline(node)
def depart_literal(self, node):
self.literal = False
if node['classes']:
self.depart_inline(node)
self.out.append('}')
# Literal blocks are used for '::'-prefixed literal-indented
# blocks of text, where the inline markup is not recognized,
# but are also the product of the "parsed-literal" directive,
# where the markup is respected.
#
# In both cases, we want to use a typewriter/monospaced typeface.
# For "real" literal-blocks, we can use \verbatim, while for all
# the others we must use \mbox or \alltt.
#
    # We can distinguish between the two kinds by the number of
    # children that compose this node: if it is composed of a
    # single element, it's either
# * a real one,
# * a parsed-literal that does not contain any markup, or
# * a parsed-literal containing just one markup construct.
def is_plaintext(self, node):
"""Check whether a node can be typeset verbatim"""
return (len(node) == 1) and isinstance(node[0], nodes.Text)
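    # Example (added comment): a parsed-literal block containing *emphasis*
    # has more than one child, so is_plaintext() returns False and the block
    # is typeset with \ttfamily instead of a verbatim environment.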
def visit_literal_block(self, node):
"""Render a literal block."""
# environments and packages to typeset literal blocks
packages = {'alltt': r'\usepackage{alltt}',
'listing': r'\usepackage{moreverb}',
'lstlisting': r'\usepackage{listings}',
'Verbatim': r'\usepackage{fancyvrb}',
# 'verbatim': '',
'verbatimtab': r'\usepackage{moreverb}'}
if node.get('ids'):
self.out += ['\n'] + self.ids_to_labels(node)
if not self.active_table.is_open():
# no quote inside tables, to avoid vertical space between
# table border and literal block.
# TODO: fails if normal text precedes the literal block.
# check parent node instead?
self.out.append('%\n\\begin{quote}\n')
self.context.append('\n\\end{quote}\n')
else:
self.out.append('\n')
self.context.append('\n')
if self.is_plaintext(node):
environment = self.literal_block_env
self.requirements['literal_block'] = packages.get(environment, '')
if environment == 'alltt':
self.alltt = True
else:
self.verbatim = True
self.out.append('\\begin{%s}%s\n' %
(environment, self.literal_block_options))
self.context.append('\n\\end{%s}' % environment)
else:
self.literal = True
self.insert_newline = True
self.insert_non_breaking_blanks = True
if 'code' in node['classes'] and (
self.settings.syntax_highlight != 'none'):
self.requirements['color'] = PreambleCmds.color
self.fallbacks['code'] = PreambleCmds.highlight_rules
self.out.append('{\\ttfamily \\raggedright \\noindent\n')
self.context.append('\n}')
def depart_literal_block(self, node):
self.insert_non_breaking_blanks = False
self.insert_newline = False
self.literal = False
self.verbatim = False
self.alltt = False
self.out.append(self.context.pop())
self.out.append(self.context.pop())
## def visit_meta(self, node):
## self.out.append('[visit_meta]\n')
# TODO: set keywords for pdf?
# But:
# The reStructuredText "meta" directive creates a "pending" node,
# which contains knowledge that the embedded "meta" node can only
# be handled by HTML-compatible writers. The "pending" node is
# resolved by the docutils.transforms.components.Filter transform,
# which checks that the calling writer supports HTML; if it doesn't,
# the "pending" node (and enclosed "meta" node) is removed from the
# document.
# --- docutils/docs/peps/pep-0258.html#transformer
## def depart_meta(self, node):
## self.out.append('[depart_meta]\n')
def visit_math(self, node, math_env='$'):
"""math role"""
if node['classes']:
self.visit_inline(node)
self.requirements['amsmath'] = r'\usepackage{amsmath}'
math_code = node.astext().translate(unichar2tex.uni2tex_table)
if node.get('ids'):
math_code = '\n'.join([math_code] + self.ids_to_labels(node))
if math_env == '$':
if self.alltt:
wrapper = u'\(%s\)'
else:
wrapper = u'$%s$'
else:
wrapper = u'\n'.join(['%%',
r'\begin{%s}' % math_env,
'%s',
r'\end{%s}' % math_env])
# print repr(wrapper), repr(math_code)
self.out.append(wrapper % math_code)
if node['classes']:
self.depart_inline(node)
# Content already processed:
raise nodes.SkipNode
def depart_math(self, node):
pass # never reached
def visit_math_block(self, node):
math_env = pick_math_environment(node.astext())
self.visit_math(node, math_env=math_env)
def depart_math_block(self, node):
pass # never reached
def visit_option(self, node):
if self.context[-1]:
# this is not the first option
self.out.append(', ')
def depart_option(self, node):
# flag that the first option is done.
self.context[-1] += 1
def visit_option_argument(self, node):
"""Append the delimiter betweeen an option and its argument to body."""
self.out.append(node.get('delimiter', ' '))
def depart_option_argument(self, node):
pass
def visit_option_group(self, node):
self.out.append('\n\\item[')
# flag for first option
self.context.append(0)
def depart_option_group(self, node):
self.context.pop() # the flag
self.out.append('] ')
def visit_option_list(self, node):
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['optionlist'] = PreambleCmds.optionlist
self.out.append('%\n\\begin{DUoptionlist}\n')
def depart_option_list(self, node):
self.out.append('\n\\end{DUoptionlist}\n')
def visit_option_list_item(self, node):
pass
def depart_option_list_item(self, node):
pass
def visit_option_string(self, node):
##self.out.append(self.starttag(node, 'span', '', CLASS='option'))
pass
def depart_option_string(self, node):
##self.out.append('</span>')
pass
def visit_organization(self, node):
self.visit_docinfo_item(node, 'organization')
def depart_organization(self, node):
self.depart_docinfo_item(node)
def visit_paragraph(self, node):
# insert blank line, unless
# * the paragraph is first in a list item,
# * follows a non-paragraph node in a compound,
# * is in a table with auto-width columns
index = node.parent.index(node)
if (index == 0 and (isinstance(node.parent, nodes.list_item) or
isinstance(node.parent, nodes.description))):
pass
elif (index > 0 and isinstance(node.parent, nodes.compound) and
not isinstance(node.parent[index - 1], nodes.paragraph) and
not isinstance(node.parent[index - 1], nodes.compound)):
pass
elif self.active_table.colwidths_auto:
if index == 1: # second paragraph
self.warn('LaTeX merges paragraphs in tables '
'with auto-sized columns!', base_node=node)
if index > 0:
self.out.append('\n')
else:
self.out.append('\n')
if node.get('ids'):
self.out += self.ids_to_labels(node) + ['\n']
if node['classes']:
self.visit_inline(node)
def depart_paragraph(self, node):
if node['classes']:
self.depart_inline(node)
if not self.active_table.colwidths_auto:
self.out.append('\n')
def visit_problematic(self, node):
self.requirements['color'] = PreambleCmds.color
self.out.append('%\n')
self.append_hypertargets(node)
self.out.append(r'\hyperlink{%s}{\textbf{\color{red}' % node['refid'])
def depart_problematic(self, node):
self.out.append('}}')
def visit_raw(self, node):
        if 'latex' not in node.get('format', '').split():
raise nodes.SkipNode
if not self.is_inline(node):
self.out.append('\n')
if node['classes']:
self.visit_inline(node)
# append "as-is" skipping any LaTeX-encoding
self.verbatim = True
def depart_raw(self, node):
self.verbatim = False
if node['classes']:
self.depart_inline(node)
if not self.is_inline(node):
self.out.append('\n')
def has_unbalanced_braces(self, string):
"""Test whether there are unmatched '{' or '}' characters."""
level = 0
for ch in string:
if ch == '{':
level += 1
if ch == '}':
level -= 1
if level < 0:
return True
return level != 0
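    # Examples (added comment): has_unbalanced_braces('{a}{b}') is False,
    # has_unbalanced_braces('a{b') is True (unclosed brace), and
    # has_unbalanced_braces('}{') is True (closing brace before opening one).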
def visit_reference(self, node):
# We need to escape #, \, and % if we use the URL in a command.
special_chars = {ord('#'): ur'\#',
ord('%'): ur'\%',
ord('\\'): ur'\\',
}
# external reference (URL)
if 'refuri' in node:
href = unicode(node['refuri']).translate(special_chars)
            # problematic characters: double caret and unbalanced braces:
if href.find('^^') != -1 or self.has_unbalanced_braces(href):
self.error(
'External link "%s" not supported by LaTeX.\n'
' (Must not contain "^^" or unbalanced braces.)' % href)
if node['refuri'] == node.astext():
self.out.append(r'\url{%s}' % href)
raise nodes.SkipNode
self.out.append(r'\href{%s}{' % href)
return
# internal reference
if 'refid' in node:
href = node['refid']
elif 'refname' in node:
href = self.document.nameids[node['refname']]
else:
raise AssertionError('Unknown reference.')
if not self.is_inline(node):
self.out.append('\n')
self.out.append('\\hyperref[%s]{' % href)
if self._reference_label:
self.out.append('\\%s{%s}}' %
(self._reference_label, href.replace('#', '')))
raise nodes.SkipNode
def depart_reference(self, node):
self.out.append('}')
if not self.is_inline(node):
self.out.append('\n')
def visit_revision(self, node):
self.visit_docinfo_item(node, 'revision')
def depart_revision(self, node):
self.depart_docinfo_item(node)
def visit_section(self, node):
self.section_level += 1
# Initialize counter for potential subsections:
self._section_number.append(0)
# Counter for this section's level (initialized by parent section):
self._section_number[self.section_level - 1] += 1
def depart_section(self, node):
# Remove counter for potential subsections:
self._section_number.pop()
self.section_level -= 1
def visit_sidebar(self, node):
self.requirements['color'] = PreambleCmds.color
self.fallbacks['sidebar'] = PreambleCmds.sidebar
self.out.append('\n\\DUsidebar{\n')
def depart_sidebar(self, node):
self.out.append('}\n')
attribution_formats = {'dash': (u'—', ''), # EM DASH
'parentheses': ('(', ')'),
'parens': ('(', ')'),
'none': ('', '')}
def visit_attribution(self, node):
prefix, suffix = self.attribution_formats[self.settings.attribution]
self.out.append('\\nopagebreak\n\n\\raggedleft ')
self.out.append(prefix)
self.context.append(suffix)
def depart_attribution(self, node):
self.out.append(self.context.pop() + '\n')
def visit_status(self, node):
self.visit_docinfo_item(node, 'status')
def depart_status(self, node):
self.depart_docinfo_item(node)
def visit_strong(self, node):
self.out.append('\\textbf{')
if node['classes']:
self.visit_inline(node)
def depart_strong(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
def visit_substitution_definition(self, node):
raise nodes.SkipNode
def visit_substitution_reference(self, node):
self.unimplemented_visit(node)
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.document):
self.push_output_collector(self.subtitle)
self.fallbacks['documentsubtitle'] = PreambleCmds.documentsubtitle
self.subtitle_labels += self.ids_to_labels(node, set_anchor=False)
# section subtitle: "starred" (no number, not in ToC)
elif isinstance(node.parent, nodes.section):
self.out.append(r'\%s*{' %
self.d_class.section(self.section_level + 1))
else:
self.fallbacks['subtitle'] = PreambleCmds.subtitle
self.out.append('\n\\DUsubtitle[%s]{' % node.parent.tagname)
def depart_subtitle(self, node):
if isinstance(node.parent, nodes.document):
self.pop_output_collector()
else:
self.out.append('}\n')
def visit_system_message(self, node):
self.requirements['color'] = PreambleCmds.color
self.fallbacks['title'] = PreambleCmds.title
node['classes'] = ['system-message']
self.visit_admonition(node)
self.out.append('\\DUtitle[system-message]{system-message}\n')
self.append_hypertargets(node)
try:
line = ', line~%s' % node['line']
except KeyError:
line = ''
self.out.append('\n\n{\color{red}%s/%s} in \\texttt{%s}%s\n' %
(node['type'], node['level'],
self.encode(node['source']), line))
if len(node['backrefs']) == 1:
self.out.append('\n\\hyperlink{%s}{' % node['backrefs'][0])
self.context.append('}')
else:
backrefs = ['\\hyperlink{%s}{%d}' % (href, i+1)
for (i, href) in enumerate(node['backrefs'])]
self.context.append('backrefs: ' + ' '.join(backrefs))
def depart_system_message(self, node):
self.out.append(self.context.pop())
self.depart_admonition()
def visit_table(self, node):
self.requirements['table'] = PreambleCmds.table
if self.active_table.is_open():
self.table_stack.append(self.active_table)
# nesting longtable does not work (e.g. 2007-04-18)
self.active_table = Table(self,'tabular')
# A longtable moves before \paragraph and \subparagraph
# section titles if it immediately follows them:
if (self.active_table._latex_type == 'longtable' and
isinstance(node.parent, nodes.section) and
node.parent.index(node) == 1 and
self.d_class.section(self.section_level).find('paragraph') != -1):
self.out.append('\\leavevmode')
self.active_table.open()
self.active_table.set_table_style(self.settings.table_style,
node['classes'])
if 'align' in node:
self.active_table.set('align', node['align'])
if self.active_table.borders == 'booktabs':
self.requirements['booktabs'] = r'\usepackage{booktabs}'
self.push_output_collector([])
def depart_table(self, node):
# wrap content in the right environment:
content = self.out
self.pop_output_collector()
self.out.append('\n' + self.active_table.get_opening())
self.out += content
self.out.append(self.active_table.get_closing() + '\n')
self.active_table.close()
if len(self.table_stack)>0:
self.active_table = self.table_stack.pop()
# Insert hyperlabel after (long)table, as
# other places (beginning, caption) result in LaTeX errors.
if node.get('ids'):
self.out += self.ids_to_labels(node, set_anchor=False) + ['\n']
def visit_target(self, node):
# Skip indirect targets:
if ('refuri' in node # external hyperlink
or 'refid' in node # resolved internal link
or 'refname' in node): # unresolved internal link
## self.out.append('%% %s\n' % node) # for debugging
return
self.out.append('%\n')
# do we need an anchor (\phantomsection)?
set_anchor = not(isinstance(node.parent, nodes.caption) or
isinstance(node.parent, nodes.title))
# TODO: where else can/must we omit the \phantomsection?
self.out += self.ids_to_labels(node, set_anchor)
def depart_target(self, node):
pass
def visit_tbody(self, node):
# BUG write preamble if not yet done (colspecs not [])
# for tables without heads.
if not self.active_table.get('preamble written'):
self.visit_thead(node)
self.depart_thead(None)
def depart_tbody(self, node):
pass
def visit_term(self, node):
"""definition list term"""
# Commands with optional args inside an optional arg must be put
# in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
self.out.append('\\item[{')
def depart_term(self, node):
# \leavevmode results in a line break if the
# term is followed by an item list.
self.out.append('}] \leavevmode ')
def visit_tgroup(self, node):
#self.out.append(self.starttag(node, 'colgroup'))
#self.context.append('</colgroup>\n')
pass
def depart_tgroup(self, node):
pass
_thead_depth = 0
def thead_depth (self):
return self._thead_depth
def visit_thead(self, node):
self._thead_depth += 1
if 1 == self.thead_depth():
self.out.append('{%s}\n' % self.active_table.get_colspecs(node))
self.active_table.set('preamble written',1)
self.out.append(self.active_table.get_caption())
self.out.extend(self.active_table.visit_thead())
def depart_thead(self, node):
if node is not None:
self.out.extend(self.active_table.depart_thead())
if self.active_table.need_recurse():
node.walkabout(self)
self._thead_depth -= 1
def visit_title(self, node):
"""Append section and other titles."""
# Document title
if node.parent.tagname == 'document':
self.push_output_collector(self.title)
self.context.append('')
self.pdfinfo.append(' pdftitle={%s},' %
self.encode(node.astext()))
# Topic titles (topic, admonition, sidebar)
elif (isinstance(node.parent, nodes.topic) or
isinstance(node.parent, nodes.admonition) or
isinstance(node.parent, nodes.sidebar)):
self.fallbacks['title'] = PreambleCmds.title
classes = ','.join(node.parent['classes'])
if not classes:
classes = node.tagname
self.out.append('\\DUtitle[%s]{' % classes)
self.context.append('}\n')
# Table caption
elif isinstance(node.parent, nodes.table):
self.push_output_collector(self.active_table.caption)
self.context.append('')
# Section title
else:
if hasattr(PreambleCmds, 'secnumdepth'):
self.requirements['secnumdepth'] = PreambleCmds.secnumdepth
section_name = self.d_class.section(self.section_level)
self.out.append('\n\n')
# System messages heading in red:
if ('system-messages' in node.parent['classes']):
self.requirements['color'] = PreambleCmds.color
section_title = self.encode(node.astext())
self.out.append(r'\%s[%s]{\color{red}' % (
section_name,section_title))
else:
self.out.append(r'\%s{' % section_name)
if self.section_level > len(self.d_class.sections):
# section level not supported by LaTeX
self.fallbacks['title'] = PreambleCmds.title
# self.out.append('\\phantomsection%\n ')
# label and ToC entry:
bookmark = ['']
# add sections with unsupported level to toc and pdfbookmarks?
## if self.section_level > len(self.d_class.sections):
## section_title = self.encode(node.astext())
## bookmark.append(r'\addcontentsline{toc}{%s}{%s}' %
## (section_name, section_title))
bookmark += self.ids_to_labels(node.parent, set_anchor=False)
self.context.append('%\n '.join(bookmark) + '%\n}\n')
        # MAYBE postfix paragraph and subparagraph with \leavevmode to
# ensure floats stay in the section and text starts on a new line.
def depart_title(self, node):
self.out.append(self.context.pop())
if (isinstance(node.parent, nodes.table) or
node.parent.tagname == 'document'):
self.pop_output_collector()
def minitoc(self, node, title, depth):
"""Generate a local table of contents with LaTeX package minitoc"""
section_name = self.d_class.section(self.section_level)
# name-prefix for current section level
minitoc_names = {'part': 'part', 'chapter': 'mini'}
if 'chapter' not in self.d_class.sections:
minitoc_names['section'] = 'sect'
try:
minitoc_name = minitoc_names[section_name]
except KeyError: # minitoc only supports part- and toplevel
self.warn('Skipping local ToC at %s level.\n' % section_name +
' Feature not supported with option "use-latex-toc"',
base_node=node)
return
# Requirements/Setup
self.requirements['minitoc'] = PreambleCmds.minitoc
self.requirements['minitoc-'+minitoc_name] = (r'\do%stoc' %
minitoc_name)
# depth: (Docutils defaults to unlimited depth)
maxdepth = len(self.d_class.sections)
self.requirements['minitoc-%s-depth' % minitoc_name] = (
r'\mtcsetdepth{%stoc}{%d}' % (minitoc_name, maxdepth))
# Process 'depth' argument (!Docutils stores a relative depth while
# minitoc expects an absolute depth!):
offset = {'sect': 1, 'mini': 0, 'part': 0}
if 'chapter' in self.d_class.sections:
offset['part'] = -1
if depth:
self.out.append('\\setcounter{%stocdepth}{%d}' %
(minitoc_name, depth + offset[minitoc_name]))
# title:
self.out.append('\\mtcsettitle{%stoc}{%s}\n' % (minitoc_name, title))
# the toc-generating command:
self.out.append('\\%stoc\n' % minitoc_name)
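    # Example (added comment, illustrative): a local ToC at "chapter" level
    # maps to minitoc_name 'mini', so the preamble gets \dominitoc and
    # \mtcsetdepth{minitoc}{...} while the body gets \mtcsettitle{minitoc}{...}
    # followed by \minitoc.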
def visit_topic(self, node):
# Topic nodes can be generic topic, abstract, dedication, or ToC.
# table of contents:
if 'contents' in node['classes']:
self.out.append('\n')
self.out += self.ids_to_labels(node)
# add contents to PDF bookmarks sidebar
if isinstance(node.next_node(), nodes.title):
self.out.append('\n\\pdfbookmark[%d]{%s}{%s}\n' %
(self.section_level+1,
node.next_node().astext(),
node.get('ids', ['contents'])[0]
))
if self.use_latex_toc:
title = ''
if isinstance(node.next_node(), nodes.title):
title = self.encode(node.pop(0).astext())
depth = node.get('depth', 0)
if 'local' in node['classes']:
self.minitoc(node, title, depth)
self.context.append('')
return
if depth:
self.out.append('\\setcounter{tocdepth}{%d}\n' % depth)
if title != 'Contents':
self.out.append('\\renewcommand{\\contentsname}{%s}\n' %
title)
self.out.append('\\tableofcontents\n\n')
self.has_latex_toc = True
else: # Docutils generated contents list
# set flag for visit_bullet_list() and visit_title()
self.is_toc_list = True
self.context.append('')
elif ('abstract' in node['classes'] and
self.settings.use_latex_abstract):
self.push_output_collector(self.abstract)
self.out.append('\\begin{abstract}')
self.context.append('\\end{abstract}\n')
if isinstance(node.next_node(), nodes.title):
node.pop(0) # LaTeX provides its own title
else:
self.fallbacks['topic'] = PreambleCmds.topic
# special topics:
if 'abstract' in node['classes']:
self.fallbacks['abstract'] = PreambleCmds.abstract
self.push_output_collector(self.abstract)
if 'dedication' in node['classes']:
self.fallbacks['dedication'] = PreambleCmds.dedication
self.push_output_collector(self.dedication)
self.out.append('\n\\DUtopic[%s]{\n' % ','.join(node['classes']))
self.context.append('}\n')
def depart_topic(self, node):
self.out.append(self.context.pop())
self.is_toc_list = False
if ('abstract' in node['classes'] or
'dedication' in node['classes']):
self.pop_output_collector()
def visit_rubric(self, node):
self.fallbacks['rubric'] = PreambleCmds.rubric
self.out.append('\n\\DUrubric{')
self.context.append('}\n')
def depart_rubric(self, node):
self.out.append(self.context.pop())
def visit_transition(self, node):
self.fallbacks['transition'] = PreambleCmds.transition
self.out.append('\n\n')
self.out.append('%' + '_' * 75 + '\n')
self.out.append(r'\DUtransition')
self.out.append('\n\n')
def depart_transition(self, node):
pass
def visit_version(self, node):
self.visit_docinfo_item(node, 'version')
def depart_version(self, node):
self.depart_docinfo_item(node)
def unimplemented_visit(self, node):
raise NotImplementedError('visiting unimplemented node type: %s' %
node.__class__.__name__)
# def unknown_visit(self, node):
# def default_visit(self, node):
# vim: set ts=4 et ai :
|
axbaretto/beam
|
sdks/python/.tox/docs/lib/python2.7/site-packages/docutils/writers/latex2e/__init__.py
|
Python
|
apache-2.0
| 124,941
|
[
"VisIt"
] |
0f81e1b18f62ff70571352f174ae0e7dc4085a110c621ad1808e6253022c5bc7
|