#!python
# coding=utf-8
import os
import re
import itertools
from datetime import datetime
from .utils import all_subclasses, is_url
from .dataset import EnhancedDataset
from . import logger
class CFDataset(EnhancedDataset):
default_fill_value = -9999.9
default_time_unit = 'seconds since 1990-01-01 00:00:00Z'
@classmethod
def load(cls, path):
"""Attempt to load a netCDF file as a CF compatible dataset
Extended description of function.
Parameters
----------
path :
Path to netCDF file
Returns
-------
CFDataset subclass for your netCDF file
Raises
------
ValueError
If no suitable class is found for your dataset
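Examples
--------
Illustrative usage, assuming ``some.nc`` is a CF-style netCDF file::

    dsg = CFDataset.load('some.nc')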
"""
if not is_url(path):
path = os.path.realpath(path)
subs = list(all_subclasses(cls))
dsg = None
try:
dsg = cls(path)
for klass in subs:
logger.debug('Trying {}...'.format(klass.__name__))
if hasattr(klass, 'is_mine'):
if klass.is_mine(dsg):
return klass(path)
except OSError:
raise
finally:
if hasattr(dsg, 'close'):
dsg.close()
subnames = ', '.join([ s.__name__ for s in subs ])
raise ValueError(
'Could not open {} as any type of CF Dataset. Tried: {}.'.format(
path,
subnames
)
)
def axes(self, name):
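# Thin dispatcher: e.g. dset.axes('T') resolves to dset.t_axes()
# ('dset' being any CFDataset instance; illustrative).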
return getattr(self, '{}_axes'.format(name.lower()))()
def t_axes(self):
# If there is only one variable with the axis parameter, return it
hasaxis = self.filter_by_attrs(axis=lambda x: x and str(x).lower() == 't')
if len(hasaxis) == 1:
return hasaxis
tvars = list(set(itertools.chain(
hasaxis,
self.filter_by_attrs(standard_name=lambda x: x in ['time', 'forecast_reference_time'])
)))
return tvars
def x_axes(self):
"""
CF X axis will have one of the following:
* The `axis` property has the value ``'X'``
* Units of longitude (see `cf.Units.islongitude` for details)
* The `standard_name` property is one of ``'longitude'``,
``'projection_x_coordinate'`` or ``'grid_longitude'``
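Matching is case-insensitive; e.g. a variable with ``axis = 'X'`` or
``units = 'degrees_east'`` is returned (illustrative attribute values).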
"""
xnames = ['longitude', 'grid_longitude', 'projection_x_coordinate']
xunits = [
'degrees_east',
'degree_east',
'degree_E',
'degrees_E',
'degreeE',
'degreesE'
]
# If there is only one variable with the axis parameter, return it
hasaxis = self.filter_by_attrs(axis=lambda x: x and str(x).lower() == 'x')
if len(hasaxis) == 1:
return hasaxis
xvars = list(set(itertools.chain(
hasaxis,
self.filter_by_attrs(standard_name=lambda x: x and str(x).lower() in xnames),
self.filter_by_attrs(units=lambda x: x and str(x).lower() in xunits)
)))
return xvars
def y_axes(self):
ynames = ['latitude', 'grid_latitude', 'projection_y_coordinate']
yunits = [
'degrees_north',
'degree_north',
'degree_N',
'degrees_N',
'degreeN',
'degreesN'
]
# If there is only one variable with the axis parameter, return it
hasaxis = self.filter_by_attrs(axis=lambda x: x and str(x).lower() == 'y')
if len(hasaxis) == 1:
return hasaxis
yvars = list(set(itertools.chain(
hasaxis,
self.filter_by_attrs(standard_name=lambda x: x and str(x).lower() in ynames),
self.filter_by_attrs(units=lambda x: x and str(x).lower() in yunits)
)))
return yvars
def z_axes(self):
znames = [
'atmosphere_ln_pressure_coordinate',
'atmosphere_sigma_coordinate',
'atmosphere_hybrid_sigma_pressure_coordinate',
'atmosphere_hybrid_height_coordinate',
'atmosphere_sleve_coordinate',
'ocean_sigma_coordinate',
'ocean_s_coordinate',
'ocean_s_coordinate_g1',
'ocean_s_coordinate_g2',
'ocean_sigma_z_coordinate',
'ocean_double_sigma_coordinate'
]
# If there is only one variable with the axis parameter, return it
hasaxis = self.filter_by_attrs(axis=lambda x: x and str(x).lower() == 'z')
if len(hasaxis) == 1:
return hasaxis
zvars = list(set(itertools.chain(
hasaxis,
self.filter_by_attrs(positive=lambda x: x and str(x).lower() in ['up', 'down']),
self.filter_by_attrs(standard_name=lambda x: x and str(x).lower() in znames)
)))
return zvars
def is_valid(self, *args, **kwargs):
return self.__class__.is_mine(self, *args, **kwargs)
def data_vars(self):
return self.filter_by_attrs(
coordinates=lambda x: x is not None,
units=lambda x: x is not None,
standard_name=lambda x: x is not None,
flag_values=lambda x: x is None,
flag_masks=lambda x: x is None,
flag_meanings=lambda x: x is None
)
def ancillary_vars(self):
ancillary_variables = []
for rv in self.filter_by_attrs(
ancillary_variables=lambda x: x is not None
):
# Space separated ancillary variables
for av in rv.ancillary_variables.split(' '):
if av in self.variables:
ancillary_variables.append(self.variables[av])
return list(set(ancillary_variables))
def nc_attributes(self):
return {
'global' : {
'Conventions': 'CF-1.6',
'date_created': datetime.utcnow().strftime("%Y-%m-%dT%H:%M:00Z"),
}
}
def cf_safe_name(name):
if isinstance(name, str):
if re.match('^[0-9_]', name):
# Add a letter to the front
name = "v_{}".format(name)
return re.sub(r'[^_a-zA-Z0-9]', "_", name)
raise ValueError('Could not convert "{}" to a safe name'.format(name))
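# Example (illustrative): cf_safe_name('9 temp') -> 'v_9_temp'
# (a leading digit or underscore gains a 'v_' prefix; any other unsafe
# character becomes '_').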
# Source: pyoceans/pocean-core, pocean/cf.py (Python, MIT license)
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2008-2009 Gary Burton
# Copyright (C) 2008 Robert Cheramy <robert@cheramy.net>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2012 Doug Blank <doug.blank@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"Export to GEDCOM"
#-------------------------------------------------------------------------
#
# Standard Python Modules
#
#-------------------------------------------------------------------------
import os
import time
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.lib import (AttributeType, ChildRefType, Citation, Date,
EventRoleType, EventType, LdsOrd, NameType,
PlaceType, NoteType, Person, UrlType)
from gramps.version import VERSION
import gramps.plugins.lib.libgedcom as libgedcom
from gramps.gen.errors import DatabaseError
# keep the following line even though not obviously used (works on import)
from gramps.gui.plug.export import WriterOptionBox
from gramps.gen.updatecallback import UpdateCallback
from gramps.gen.utils.file import media_path_full
from gramps.gen.utils.place import conv_lat_lon
from gramps.gen.utils.location import get_main_location
from gramps.gen.display.place import displayer as _pd
#-------------------------------------------------------------------------
#
# GEDCOM tags representing attributes that may take a parameter, value or
# description on the same line as the tag
#
#-------------------------------------------------------------------------
NEEDS_PARAMETER = set(
["CAST", "DSCR", "EDUC", "IDNO", "NATI", "NCHI",
"NMR", "OCCU", "PROP", "RELI", "SSN", "TITL"])
LDS_ORD_NAME = {
LdsOrd.BAPTISM : 'BAPL',
LdsOrd.ENDOWMENT : 'ENDL',
LdsOrd.SEAL_TO_PARENTS : 'SLGC',
LdsOrd.SEAL_TO_SPOUSE : 'SLGS',
LdsOrd.CONFIRMATION : 'CONL',
}
LDS_STATUS = {
LdsOrd.STATUS_BIC : "BIC",
LdsOrd.STATUS_CANCELED : "CANCELED",
LdsOrd.STATUS_CHILD : "CHILD",
LdsOrd.STATUS_CLEARED : "CLEARED",
LdsOrd.STATUS_COMPLETED : "COMPLETED",
LdsOrd.STATUS_DNS : "DNS",
LdsOrd.STATUS_INFANT : "INFANT",
LdsOrd.STATUS_PRE_1970 : "PRE-1970",
LdsOrd.STATUS_QUALIFIED : "QUALIFIED",
LdsOrd.STATUS_DNS_CAN : "DNS/CAN",
LdsOrd.STATUS_STILLBORN : "STILLBORN",
LdsOrd.STATUS_SUBMITTED : "SUBMITTED",
LdsOrd.STATUS_UNCLEARED : "UNCLEARED",
}
LANGUAGES = {
'cs' : 'Czech', 'da' : 'Danish', 'nl' : 'Dutch', 'en' : 'English',
'eo' : 'Esperanto', 'fi' : 'Finnish', 'fr' : 'French', 'de' : 'German',
'hu' : 'Hungarian', 'it' : 'Italian', 'lt' : 'Lithuanian',
'lv' : 'Latvian', 'no' : 'Norwegian', 'pl' : 'Polish',
'pt' : 'Portuguese', 'ro' : 'Romanian', 'sk' : 'Slovak',
'es' : 'Spanish', 'sv' : 'Swedish', 'ru' : 'Russian', }
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
QUALITY_MAP = {
Citation.CONF_VERY_HIGH : "3",
Citation.CONF_HIGH : "2",
Citation.CONF_LOW : "1",
Citation.CONF_VERY_LOW : "0",
}
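# Maps Gramps citation confidence to the GEDCOM QUAY value, e.g.
# CONF_HIGH is written as "2". CONF_NORMAL has no entry and no QUAY
# line is emitted for it (see _source_ref_record).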
PEDIGREE_TYPES = {
ChildRefType.BIRTH : 'birth',
ChildRefType.STEPCHILD: 'Step',
ChildRefType.ADOPTED : 'Adopted',
ChildRefType.FOSTER : 'Foster',
}
NOTES_PER_PERSON = 104 # fudge factor to make progress meter a bit smoother
#-------------------------------------------------------------------------
#
# sort_handles_by_id
#
#-------------------------------------------------------------------------
def sort_handles_by_id(handle_list, handle_to_object):
"""
Sort a list of handles by the Gramps ID.
The function that returns the object from the handle needs to be supplied
so that we get the right object.
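Returns a list of (gramps_id, handle) tuples sorted by ID, for
example [('I0001', handle_a), ('I0002', handle_b)] (illustrative).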
"""
sorted_list = []
for handle in handle_list:
obj = handle_to_object(handle)
if obj:
data = (obj.get_gramps_id(), handle)
sorted_list.append(data)
sorted_list.sort()
return sorted_list
#-------------------------------------------------------------------------
#
# breakup
#
#-------------------------------------------------------------------------
def breakup(txt, limit):
"""
Break a line of text into a list of strings that conform to the
maximum length specified, avoiding break points that sit next to a
space (which GEDCOM CONC handling can mangle) or inside a multi-byte
UTF-8 sequence.
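For example (illustrative): breakup('abcdef', 4) returns
['abcd', 'ef'].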
"""
if limit < 1:
raise ValueError("breakup: unexpected limit: %r" % limit)
data = []
while len(txt) > limit:
# look for non-space pair to break between
# do not break within a UTF-8 byte sequence, i. e. first char >127
idx = limit
while (idx > 0 and (txt[idx - 1].isspace() or txt[idx].isspace() or
ord(txt[idx - 1]) > 127)):
idx -= 1
if idx == 0:
#no words to break on, just break at limit anyway
idx = limit
data.append(txt[:idx])
txt = txt[idx:]
if len(txt) > 0:
data.append(txt)
return data
#-------------------------------------------------------------------------
#
# event_has_subordinate_data
# may want to compare description w/ auto-generated one, and
# if so, treat it same as if it were empty for this purpose
#
#-------------------------------------------------------------------------
def event_has_subordinate_data(event, event_ref):
""" determine if event is empty or not """
if event and event_ref:
return (event.get_description().strip() or
not event.get_date_object().is_empty() or
event.get_place_handle() or
event.get_attribute_list() or
event_ref.get_attribute_list() or
event.get_note_list() or
event.get_citation_list() or
event.get_media_list())
else:
return False
#-------------------------------------------------------------------------
#
# GedcomWriter class
#
#-------------------------------------------------------------------------
class GedcomWriter(UpdateCallback):
"""
The GEDCOM writer creates a GEDCOM file that contains the exported
information from the database. It derives from UpdateCallback
so that it can provide visual feedback via a progress bar if needed.
"""
def __init__(self, database, user, option_box=None):
UpdateCallback.__init__(self, user.callback)
self.dbase = database
self.dirname = None
self.gedcom_file = None
self.progress_cnt = 0
self.setup(option_box)
def setup(self, option_box):
"""
If the option_box is present (GUI interface), then we check the
"private", "restrict", and "cfilter" arguments to see if we need
to apply proxy databases.
"""
if option_box:
option_box.parse_options()
self.dbase = option_box.get_filtered_database(self.dbase, self)
def write_gedcom_file(self, filename):
"""
Write the actual GEDCOM file to the specified filename.
"""
self.dirname = os.path.dirname(filename)
with open(filename, "w", encoding='utf-8') as self.gedcom_file:
person_len = self.dbase.get_number_of_people()
family_len = self.dbase.get_number_of_families()
source_len = self.dbase.get_number_of_sources()
repo_len = self.dbase.get_number_of_repositories()
note_len = self.dbase.get_number_of_notes() / NOTES_PER_PERSON
total_steps = (person_len + family_len + source_len + repo_len +
note_len)
self.set_total(total_steps)
self._header(filename)
self._submitter()
self._individuals()
self._families()
self._sources()
self._repos()
self._notes()
self._all_media()
self._writeln(0, "TRLR")
return True
def _writeln(self, level, token, textlines="", limit=72):
"""
Write a line of text to the output file in the form of:
LEVEL TOKEN text
If the line contains newlines, it is broken into multiple lines using
the CONT token. If any line is greater than the limit, it will be
broken into multiple lines using CONC.
"""
assert token
if textlines:
# break the line into multiple lines if a newline is found
textlines = textlines.replace('\n\r', '\n')
textlines = textlines.replace('\r', '\n')
# Need to double '@' See Gedcom 5.5 spec 'any_char'
# but avoid xrefs and escapes
if not textlines.startswith('@') and '@#' not in textlines:
textlines = textlines.replace('@', '@@')
textlist = textlines.split('\n')
token_level = level
for text in textlist:
# make it unicode so that breakup below does the right thing.
text = str(text)
if limit:
prefix = "\n%d CONC " % (level + 1)
txt = prefix.join(breakup(text, limit))
else:
txt = text
self.gedcom_file.write("%d %s %s\n" %
(token_level, token, txt))
token_level = level + 1
token = "CONT"
else:
self.gedcom_file.write("%d %s\n" % (level, token))
def _header(self, filename):
"""
Write the GEDCOM header.
HEADER:=
n HEAD {1:1}
+1 SOUR <APPROVED_SYSTEM_ID> {1:1}
+2 VERS <VERSION_NUMBER> {0:1}
+2 NAME <NAME_OF_PRODUCT> {0:1}
+2 CORP <NAME_OF_BUSINESS> {0:1} # Not used
+3 <<ADDRESS_STRUCTURE>> {0:1} # Not used
+2 DATA <NAME_OF_SOURCE_DATA> {0:1} # Not used
+3 DATE <PUBLICATION_DATE> {0:1} # Not used
+3 COPR <COPYRIGHT_SOURCE_DATA> {0:1} # Not used
+1 DEST <RECEIVING_SYSTEM_NAME> {0:1*} # Not used
+1 DATE <TRANSMISSION_DATE> {0:1}
+2 TIME <TIME_VALUE> {0:1}
+1 SUBM @XREF:SUBM@ {1:1}
+1 SUBN @XREF:SUBN@ {0:1}
+1 FILE <FILE_NAME> {0:1}
+1 COPR <COPYRIGHT_GEDCOM_FILE> {0:1}
+1 GEDC {1:1}
+2 VERS <VERSION_NUMBER> {1:1}
+2 FORM <GEDCOM_FORM> {1:1}
+1 CHAR <CHARACTER_SET> {1:1}
+2 VERS <VERSION_NUMBER> {0:1}
+1 LANG <LANGUAGE_OF_TEXT> {0:1}
+1 PLAC {0:1}
+2 FORM <PLACE_HIERARCHY> {1:1}
+1 NOTE <GEDCOM_CONTENT_DESCRIPTION> {0:1}
+2 [CONT|CONC] <GEDCOM_CONTENT_DESCRIPTION> {0:M}
"""
local_time = time.localtime(time.time())
(year, mon, day, hour, minutes, sec) = local_time[0:6]
date_str = "%d %s %d" % (day, libgedcom.MONTH[mon], year)
time_str = "%02d:%02d:%02d" % (hour, minutes, sec)
rname = self.dbase.get_researcher().get_name()
self._writeln(0, "HEAD")
self._writeln(1, "SOUR", "Gramps")
self._writeln(2, "VERS", VERSION)
self._writeln(2, "NAME", "Gramps")
self._writeln(1, "DATE", date_str)
self._writeln(2, "TIME", time_str)
self._writeln(1, "SUBM", "@SUBM@")
self._writeln(1, "FILE", filename, limit=255)
self._writeln(1, "COPR", 'Copyright (c) %d %s.' % (year, rname))
self._writeln(1, "GEDC")
self._writeln(2, "VERS", "5.5.1")
self._writeln(2, "FORM", 'LINEAGE-LINKED')
self._writeln(1, "CHAR", "UTF-8")
# write the language string if the current LANG variable
# matches something we know about.
lang = glocale.language[0]
if lang and len(lang) >= 2:
lang_code = LANGUAGES.get(lang[0:2])
if lang_code:
self._writeln(1, 'LANG', lang_code)
def _submitter(self):
"""
n @<XREF:SUBM>@ SUBM {1:1}
+1 NAME <SUBMITTER_NAME> {1:1}
+1 <<ADDRESS_STRUCTURE>> {0:1}
+1 <<MULTIMEDIA_LINK>> {0:M} # not used
+1 LANG <LANGUAGE_PREFERENCE> {0:3} # not used
+1 RFN <SUBMITTER_REGISTERED_RFN> {0:1} # not used
+1 RIN <AUTOMATED_RECORD_ID> {0:1} # not used
+1 <<CHANGE_DATE>> {0:1} # not used
"""
owner = self.dbase.get_researcher()
name = owner.get_name()
phon = owner.get_phone()
mail = owner.get_email()
self._writeln(0, "@SUBM@", "SUBM")
self._writeln(1, "NAME", name)
# Researcher is a sub-type of LocationBase, so get_city etc. which are
# used in __write_addr work fine. However, the database owner street is
# stored in address, so we need to temporarily copy it into street so
# __write_addr works properly
owner.set_street(owner.get_address())
self.__write_addr(1, owner)
if phon:
self._writeln(1, "PHON", phon)
if mail:
self._writeln(1, "EMAIL", mail)
def _individuals(self):
"""
Write the individual people to the gedcom file.
Since people like to have the list sorted by ID value, we need to go
through a sorting step. We need to reset the progress bar, otherwise,
people will be confused when the progress bar is idle.
"""
self.set_text(_("Writing individuals"))
phandles = self.dbase.iter_person_handles()
sorted_list = []
for handle in phandles:
person = self.dbase.get_person_from_handle(handle)
if person:
data = (person.get_gramps_id(), handle)
sorted_list.append(data)
sorted_list.sort()
for data in sorted_list:
self.update()
self._person(self.dbase.get_person_from_handle(data[1]))
def _person(self, person):
"""
Write out a single person.
n @XREF:INDI@ INDI {1:1}
+1 RESN <RESTRICTION_NOTICE> {0:1} # not used
+1 <<PERSONAL_NAME_STRUCTURE>> {0:M}
+1 SEX <SEX_VALUE> {0:1}
+1 <<INDIVIDUAL_EVENT_STRUCTURE>> {0:M}
+1 <<INDIVIDUAL_ATTRIBUTE_STRUCTURE>> {0:M}
+1 <<LDS_INDIVIDUAL_ORDINANCE>> {0:M}
+1 <<CHILD_TO_FAMILY_LINK>> {0:M}
+1 <<SPOUSE_TO_FAMILY_LINK>> {0:M}
+1 SUBM @<XREF:SUBM>@ {0:M}
+1 <<ASSOCIATION_STRUCTURE>> {0:M}
+1 ALIA @<XREF:INDI>@ {0:M}
+1 ANCI @<XREF:SUBM>@ {0:M}
+1 DESI @<XREF:SUBM>@ {0:M}
+1 <<SOURCE_CITATION>> {0:M}
+1 <<MULTIMEDIA_LINK>> {0:M} ,*
+1 <<NOTE_STRUCTURE>> {0:M}
+1 RFN <PERMANENT_RECORD_FILE_NUMBER> {0:1}
+1 AFN <ANCESTRAL_FILE_NUMBER> {0:1}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
+2 TYPE <USER_REFERENCE_TYPE> {0:1}
+1 RIN <AUTOMATED_RECORD_ID> {0:1}
+1 <<CHANGE_DATE>> {0:1}
"""
if person is None:
return
self._writeln(0, "@%s@" % person.get_gramps_id(), "INDI")
self._names(person)
self._gender(person)
self._person_event_ref('BIRT', person.get_birth_ref())
self._person_event_ref('DEAT', person.get_death_ref())
self._remaining_events(person)
self._attributes(person)
self._lds_ords(person, 1)
self._child_families(person)
self._parent_families(person)
self._assoc(person, 1)
self._person_sources(person)
self._addresses(person)
self._photos(person.get_media_list(), 1)
self._url_list(person, 1)
self._note_references(person.get_note_list(), 1)
self._change(person.get_change_time(), 1)
def _assoc(self, person, level):
"""
n ASSO @<XREF:INDI>@ {0:M}
+1 RELA <RELATION_IS_DESCRIPTOR> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
"""
for ref in person.get_person_ref_list():
person = self.dbase.get_person_from_handle(ref.ref)
if person:
self._writeln(level, "ASSO", "@%s@" % person.get_gramps_id())
self._writeln(level + 1, "RELA", ref.get_relation())
self._note_references(ref.get_note_list(), level + 1)
self._source_references(ref.get_citation_list(), level + 1)
def _note_references(self, notelist, level):
"""
Write out the list of note handles to the current level.
We use the Gramps ID as the XREF for the GEDCOM file.
"""
for note_handle in notelist:
note = self.dbase.get_note_from_handle(note_handle)
if note:
self._writeln(level, 'NOTE', '@%s@' % note.get_gramps_id())
def _names(self, person):
"""
Write the names associated with the person to the current level.
Since nicknames in version < 3.3 are separate from the name structure,
we search the attribute list to see if we can find a nickname.
Because we do not know the mappings, we just take the first nickname
we find, and add it to the primary name.
If a nickname is present in the name structure, it has precedence.
"""
nicknames = [attr.get_value() for attr in person.get_attribute_list()
if int(attr.get_type()) == AttributeType.NICKNAME]
if len(nicknames) > 0:
nickname = nicknames[0]
else:
nickname = ""
self._person_name(person.get_primary_name(), nickname)
for name in person.get_alternate_names():
self._person_name(name, "")
def _gender(self, person):
"""
Write out the gender of the person to the file.
The only valid GEDCOM values are M (male) and F (female), so if the
gender is unknown or anything else, we output nothing.
"""
if person.get_gender() == Person.MALE:
self._writeln(1, "SEX", "M")
elif person.get_gender() == Person.FEMALE:
self._writeln(1, "SEX", "F")
def _lds_ords(self, obj, level):
"""
Simply loop through the list of LDS ordinances, and call the function
that writes the LDS ordinance structure.
"""
for lds_ord in obj.get_lds_ord_list():
self.write_ord(lds_ord, level)
def _remaining_events(self, person):
"""
Output all events associated with the person that are not BIRTH or
DEATH events.
Because all we have are event references, we have to
extract the real event to discover the event type.
"""
global adop_written
# adop_written is only shared between this function and
# _process_person_event. This is rather ugly code, but it is difficult
# to support an Adoption event without an Adopted relationship from the
# parent(s), an Adopted relationship from the parent(s) without an
# event, and both an event and a relationship. All these need to be
# supported without duplicating the output of the ADOP GEDCOM tag. See
# bug report 2370.
adop_written = False
for event_ref in person.get_event_ref_list():
event = self.dbase.get_event_from_handle(event_ref.ref)
if not event:
continue
self._process_person_event(person, event, event_ref)
if not adop_written:
self._adoption_records(person, adop_written)
def _process_person_event(self, person, event, event_ref):
"""
Process a person event, which is not a BIRTH or DEATH event.
"""
global adop_written
etype = int(event.get_type())
# if the event is a birth or death, skip it.
if etype in (EventType.BIRTH, EventType.DEATH):
return
role = int(event_ref.get_role())
# if the event role is not primary, skip the event.
if role != EventRoleType.PRIMARY:
return
val = libgedcom.PERSONALCONSTANTEVENTS.get(etype, "").strip()
if val and val.strip():
if val in NEEDS_PARAMETER:
if event.get_description().strip():
self._writeln(1, val, event.get_description())
else:
self._writeln(1, val)
else:
if event_has_subordinate_data(event, event_ref):
self._writeln(1, val)
else:
self._writeln(1, val, 'Y')
if event.get_description().strip():
self._writeln(2, 'TYPE', event.get_description())
else:
descr = event.get_description()
if descr:
self._writeln(1, 'EVEN', descr)
else:
self._writeln(1, 'EVEN')
if val.strip():
self._writeln(2, 'TYPE', val)
else:
self._writeln(2, 'TYPE', event.get_type().xml_str())
self._dump_event_stats(event, event_ref)
if etype == EventType.ADOPT and not adop_written:
adop_written = True
self._adoption_records(person, adop_written)
def _adoption_records(self, person, adop_written):
"""
Write Adoption events for each child that has been adopted.
n ADOP
+1 <<INDIVIDUAL_EVENT_DETAIL>>
+1 FAMC @<XREF:FAM>@
+2 ADOP <ADOPTED_BY_WHICH_PARENT>
"""
adoptions = []
for family in [self.dbase.get_family_from_handle(fh)
for fh in person.get_parent_family_handle_list()]:
if family is None:
continue
for child_ref in [ref for ref in family.get_child_ref_list()
if ref.ref == person.handle]:
if child_ref.mrel == ChildRefType.ADOPTED \
or child_ref.frel == ChildRefType.ADOPTED:
adoptions.append((family, child_ref.frel, child_ref.mrel))
for (fam, frel, mrel) in adoptions:
if not adop_written:
self._writeln(1, 'ADOP', 'Y')
self._writeln(2, 'FAMC', '@%s@' % fam.get_gramps_id())
if mrel == frel:
self._writeln(3, 'ADOP', 'BOTH')
elif mrel == ChildRefType.ADOPTED:
self._writeln(3, 'ADOP', 'WIFE')
else:
self._writeln(3, 'ADOP', 'HUSB')
def _attributes(self, person):
"""
Write out the attributes to the GEDCOM file.
Since we have already looked at nicknames when we generated the names,
we filter them out here.
We use the GEDCOM 5.5.1 FACT command to write out attributes not
built in to GEDCOM.
"""
# filter out the nicknames
attr_list = [attr for attr in person.get_attribute_list()
if attr.get_type() != AttributeType.NICKNAME]
for attr in attr_list:
attr_type = int(attr.get_type())
name = libgedcom.PERSONALCONSTANTATTRIBUTES.get(attr_type)
key = attr.get_type().xml_str()
value = attr.get_value().strip().replace('\r', ' ')
if key in ("AFN", "RFN", "REFN", "_UID", "_FSFTID"):
self._writeln(1, key, value)
continue
if key == "RESN":
self._writeln(1, 'RESN')
continue
if name and name.strip():
self._writeln(1, name, value)
elif value:
self._writeln(1, 'FACT', value)
self._writeln(2, 'TYPE', key)
else:
continue
self._note_references(attr.get_note_list(), 2)
self._source_references(attr.get_citation_list(), 2)
def _source_references(self, citation_list, level):
"""
Loop through the list of citation handles, writing the information
to the file.
"""
for citation_handle in citation_list:
self._source_ref_record(level, citation_handle)
def _addresses(self, person):
"""
Write out the addresses associated with the person as RESI events.
"""
for addr in person.get_address_list():
self._writeln(1, 'RESI')
self._date(2, addr.get_date_object())
self.__write_addr(2, addr)
if addr.get_phone():
self._writeln(2, 'PHON', addr.get_phone())
self._note_references(addr.get_note_list(), 2)
self._source_references(addr.get_citation_list(), 2)
def _photos(self, media_list, level):
"""
Loop through the list of media objects, writing the information
to the file.
"""
for photo in media_list:
self._photo(photo, level)
def _child_families(self, person):
"""
Write the Gramps ID as the XREF for each family in which the person
is listed as a child.
"""
# get the list of families from the handle list
family_list = [self.dbase.get_family_from_handle(hndl)
for hndl in person.get_parent_family_handle_list()]
for family in family_list:
if family:
self._writeln(1, 'FAMC', '@%s@' % family.get_gramps_id())
for child in family.get_child_ref_list():
if child.get_reference_handle() == person.get_handle():
if child.frel == ChildRefType.ADOPTED and \
child.mrel == ChildRefType.ADOPTED:
self._writeln(2, 'PEDI adopted')
elif child.frel == ChildRefType.BIRTH and \
child.mrel == ChildRefType.BIRTH:
self._writeln(2, 'PEDI birth')
elif child.frel == ChildRefType.STEPCHILD and \
child.mrel == ChildRefType.STEPCHILD:
self._writeln(2, 'PEDI stepchild')
elif child.frel == ChildRefType.FOSTER and \
child.mrel == ChildRefType.FOSTER:
self._writeln(2, 'PEDI foster')
elif child.frel == child.mrel:
self._writeln(2, 'PEDI %s' % child.frel.xml_str())
else:
self._writeln(
2, '_FREL %s' % PEDIGREE_TYPES.get(
child.frel.value, child.frel.xml_str()))
self._writeln(
2, '_MREL %s' % PEDIGREE_TYPES.get(
child.mrel.value, child.mrel.xml_str()))
def _parent_families(self, person):
"""
Write the Gramps ID as the XREF for each family in which the person
is listed as a parent.
"""
# get the list of families from the handle list
family_list = [self.dbase.get_family_from_handle(hndl)
for hndl in person.get_family_handle_list()]
for family in family_list:
if family:
self._writeln(1, 'FAMS', '@%s@' % family.get_gramps_id())
def _person_sources(self, person):
"""
Loop through the list of citations, writing the information
to the file.
"""
for citation_handle in person.get_citation_list():
self._source_ref_record(1, citation_handle)
def _url_list(self, obj, level):
"""
For Person's FAX, PHON, EMAIL, WWW lines;
n PHON <PHONE_NUMBER> {0:3}
n EMAIL <ADDRESS_EMAIL> {0:3}
n FAX <ADDRESS_FAX> {0:3}
n WWW <ADDRESS_WEB_PAGE> {0:3}
n OBJE {1:1}
+1 FORM <MULTIMEDIA_FORMAT> {1:1}
+1 TITL <DESCRIPTIVE_TITLE> {0:1}
+1 FILE <MULTIMEDIA_FILE_REFERENCE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
"""
for url in obj.get_url_list():
if url.get_type() == UrlType.EMAIL:
self._writeln(level, 'EMAIL', url.get_path())
elif url.get_type() == UrlType.WEB_HOME:
self._writeln(level, 'WWW', url.get_path())
elif url.get_type() == _('Phone'):
self._writeln(level, 'PHON', url.get_path())
elif url.get_type() == _('FAX'):
self._writeln(level, 'FAX', url.get_path())
else:
self._writeln(level, 'OBJE')
self._writeln(level + 1, 'FORM', 'URL')
if url.get_description():
self._writeln(level + 1, 'TITL', url.get_description())
if url.get_path():
self._writeln(level + 1, 'FILE', url.get_path(), limit=255)
def _families(self):
"""
Write out the list of families, sorting by Gramps ID.
"""
self.set_text(_("Writing families"))
# generate a list of (GRAMPS_ID, HANDLE) pairs. This list
# can then be sorted by the sort routine, which will use the
# first value of the tuple as the sort key.
sorted_list = sort_handles_by_id(self.dbase.get_family_handles(),
self.dbase.get_family_from_handle)
# loop through the sorted list, pulling out the handle. This list
# has already been sorted by GRAMPS_ID
for family_handle in [hndl[1] for hndl in sorted_list]:
self.update()
self._family(self.dbase.get_family_from_handle(family_handle))
def _family(self, family):
"""
n @<XREF:FAM>@ FAM {1:1}
+1 RESN <RESTRICTION_NOTICE> {0:1}
+1 <<FAMILY_EVENT_STRUCTURE>> {0:M}
+1 HUSB @<XREF:INDI>@ {0:1}
+1 WIFE @<XREF:INDI>@ {0:1}
+1 CHIL @<XREF:INDI>@ {0:M}
+1 NCHI <COUNT_OF_CHILDREN> {0:1}
+1 SUBM @<XREF:SUBM>@ {0:M}
+1 <<LDS_SPOUSE_SEALING>> {0:M}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
"""
if family is None:
return
gramps_id = family.get_gramps_id()
self._writeln(0, '@%s@' % gramps_id, 'FAM')
self._family_reference('HUSB', family.get_father_handle())
self._family_reference('WIFE', family.get_mother_handle())
self._lds_ords(family, 1)
self._family_events(family)
self._family_attributes(family.get_attribute_list(), 1)
self._family_child_list(family.get_child_ref_list())
self._source_references(family.get_citation_list(), 1)
self._photos(family.get_media_list(), 1)
self._note_references(family.get_note_list(), 1)
self._change(family.get_change_time(), 1)
def _family_child_list(self, child_ref_list):
"""
Write the child XREF values to the GEDCOM file.
"""
child_list = [
self.dbase.get_person_from_handle(cref.ref).get_gramps_id()
for cref in child_ref_list]
for gid in child_list:
if gid is None:
continue
self._writeln(1, 'CHIL', '@%s@' % gid)
def _family_reference(self, token, person_handle):
"""
Write the family reference to the file.
This is either 'WIFE' or 'HUSB'. As usual, we use the Gramps ID as the
XREF value.
"""
if person_handle:
person = self.dbase.get_person_from_handle(person_handle)
if person:
self._writeln(1, token, '@%s@' % person.get_gramps_id())
def _family_events(self, family):
"""
Output the events associated with the family.
Because all we have are event references, we have to extract the real
event to discover the event type.
"""
for event_ref in family.get_event_ref_list():
event = self.dbase.get_event_from_handle(event_ref.ref)
if event is None:
continue
self._process_family_event(event, event_ref)
self._dump_event_stats(event, event_ref)
def _process_family_event(self, event, event_ref):
"""
Process a single family event.
"""
etype = int(event.get_type())
val = libgedcom.FAMILYCONSTANTEVENTS.get(etype)
if val:
if event_has_subordinate_data(event, event_ref):
self._writeln(1, val)
else:
self._writeln(1, val, 'Y')
if event.get_type() == EventType.MARRIAGE:
self._family_event_attrs(event.get_attribute_list(), 2)
if event.get_description().strip() != "":
self._writeln(2, 'TYPE', event.get_description())
else:
descr = event.get_description()
if descr:
self._writeln(1, 'EVEN', descr)
else:
self._writeln(1, 'EVEN')
the_type = event.get_type()
if the_type:
self._writeln(2, 'TYPE', the_type.xml_str())
def _family_event_attrs(self, attr_list, level):
"""
Write the attributes associated with the family event.
The only ones we really care about are FATHER_AGE and MOTHER_AGE which
we translate to WIFE/HUSB AGE attributes.
"""
for attr in attr_list:
if attr.get_type() == AttributeType.FATHER_AGE:
self._writeln(level, 'HUSB')
self._writeln(level + 1, 'AGE', attr.get_value())
elif attr.get_type() == AttributeType.MOTHER_AGE:
self._writeln(level, 'WIFE')
self._writeln(level + 1, 'AGE', attr.get_value())
def _family_attributes(self, attr_list, level):
"""
Write out the attributes associated with a family to the GEDCOM file.
Since we have already looked at nicknames when we generated the names,
we filter them out here.
We use the GEDCOM 5.5.1 FACT command to write out attributes not
built in to GEDCOM.
"""
for attr in attr_list:
attr_type = int(attr.get_type())
name = libgedcom.FAMILYCONSTANTATTRIBUTES.get(attr_type)
key = attr.get_type().xml_str()
value = attr.get_value().replace('\r', ' ')
if key in ("AFN", "RFN", "REFN", "_UID"):
self._writeln(1, key, value)
continue
if name and name.strip():
self._writeln(1, name, value)
continue
else:
self._writeln(1, 'FACT', value)
self._writeln(2, 'TYPE', key)
self._note_references(attr.get_note_list(), level + 1)
self._source_references(attr.get_citation_list(),
level + 1)
def _sources(self):
"""
Write out the list of sources, sorting by Gramps ID.
"""
self.set_text(_("Writing sources"))
sorted_list = sort_handles_by_id(self.dbase.get_source_handles(),
self.dbase.get_source_from_handle)
for (source_id, handle) in sorted_list:
self.update()
source = self.dbase.get_source_from_handle(handle)
if source is None:
continue
self._writeln(0, '@%s@' % source_id, 'SOUR')
if source.get_title():
self._writeln(1, 'TITL', source.get_title())
if source.get_author():
self._writeln(1, "AUTH", source.get_author())
if source.get_publication_info():
self._writeln(1, "PUBL", source.get_publication_info())
if source.get_abbreviation():
self._writeln(1, 'ABBR', source.get_abbreviation())
self._photos(source.get_media_list(), 1)
for reporef in source.get_reporef_list():
self._reporef(reporef, 1)
# break
self._note_references(source.get_note_list(), 1)
self._change(source.get_change_time(), 1)
def _notes(self):
"""
Write out the list of notes, sorting by Gramps ID.
"""
self.set_text(_("Writing notes"))
note_cnt = 0
sorted_list = sort_handles_by_id(self.dbase.get_note_handles(),
self.dbase.get_note_from_handle)
for note_handle in [hndl[1] for hndl in sorted_list]:
# the following makes the progress bar a bit smoother
if not note_cnt % NOTES_PER_PERSON:
self.update()
note_cnt += 1
note = self.dbase.get_note_from_handle(note_handle)
if note is None:
continue
self._note_record(note)
def _note_record(self, note):
"""
n @<XREF:NOTE>@ NOTE <SUBMITTER_TEXT> {1:1}
+1 [ CONC | CONT] <SUBMITTER_TEXT> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
+2 TYPE <USER_REFERENCE_TYPE> {0:1}
+1 RIN <AUTOMATED_RECORD_ID> {0:1}
+1 <<CHANGE_DATE>> {0:1}
"""
if note:
self._writeln(0, '@%s@' % note.get_gramps_id(),
'NOTE ' + note.get())
def _repos(self):
"""
Write out the list of repositories, sorting by Gramps ID.
REPOSITORY_RECORD:=
n @<XREF:REPO>@ REPO {1:1}
+1 NAME <NAME_OF_REPOSITORY> {1:1}
+1 <<ADDRESS_STRUCTURE>> {0:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
+2 TYPE <USER_REFERENCE_TYPE> {0:1}
+1 RIN <AUTOMATED_RECORD_ID> {0:1}
+1 <<CHANGE_DATE>> {0:1}
"""
self.set_text(_("Writing repositories"))
sorted_list = sort_handles_by_id(self.dbase.get_repository_handles(),
self.dbase.get_repository_from_handle)
# GEDCOM only allows for a single repository per source
for (repo_id, handle) in sorted_list:
self.update()
repo = self.dbase.get_repository_from_handle(handle)
if repo is None:
continue
self._writeln(0, '@%s@' % repo_id, 'REPO')
if repo.get_name():
self._writeln(1, 'NAME', repo.get_name())
for addr in repo.get_address_list():
self.__write_addr(1, addr)
if addr.get_phone():
self._writeln(1, 'PHON', addr.get_phone())
for url in repo.get_url_list():
if url.get_type() == UrlType.EMAIL:
self._writeln(1, 'EMAIL', url.get_path())
elif url.get_type() == UrlType.WEB_HOME:
self._writeln(1, 'WWW', url.get_path())
elif url.get_type() == _('FAX'):
self._writeln(1, 'FAX', url.get_path())
self._note_references(repo.get_note_list(), 1)
def _reporef(self, reporef, level):
"""
n REPO [ @XREF:REPO@ | <NULL>] {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 CALN <SOURCE_CALL_NUMBER> {0:M}
+2 MEDI <SOURCE_MEDIA_TYPE> {0:1}
"""
if reporef.ref is None:
return
repo = self.dbase.get_repository_from_handle(reporef.ref)
if repo is None:
return
repo_id = repo.get_gramps_id()
self._writeln(level, 'REPO', '@%s@' % repo_id)
self._note_references(reporef.get_note_list(), level + 1)
if reporef.get_call_number():
self._writeln(level + 1, 'CALN', reporef.get_call_number())
if reporef.get_media_type():
self._writeln(level + 2, 'MEDI',
reporef.get_media_type().xml_str())
def _person_event_ref(self, key, event_ref):
"""
Write out the BIRTH and DEATH events for the person.
"""
if event_ref:
event = self.dbase.get_event_from_handle(event_ref.ref)
if event_has_subordinate_data(event, event_ref):
self._writeln(1, key)
else:
self._writeln(1, key, 'Y')
if event.get_description().strip() != "":
self._writeln(2, 'TYPE', event.get_description())
self._dump_event_stats(event, event_ref)
def _change(self, timeval, level):
"""
CHANGE_DATE:=
n CHAN {1:1}
+1 DATE <CHANGE_DATE> {1:1}
+2 TIME <TIME_VALUE> {0:1}
+1 <<NOTE_STRUCTURE>> # not used
"""
self._writeln(level, 'CHAN')
time_val = time.localtime(timeval)
self._writeln(level + 1, 'DATE', '%d %s %d' % (
time_val[2], libgedcom.MONTH[time_val[1]], time_val[0]))
self._writeln(level + 2, 'TIME', '%02d:%02d:%02d' % (
time_val[3], time_val[4], time_val[5]))
def _dump_event_stats(self, event, event_ref):
"""
Write the event details for the event, using the event and event
reference information.
GEDCOM does not make a distinction between the two.
"""
dateobj = event.get_date_object()
self._date(2, dateobj)
if self._datewritten:
# write out TIME if present
times = [attr.get_value() for attr in event.get_attribute_list()
if int(attr.get_type()) == AttributeType.TIME]
# Not legal, but inserted by PhpGedView
if len(times) > 0:
self._writeln(3, 'TIME', times[0])
place = None
if event.get_place_handle():
place = self.dbase.get_place_from_handle(event.get_place_handle())
self._place(place, dateobj, 2)
for attr in event.get_attribute_list():
attr_type = attr.get_type()
if attr_type == AttributeType.CAUSE:
self._writeln(2, 'CAUS', attr.get_value())
elif attr_type == AttributeType.AGENCY:
self._writeln(2, 'AGNC', attr.get_value())
elif attr_type == _("Phone"):
self._writeln(2, 'PHON', attr.get_value())
elif attr_type == _("FAX"):
self._writeln(2, 'FAX', attr.get_value())
elif attr_type == _("EMAIL"):
self._writeln(2, 'EMAIL', attr.get_value())
elif attr_type == _("WWW"):
self._writeln(2, 'WWW', attr.get_value())
for attr in event_ref.get_attribute_list():
attr_type = attr.get_type()
if attr_type == AttributeType.AGE:
self._writeln(2, 'AGE', attr.get_value())
elif attr_type == AttributeType.FATHER_AGE:
self._writeln(2, 'HUSB')
self._writeln(3, 'AGE', attr.get_value())
elif attr_type == AttributeType.MOTHER_AGE:
self._writeln(2, 'WIFE')
self._writeln(3, 'AGE', attr.get_value())
self._note_references(event.get_note_list(), 2)
self._source_references(event.get_citation_list(), 2)
self._photos(event.get_media_list(), 2)
if place:
self._photos(place.get_media_list(), 2)
def write_ord(self, lds_ord, index):
"""
LDS_INDIVIDUAL_ORDINANCE:=
[
n [ BAPL | CONL ] {1:1}
+1 DATE <DATE_LDS_ORD> {0:1}
+1 TEMP <TEMPLE_CODE> {0:1}
+1 PLAC <PLACE_LIVING_ORDINANCE> {0:1}
+1 STAT <LDS_BAPTISM_DATE_STATUS> {0:1}
+2 DATE <CHANGE_DATE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 <<SOURCE_CITATION>> {0:M} p.39
|
n ENDL {1:1}
+1 DATE <DATE_LDS_ORD> {0:1}
+1 TEMP <TEMPLE_CODE> {0:1}
+1 PLAC <PLACE_LIVING_ORDINANCE> {0:1}
+1 STAT <LDS_ENDOWMENT_DATE_STATUS> {0:1}
+2 DATE <CHANGE_DATE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
|
n SLGC {1:1}
+1 DATE <DATE_LDS_ORD> {0:1}
+1 TEMP <TEMPLE_CODE> {0:1}
+1 PLAC <PLACE_LIVING_ORDINANCE> {0:1}
+1 FAMC @<XREF:FAM>@ {1:1}
+1 STAT <LDS_CHILD_SEALING_DATE_STATUS> {0:1}
+2 DATE <CHANGE_DATE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
]
"""
self._writeln(index, LDS_ORD_NAME[lds_ord.get_type()])
self._date(index + 1, lds_ord.get_date_object())
if lds_ord.get_family_handle():
family_handle = lds_ord.get_family_handle()
family = self.dbase.get_family_from_handle(family_handle)
if family:
self._writeln(index + 1, 'FAMC', '@%s@' %
family.get_gramps_id())
if lds_ord.get_temple():
self._writeln(index + 1, 'TEMP', lds_ord.get_temple())
if lds_ord.get_place_handle():
place = self.dbase.get_place_from_handle(
lds_ord.get_place_handle())
self._place(place, lds_ord.get_date_object(), 2)
if lds_ord.get_status() != LdsOrd.STATUS_NONE:
self._writeln(2, 'STAT', LDS_STATUS[lds_ord.get_status()])
self._note_references(lds_ord.get_note_list(), index + 1)
self._source_references(lds_ord.get_citation_list(), index + 1)
def _date(self, level, date):
"""
Write the 'DATE' GEDCOM token, along with the date in GEDCOM's
expected format.
"""
self._datewritten = True
start = date.get_start_date()
if start != Date.EMPTY:
cal = date.get_calendar()
mod = date.get_modifier()
quality = None if mod else date.get_quality()
if mod == Date.MOD_SPAN:
val = "FROM %s TO %s" % (
libgedcom.make_gedcom_date(start, cal, mod, None),
libgedcom.make_gedcom_date(date.get_stop_date(),
cal, mod, None))
elif mod == Date.MOD_RANGE:
val = "BET %s AND %s" % (
libgedcom.make_gedcom_date(start, cal, mod, None),
libgedcom.make_gedcom_date(date.get_stop_date(),
cal, mod, None))
else:
val = libgedcom.make_gedcom_date(start, cal, mod, quality)
self._writeln(level, 'DATE', val)
elif date.get_text():
self._writeln(level, 'DATE', date.get_text())
else:
self._datewritten = False
def _person_name(self, name, attr_nick):
"""
n NAME <NAME_PERSONAL> {1:1}
+1 NPFX <NAME_PIECE_PREFIX> {0:1}
+1 GIVN <NAME_PIECE_GIVEN> {0:1}
+1 NICK <NAME_PIECE_NICKNAME> {0:1}
+1 SPFX <NAME_PIECE_SURNAME_PREFIX {0:1}
+1 SURN <NAME_PIECE_SURNAME> {0:1}
+1 NSFX <NAME_PIECE_SUFFIX> {0:1}
+1 <<SOURCE_CITATION>> {0:M}
+1 <<NOTE_STRUCTURE>> {0:M}
"""
gedcom_name = name.get_gedcom_name()
firstname = name.get_first_name().strip()
surns = []
surprefix = ''
for surn in name.get_surname_list():
surns.append(surn.get_surname().replace('/', '?'))
if surn.get_connector():
#we store connector with the surname
surns[-1] = surns[-1] + ' ' + surn.get_connector()
surname = ', '.join(surns)
if name.get_surname_list():
# GEDCOM only supports a single surname prefix
surn = name.get_surname_list()[0]
surprefix = surn.get_prefix().replace('/', '?')
suffix = name.get_suffix()
title = name.get_title()
nick = name.get_nick_name()
if nick.strip() == '':
nick = attr_nick
self._writeln(1, 'NAME', gedcom_name)
if int(name.get_type()) == NameType.BIRTH:
self._writeln(2, 'TYPE', 'birth')
elif int(name.get_type()) == NameType.MARRIED:
self._writeln(2, 'TYPE', 'married')
elif int(name.get_type()) == NameType.AKA:
self._writeln(2, 'TYPE', 'aka')
else:
self._writeln(2, 'TYPE', name.get_type().xml_str())
if firstname:
self._writeln(2, 'GIVN', firstname)
if surprefix:
self._writeln(2, 'SPFX', surprefix)
if surname:
self._writeln(2, 'SURN', surname)
if name.get_suffix():
self._writeln(2, 'NSFX', suffix)
if name.get_title():
self._writeln(2, 'NPFX', title)
if nick:
self._writeln(2, 'NICK', nick)
self._source_references(name.get_citation_list(), 2)
self._note_references(name.get_note_list(), 2)
def _source_ref_record(self, level, citation_handle):
"""
n SOUR @<XREF:SOUR>@ /* pointer to source record */ {1:1}
+1 PAGE <WHERE_WITHIN_SOURCE> {0:1}
+1 EVEN <EVENT_TYPE_CITED_FROM> {0:1}
+2 ROLE <ROLE_IN_EVENT> {0:1}
+1 DATA {0:1}
+2 DATE <ENTRY_RECORDING_DATE> {0:1}
+2 TEXT <TEXT_FROM_SOURCE> {0:M}
+3 [ CONC | CONT ] <TEXT_FROM_SOURCE> {0:M}
+1 QUAY <CERTAINTY_ASSESSMENT> {0:1}
+1 <<MULTIMEDIA_LINK>> {0:M} ,*
+1 <<NOTE_STRUCTURE>> {0:M}
"""
citation = self.dbase.get_citation_from_handle(citation_handle)
src_handle = citation.get_reference_handle()
if src_handle is None:
return
src = self.dbase.get_source_from_handle(src_handle)
if src is None:
return
# Reference to the source
self._writeln(level, "SOUR", "@%s@" % src.get_gramps_id())
if citation.get_page() != "":
# PAGE <WHERE_WITHIN_SOURCE> can not have CONC lines.
# WHERE_WITHIN_SOURCE:= {Size=1:248}
# Maximize line to 248 and set limit to 248, for no line split
self._writeln(level + 1, 'PAGE', citation.get_page()[0:248],
limit=248)
conf = min(citation.get_confidence_level(),
Citation.CONF_VERY_HIGH)
if conf != Citation.CONF_NORMAL and conf != -1:
self._writeln(level + 1, "QUAY", QUALITY_MAP[conf])
if not citation.get_date_object().is_empty():
self._writeln(level + 1, 'DATA')
self._date(level + 2, citation.get_date_object())
if len(citation.get_note_list()) > 0:
note_list = [self.dbase.get_note_from_handle(h)
for h in citation.get_note_list()]
note_list = [n for n in note_list
if n.get_type() == NoteType.SOURCE_TEXT]
if note_list:
ref_text = note_list[0].get()
else:
ref_text = ""
if ref_text != "" and citation.get_date_object().is_empty():
self._writeln(level + 1, 'DATA')
if ref_text != "":
self._writeln(level + 2, "TEXT", ref_text)
note_list = [self.dbase.get_note_from_handle(h)
for h in citation.get_note_list()]
note_list = [n.handle for n in note_list
if n and n.get_type() != NoteType.SOURCE_TEXT]
self._note_references(note_list, level + 1)
self._photos(citation.get_media_list(), level + 1)
even = None
for srcattr in citation.get_attribute_list():
if str(srcattr.type) == "EVEN":
even = srcattr.value
self._writeln(level + 1, "EVEN", even)
break
if even:
for srcattr in citation.get_attribute_list():
if str(srcattr.type) == "EVEN:ROLE":
self._writeln(level + 2, "ROLE", srcattr.value)
break
def _photo(self, photo, level):
"""
n OBJE @<XREF:OBJE>@ {1:1}
"""
photo_obj_id = photo.get_reference_handle()
photo_obj = self.dbase.get_media_from_handle(photo_obj_id)
if photo_obj:
# if not os.path.isfile(path):
# return
self._writeln(level, 'OBJE @%s@' % photo_obj.get_gramps_id())
def _all_media(self):
"""
Write out the list of media, sorting by Gramps ID.
"""
self.set_text(_("Writing media"))
# generate a list of (GRAMPS_ID, HANDLE) pairs. This list
# can then be sorted by the sort routine, which will use the
# first value of the tuple as the sort key.
sorted_list = sort_handles_by_id(self.dbase.get_media_handles(),
self.dbase.get_media_from_handle)
# loop through the sorted list, pulling out the handle. This list
# has already been sorted by GRAMPS_ID
for media_handle in [hndl[1] for hndl in sorted_list]:
self.update()
self._media(self.dbase.get_media_from_handle(media_handle))
def _media(self, media):
"""
n @XREF:OBJE@ OBJE {1:1}
+1 FILE <MULTIMEDIA_FILE_REFN> {1:M}
+2 FORM <MULTIMEDIA_FORMAT> {1:1}
+3 TYPE <SOURCE_MEDIA_TYPE> {0:1}
+2 TITL <DESCRIPTIVE_TITLE> {0:1} p.48
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
+2 TYPE <USER_REFERENCE_TYPE> {0:1}
+1 RIN <AUTOMATED_RECORD_ID> {0:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
+1 <<CHANGE_DATE>> {0:1}
"""
if media is None:
return
gramps_id = media.get_gramps_id()
self._writeln(0, '@%s@' % gramps_id, 'OBJE')
form = os.path.splitext(media.get_path())[1][1:]
path = media_path_full(self.dbase, media.get_path())
self._writeln(1, 'FILE', path, limit=255)
if form:
self._writeln(2, 'FORM', form)
self._writeln(2, 'TITL', media.get_description())
for attr in media.get_attribute_list():
key = str(attr.get_type())
value = attr.get_value().replace('\r', ' ')
if key in ("RIN", "RFN", "REFN"):
self._writeln(1, key, value)
continue
self._note_references(media.get_note_list(), 1)
self._source_references(media.get_citation_list(), 1)
self._change(media.get_change_time(), 1)
def _place(self, place, dateobj, level):
"""
PLACE_STRUCTURE:=
n PLAC <PLACE_NAME> {1:1}
+1 FORM <PLACE_HIERARCHY> {0:1}
+1 FONE <PLACE_PHONETIC_VARIATION> {0:M} # not used
+2 TYPE <PHONETIC_TYPE> {1:1}
+1 ROMN <PLACE_ROMANIZED_VARIATION> {0:M} # not used
+2 TYPE <ROMANIZED_TYPE> {1:1}
+1 MAP {0:1}
+2 LATI <PLACE_LATITUDE> {1:1}
+2 LONG <PLACE_LONGITUDE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
"""
if place is None:
return
place_name = _pd.display(self.dbase, place, dateobj)
self._writeln(level, "PLAC", place_name.replace('\r', ' '), limit=120)
longitude = place.get_longitude()
latitude = place.get_latitude()
if longitude and latitude:
(latitude, longitude) = conv_lat_lon(latitude, longitude, "GEDCOM")
if longitude and latitude:
self._writeln(level + 1, "MAP")
self._writeln(level + 2, 'LATI', latitude)
self._writeln(level + 2, 'LONG', longitude)
# The Gedcom standard shows that an optional address structure can
# be written out in the event detail.
# http://homepages.rootsweb.com/~pmcbride/gedcom/55gcch2.htm#EVENT_DETAIL
location = get_main_location(self.dbase, place)
street = location.get(PlaceType.STREET)
locality = location.get(PlaceType.LOCALITY)
city = location.get(PlaceType.CITY)
state = location.get(PlaceType.STATE)
country = location.get(PlaceType.COUNTRY)
postal_code = place.get_code()
if street or locality or city or state or postal_code or country:
self._writeln(level, "ADDR", street)
if street:
self._writeln(level + 1, 'ADR1', street)
if locality:
self._writeln(level + 1, 'ADR2', locality)
if city:
self._writeln(level + 1, 'CITY', city)
if state:
self._writeln(level + 1, 'STAE', state)
if postal_code:
self._writeln(level + 1, 'POST', postal_code)
if country:
self._writeln(level + 1, 'CTRY', country)
self._note_references(place.get_note_list(), level + 1)
def __write_addr(self, level, addr):
"""
n ADDR <ADDRESS_LINE> {0:1}
+1 CONT <ADDRESS_LINE> {0:M}
+1 ADR1 <ADDRESS_LINE1> {0:1} (Street)
+1 ADR2 <ADDRESS_LINE2> {0:1} (Locality)
+1 CITY <ADDRESS_CITY> {0:1}
+1 STAE <ADDRESS_STATE> {0:1}
+1 POST <ADDRESS_POSTAL_CODE> {0:1}
+1 CTRY <ADDRESS_COUNTRY> {0:1}
This is done along the lines suggested by Tamura Jones in
http://www.tamurajones.net/GEDCOMADDR.xhtml as a result of bug 6382.
"GEDCOM writers should always use the structured address format,
and use it for all addresses, including the submitter address and
their own corporate address." "Vendors that want their product to pass
even the strictest GEDCOM validation, should include export to the old
free-form format..." [This goes on to say the free-form should be an
option, but we have not made it an option in Gramps].
@param level: The level number for the ADDR tag
@type level: Integer
@param addr: The location or address
@type addr: [a super-type of] LocationBase
"""
if addr.get_street() or addr.get_locality() or addr.get_city() or \
addr.get_state() or addr.get_postal_code() or addr.get_country():
self._writeln(level, 'ADDR', addr.get_street())
if addr.get_locality():
self._writeln(level + 1, 'CONT', addr.get_locality())
if addr.get_city():
self._writeln(level + 1, 'CONT', addr.get_city())
if addr.get_state():
self._writeln(level + 1, 'CONT', addr.get_state())
if addr.get_postal_code():
self._writeln(level + 1, 'CONT', addr.get_postal_code())
if addr.get_country():
self._writeln(level + 1, 'CONT', addr.get_country())
if addr.get_street():
self._writeln(level + 1, 'ADR1', addr.get_street())
if addr.get_locality():
self._writeln(level + 1, 'ADR2', addr.get_locality())
if addr.get_city():
self._writeln(level + 1, 'CITY', addr.get_city())
if addr.get_state():
self._writeln(level + 1, 'STAE', addr.get_state())
if addr.get_postal_code():
self._writeln(level + 1, 'POST', addr.get_postal_code())
if addr.get_country():
self._writeln(level + 1, 'CTRY', addr.get_country())
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
def export_data(database, filename, user, option_box=None):
"""
External interface used to register with the plugin system.
"""
ret = False
try:
ged_write = GedcomWriter(database, user, option_box)
ret = ged_write.write_gedcom_file(filename)
except IOError as msg:
msg2 = _("Could not create %s") % filename
user.notify_error(msg2, str(msg))
except DatabaseError as msg:
user.notify_db_error("%s\n%s" % (_("GEDCOM Export failed"), str(msg)))
return ret
# Source: gramps-project/gramps, gramps/plugins/export/exportgedcom.py (Python, GPL-2.0)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
NeuroKitEditor for NeuroKit plugin.
"""
__author__ = "Aviral Goel"
__credits__ = ["Upi Lab"]
__license__ = "GPL3"
__version__ = "1.0.0"
__maintainer__ = "Aviral Goel"
__email__ = "goel.aviral@gmail.com"
__status__ = "Development"
import mplugin
import moose
import pprint
# import NeuroKitEditorWidget
import default
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4 import Qt
from PyQt4.QtGui import QPushButton
from PyQt4.QtGui import QWidget
from PyQt4.QtGui import QHBoxLayout
from PyQt4.QtGui import QGridLayout
from PyQt4.QtGui import QDialog
from PyQt4.QtGui import QTableWidget
from PyQt4.QtGui import QTableWidgetItem
from PyQt4.QtGui import QCheckBox
from PyQt4.QtGui import QComboBox
from default import *
from mplugin import *
import neuroextractor
import moogli
import numpy as np
from global_constants import preferences
from NeuroKitVisualizer import MorphologyEditor
class NeuroKitEditor(mplugin.EditorBase):
"""
NeuroKitEditor
"""
def __init__(self, plugin, modelRoot):
super(NeuroKitEditor, self).__init__(plugin)
self._centralWidget = None #default.DefaultEditorWidget(None)
self.modelRoot = modelRoot
# self._centralWidget = NeuroKitEditorWidget.NeuroKitEditorWidget(modelRoot)
self._menus = []
# self._propertyTable = MorphologyProperyTable()
self._propertyTable = QWidget()
self.__initMenus()
self.__initToolBars()
self.setModelRoot(modelRoot)
# if hasattr(self._centralWidget, 'init'):
# self._centralWidget.init()
# self._centralWidget.setModelRoot(self.plugin.modelRoot)
# return self._centralWidget
def __initMenus(self):
return self._menus
# editMenu = QtGui.QMenu('&Edit')
# for menu in self.getCentralWidget().getMenus():
# editMenu.addMenu(menu)
# self._menus.append(detailsButton)
def __initToolBars(self):
return self._toolBars
# for toolbar in self.getCentralWidget().getToolBars():
# self._toolBars.append(toolbar)
def getToolPanes(self):
return super(NeuroKitEditor, self).getToolPanes()
def getLibraryPane(self):
return super(NeuroKitEditor, self).getLibraryPane()
def getOperationsWidget(self):
return super(NeuroKitEditor, self).getOperationsPane()
# def getCentralWidget(self):
# """Retrieve or initialize the central widget.
# Note that we call the widget's setModelRoot() function
# explicitly with the plugin's modelRoot as the argument. This
# enforces an update of the widget display with the current
# modelRoot.
# This function should be overridden by any derived class as it
# has the editor widget class hard coded into it.
# """
# self._centralWidget.setModelRoot(self.plugin.modelRoot)
# return self._centralWidget
def updateModelView(self):
pass
def setModelRoot(self, path):
self.modelRoot = path
self.updateModelView()
def setBaseColor(self, color):
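# 'color' is an RGBA tuple of 0-255 ints, e.g. (255, 0, 0, 255) for
# opaque red (illustrative); the division by 255 normalizes each
# channel to the 0.0-1.0 floats the visualizer expects.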
self.morphology.set_initial_color( color[0] / 255.0
, color[1] / 255.0
, color[2] / 255.0
, color[3] / 255.0
)
def setPeakColor(self, color):
self.morphology.set_final_color( color[0] / 255.0
, color[1] / 255.0
, color[2] / 255.0
, color[3] / 255.0
)
def setBackgroundColor(self, color):
self.visualizer.set_background_color( color[0] / 255.0
, color[1] / 255.0
, color[2] / 255.0
, color[3] / 255.0
)
def setBaseVm(self, vm):
self.morphology.set_base_membrane_voltage(vm)
def setPeakVm(self, vm):
self.morphology.set_peak_membrane_voltage(vm)
def createCentralWidget(self):
self._centralWidget = default.EditorWidgetBase()#self.modelRoot)
self._centralWidget.setLayout(QHBoxLayout())
# self.plotWidgetContainer = PlotWidgetContainer(self.modelRoot)
self.geometry = neuroextractor.model(moose.element(self.modelRoot + "/model"))
self.morphology = self.createMorphology(self.geometry)
self.morphology.set_compartment_order(
map(lambda x : x.path, self.compartmentOrder)
)
self.vms = np.empty(len(self.compartmentOrder), dtype=float, order='C')
self.ims = np.empty(len(self.compartmentOrder), dtype=float, order='C')
# self.visualizer.insertPlainText(pprint.pformat(self.geometry, indent = 4))
# self.visualizer = QTextEdit()#NeuroKitVisualizer(self.modelRoot)
desktop = QtGui.QApplication.desktop()
# print("**********************")
# print(desktop.screenGeometry())
# print("***********************")
self.visualizer = MorphologyEditor( self.morphology
, desktop.screenGeometry().width()
, desktop.screenGeometry().height()
, self.plugin
)
# self.scheduler = self.getSchedulingDockWidget().widget()
# self._centralWidget.setChildWidget(self.scheduler, False, 0,0,1,-1)
self.visualizer.setGeometry( 0, 0, 1200, 400 )
self.visualizer.show()
# self.visualizer.start()
self._centralWidget.layout().addWidget(self.visualizer)
# self._centralWidget.setChildWidget(self.visualizer, False, 0, 0,-1,1)
# self._centralWidget.setChildWidget(self.plotWidgetContainer, False, 0, 1,-1,1)
# self._centralWidget.setPlotWidgetContainer(self.plotWidgetContainer)
# label = QLabel("Aviral Goel")
# self._centralWidget.setChildWidget(label, False, 0, 0)
# self._centralWidget.setWindowTitle("Aviral Goel")
# self.scheduler = self.getSchedulingDockWidget().widget()
# self.scheduler.runner.update.connect(self.kkitRunView.getCentralWidget().changeBgSize)
# self.scheduler.runner.resetAndRun.connect(self.kkitRunView.getCentralWidget().resetColor)
########################################################
# self.schedular = self.getSchedulingDockWidget().widget()
# self.schedular.runner.simulationProgressed.connect(self.update)
# self.schedular.runner.simulationReset.connect(self.reset)
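        # Re-apply the visualization settings whenever the user edits the
        # electrical preferences (colors or voltage range) in the dialog.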
preferences.getView().setCurrentIndex(1)
preferences.getView().electricalBaseColorDialog.colorSelected.connect(lambda x: self.reset())
preferences.getView().electricalPeakColorDialog.colorSelected.connect(lambda x: self.reset())
preferences.getView().electricalBackgroundColorDialog.colorSelected.connect(lambda x: self.reset())
preferences.getView().electricalPeakMembraneVoltage.editingFinished.connect(self.reset)
preferences.getView().electricalBaseMembraneVoltage.editingFinished.connect(self.reset)
# print("getting central widget")
# self._centralWidget.show()
# print(self.schedular.runner._updateInterval)
self.reset()
return self._centralWidget
def update(self, time):
# print("Update called => ", time)
# print("Update called")
# for neuron_id in self.geometry["neurons"]:
# neuron = self.geometry["neurons"][neuron_id]
# for compartment_id in neuron["compartments"]:
# voltage = neuron["compartments"][compartment_id]["object"].Vm
# print(compartment_id + " => " + str(voltage))
# self.visualizer.
self.updateVms()
# self.updateIms()
# self.visualizer.next()
# print(self.vms)
def updateVms(self):
for i in range(0, len(self.compartmentOrder)):
self.vms[i] = self.compartmentOrder[i].Vm
self.morphology.set_membrane_voltages(self.vms)
def updateIms(self):
for i in range(0, len(self.compartmentOrder)):
self.ims[i] = self.compartmentOrder[i].Im
def reset(self):
# print(" => reset called")
prefs = preferences.getElectricalPreferences()
self.setPeakColor(prefs["visualization"]["peak-color"])
self.setBaseColor(prefs["visualization"]["base-color"])
self.setBackgroundColor(prefs["visualization"]["background-color"])
self.setBaseVm(prefs["visualization"]["base-membrane-voltage"])
self.setPeakVm(prefs["visualization"]["peak-membrane-voltage"])
self.updateVms()
# self.visualizer.next()
# self.updateIms()
# self.ims[i] = self.compartmentOrder[i].Im
# print(self.vms)
# self.morphology.resetVms(self.vms)
# self.morphology.resetIms(self.ims)
def createMorphology(self, geometry):
# import json
# f = open("/home/aviral/purkinje.json", "w")
# f.write(json.dumps(geometry, indent=4))
# f.close()
# morphology = moogli.Morphology("morph")
# morphology.add_compartment( "a"
# , "b", 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0)
# morphology.add_compartment( "c"
# , "b"
# , 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 4.0)
morphology = moogli.Morphology("morph", 1)
self.compartmentOrder = []
for neuron_id in geometry["neurons"]:
neuron = geometry["neurons"][neuron_id]
for compartment_id in neuron["compartments"]:
compartment = neuron["compartments"][compartment_id]
# print( compartment_id
# , neuron_id
# , compartment["proximal"]["x"]
# , compartment["proximal"]["y"]
# , compartment["proximal"]["z"]
# , compartment["diameter"]
# , compartment["distal"]["x"]
# , compartment["distal"]["y"]
# , compartment["distal"]["z"]
# , compartment["diameter"]
# )
self.compartmentOrder.append(compartment["object"])
morphology.add_compartment( compartment_id
, neuron_id
, compartment["proximal"]["x"] * 10000000
, compartment["proximal"]["y"] * 10000000
, compartment["proximal"]["z"] * 10000000
, compartment["diameter"] * 10000000
, compartment["distal"]["x"] * 10000000
, compartment["distal"]["y"] * 10000000
, compartment["distal"]["z"] * 10000000
, compartment["diameter"] * 10000000
)
return morphology
def getCentralWidget(self):
if self._centralWidget is None:
self.createCentralWidget()
# self._centralWidget.show()
return self._centralWidget
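# A minimal usage sketch (hypothetical): how a host application might obtain
# the lazily created central widget. `plugin` and `modelRoot` are assumed to
# be supplied by the surrounding MOOSE GUI plugin framework; the names below
# are illustrative only.
#
#     editor = NeuroKitEditor(plugin, '/model/cells')
#     widget = editor.getCentralWidget()  # built on first access
#     widget.show()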
| dilawar/moose-full | moose-gui/plugins/NeuroKitEditor.py | Python | gpl-2.0 | 11,841 | ["MOOSE", "NEURON"] | 79a7ae358497fb65f781a7e53ba2db85fe3b9b198c653467b72fe5ae64ba1c46 |
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from lettuce.django import django_url
import time
@step('I register for the course "([^"]*)"$')
def i_register_for_the_course(_step, course):
url = django_url('courses/%s/about' % world.scenario_dict['COURSE'].id.to_deprecated_string())
world.browser.visit(url)
world.css_click('section.intro a.register')
assert world.is_css_present('section.container.dashboard')
@step('I register to audit the course$')
def i_register_to_audit_the_course(_step):
url = django_url('courses/%s/about' % world.scenario_dict['COURSE'].id.to_deprecated_string())
world.browser.visit(url)
world.css_click('section.intro a.register')
# the below button has a race condition. When the page first loads
# some animation needs to complete before this button is in a stable
# position. TODO: implement this without a sleep.
time.sleep(2)
audit_button = world.browser.find_by_name("audit_mode")
audit_button.click()
time.sleep(1)
assert world.is_css_present('section.container.dashboard')
@step(u'I should see an empty dashboard message')
def i_should_see_empty_dashboard(_step):
empty_dash_css = 'section.empty-dashboard-message'
assert world.is_css_present(empty_dash_css)
@step(u'I should( NOT)? see the course numbered "([^"]*)" in my dashboard$')
def i_should_see_that_course_in_my_dashboard(_step, doesnt_appear, course):
course_link_css = 'section.my-courses a[href*="%s"]' % course
if doesnt_appear:
assert world.is_css_not_present(course_link_css)
else:
assert world.is_css_present(course_link_css)
@step(u'I unenroll from the course numbered "([^"]*)"')
def i_unenroll_from_that_course(_step, course):
unregister_css = 'section.info a[href*="#unenroll-modal"][data-course-number*="%s"]' % course
world.css_click(unregister_css)
button_css = 'section#unenroll-modal input[value="Unenroll"]'
world.css_click(button_css)
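# A sketch of a lettuce feature-file scenario that would exercise the steps
# above (hypothetical; the course number is illustrative):
#
#     Scenario: Unenroll from a course
#         Given I register for the course "6.002x"
#         Then I should see the course numbered "6.002x" in my dashboard
#         When I unenroll from the course numbered "6.002x"
#         Then I should see an empty dashboard message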
| olexiim/edx-platform | lms/djangoapps/courseware/features/registration.py | Python | agpl-3.0 | 2,032 | ["VisIt"] | c1b32f54b1e8de7f3bfb4c44f60a1a13fcf001bfedede3b754d1fab952efc792 |
#!/usr/bin/env python
import os
try:
__IPYTHON__
import sys
del sys.argv[1:]
except NameError:
pass
import srwl_bl
import srwlib
import srwlpy
import math
import srwl_uti_smp
def set_optics(v, names=None, want_final_propagation=True):
el = []
pp = []
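    # 'el' collects the optical elements and 'pp' the matching propagation
    # parameter lists; srwlib.SRWLOptC pairs them up positionally, so each
    # mirror, drift, or mask appends one entry to both lists.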
if not names:
names = ['VFM', 'VFM_HFM', 'HFM', 'HFM_Watchpoint', 'Watchpoint', 'Watchpoint_Mask', 'Mask', 'Watchpoint2']
for el_name in names:
if el_name == 'VFM':
# VFM: ellipsoidMirror 50.0m
el.append(srwlib.SRWLOptMirEl(
_p=v.op_VFM_p,
_q=v.op_VFM_q,
_ang_graz=v.op_VFM_ang,
_size_tang=v.op_VFM_size_tang,
_size_sag=v.op_VFM_size_sag,
_nvx=v.op_VFM_nvx,
_nvy=v.op_VFM_nvy,
_nvz=v.op_VFM_nvz,
_tvx=v.op_VFM_tvx,
_tvy=v.op_VFM_tvy,
_x=v.op_VFM_x,
_y=v.op_VFM_y,
))
pp.append(v.op_VFM_pp)
elif el_name == 'VFM_HFM':
# VFM_HFM: drift 50.0m
el.append(srwlib.SRWLOptD(
_L=v.op_VFM_HFM_L,
))
pp.append(v.op_VFM_HFM_pp)
elif el_name == 'HFM':
# HFM: ellipsoidMirror 50.2m
el.append(srwlib.SRWLOptMirEl(
_p=v.op_HFM_p,
_q=v.op_HFM_q,
_ang_graz=v.op_HFM_ang,
_size_tang=v.op_HFM_size_tang,
_size_sag=v.op_HFM_size_sag,
_nvx=v.op_HFM_nvx,
_nvy=v.op_HFM_nvy,
_nvz=v.op_HFM_nvz,
_tvx=v.op_HFM_tvx,
_tvy=v.op_HFM_tvy,
_x=v.op_HFM_x,
_y=v.op_HFM_y,
))
pp.append(v.op_HFM_pp)
elif el_name == 'HFM_Watchpoint':
# HFM_Watchpoint: drift 50.2m
el.append(srwlib.SRWLOptD(
_L=v.op_HFM_Watchpoint_L,
))
pp.append(v.op_HFM_Watchpoint_pp)
elif el_name == 'Watchpoint':
# Watchpoint: watch 50.4m
pass
elif el_name == 'Watchpoint_Mask':
# Watchpoint_Mask: drift 50.4m
el.append(srwlib.SRWLOptD(
_L=v.op_Watchpoint_Mask_L,
))
pp.append(v.op_Watchpoint_Mask_pp)
elif el_name == 'Mask':
# Mask: mask 50.6m
el.append(srwlib.srwl_opt_setup_mask(
_delta=v.op_Mask_delta,
_atten_len=v.op_Mask_atten_len,
_thick=v.op_Mask_thick,
_grid_sh=v.op_Mask_grid_sh,
_grid_dx=v.op_Mask_grid_dx,
_grid_dy=v.op_Mask_grid_dy,
_pitch_x=v.op_Mask_pitch_x,
_pitch_y=v.op_Mask_pitch_y,
_grid_nx=v.op_Mask_grid_nx,
_grid_ny=v.op_Mask_grid_ny,
_mask_Nx=v.op_Mask_mask_Nx,
_mask_Ny=v.op_Mask_mask_Ny,
_grid_angle=v.op_Mask_gridTiltAngle,
_hx=v.op_Mask_hx,
_hy=v.op_Mask_hy,
_mask_x0=v.op_Mask_mask_x0,
_mask_y0=v.op_Mask_mask_y0,
))
pp.append(v.op_Mask_pp)
elif el_name == 'Watchpoint2':
# Watchpoint2: watch 50.6m
pass
if want_final_propagation:
pp.append(v.op_fin_pp)
return srwlib.SRWLOptC(el, pp)
varParam = [
['name', 's', 'Mask example', 'simulation name'],
#---Data Folder
['fdir', 's', '', 'folder (directory) name for reading-in input and saving output data files'],
['gbm_x', 'f', 0.0, 'average horizontal coordinates of waist [m]'],
['gbm_y', 'f', 0.0, 'average vertical coordinates of waist [m]'],
['gbm_z', 'f', 0.0, 'average longitudinal coordinate of waist [m]'],
['gbm_xp', 'f', 0.0, 'average horizontal angle at waist [rad]'],
    ['gbm_yp', 'f', 0.0, 'average vertical angle at waist [rad]'],
['gbm_ave', 'f', 9000.0, 'average photon energy [eV]'],
['gbm_pen', 'f', 0.001, 'energy per pulse [J]'],
['gbm_rep', 'f', 1, 'rep. rate [Hz]'],
['gbm_pol', 'f', 1, 'polarization 1- lin. hor., 2- lin. vert., 3- lin. 45 deg., 4- lin.135 deg., 5- circ. right, 6- circ. left'],
['gbm_sx', 'f', 3e-06, 'rms beam size vs horizontal position [m] at waist (for intensity)'],
['gbm_sy', 'f', 3e-06, 'rms beam size vs vertical position [m] at waist (for intensity)'],
['gbm_st', 'f', 1e-13, 'rms pulse duration [s] (for intensity)'],
['gbm_mx', 'f', 0, 'transverse Gauss-Hermite mode order in horizontal direction'],
['gbm_my', 'f', 0, 'transverse Gauss-Hermite mode order in vertical direction'],
['gbm_ca', 's', 'c', 'treat _sigX, _sigY as sizes in [m] in coordinate representation (_presCA="c") or as angular divergences in [rad] in angular representation (_presCA="a")'],
['gbm_ft', 's', 't', 'treat _sigT as pulse duration in [s] in time domain/representation (_presFT="t") or as bandwidth in [eV] in frequency domain/representation (_presFT="f")'],
#---Calculation Types
# Electron Trajectory
['tr', '', '', 'calculate electron trajectory', 'store_true'],
['tr_cti', 'f', 0.0, 'initial time moment (c*t) for electron trajectory calculation [m]'],
['tr_ctf', 'f', 0.0, 'final time moment (c*t) for electron trajectory calculation [m]'],
['tr_np', 'f', 10000, 'number of points for trajectory calculation'],
['tr_mag', 'i', 1, 'magnetic field to be used for trajectory calculation: 1- approximate, 2- accurate'],
['tr_fn', 's', 'res_trj.dat', 'file name for saving calculated trajectory data'],
    ['tr_pl', 's', '', 'plot the resulting trajectory in graph(s): ""- dont plot, otherwise the string should list the trajectory components to plot'],
#Single-Electron Spectrum vs Photon Energy
['ss', '', '', 'calculate single-e spectrum vs photon energy', 'store_true'],
['ss_ei', 'f', 100.0, 'initial photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ef', 'f', 20000.0, 'final photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ne', 'i', 10000, 'number of points vs photon energy for single-e spectrum vs photon energy calculation'],
['ss_x', 'f', 0.0, 'horizontal position [m] for single-e spectrum vs photon energy calculation'],
['ss_y', 'f', 0.0, 'vertical position [m] for single-e spectrum vs photon energy calculation'],
['ss_meth', 'i', 1, 'method to use for single-e spectrum vs photon energy calculation: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['ss_prec', 'f', 0.01, 'relative precision for single-e spectrum vs photon energy calculation (nominal value is 0.01)'],
['ss_pol', 'i', 6, 'polarization component to extract after spectrum vs photon energy calculation: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['ss_mag', 'i', 1, 'magnetic field to be used for single-e spectrum vs photon energy calculation: 1- approximate, 2- accurate'],
['ss_ft', 's', 'f', 'presentation/domain: "f"- frequency (photon energy), "t"- time'],
['ss_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['ss_fn', 's', 'res_spec_se.dat', 'file name for saving calculated single-e spectrum vs photon energy'],
['ss_pl', 's', '', 'plot the resulting single-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#Multi-Electron Spectrum vs Photon Energy (taking into account e-beam emittance, energy spread and collection aperture size)
['sm', '', '', 'calculate multi-e spectrum vs photon energy', 'store_true'],
['sm_ei', 'f', 100.0, 'initial photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ef', 'f', 20000.0, 'final photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ne', 'i', 10000, 'number of points vs photon energy for multi-e spectrum vs photon energy calculation'],
['sm_x', 'f', 0.0, 'horizontal center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_rx', 'f', 0.001, 'range of horizontal position / horizontal aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_nx', 'i', 1, 'number of points vs horizontal position for multi-e spectrum vs photon energy calculation'],
['sm_y', 'f', 0.0, 'vertical center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_ry', 'f', 0.001, 'range of vertical position / vertical aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_ny', 'i', 1, 'number of points vs vertical position for multi-e spectrum vs photon energy calculation'],
['sm_mag', 'i', 1, 'magnetic field to be used for calculation of multi-e spectrum spectrum or intensity distribution: 1- approximate, 2- accurate'],
['sm_hi', 'i', 1, 'initial UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_hf', 'i', 15, 'final UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_prl', 'f', 1.0, 'longitudinal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_pra', 'f', 1.0, 'azimuthal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_meth', 'i', -1, 'method to use for spectrum vs photon energy calculation in case of arbitrary input magnetic field: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler", -1- dont use this accurate integration method (rather use approximate if possible)'],
['sm_prec', 'f', 0.01, 'relative precision for spectrum vs photon energy calculation in case of arbitrary input magnetic field (nominal value is 0.01)'],
['sm_nm', 'i', 1, 'number of macro-electrons for calculation of spectrum in case of arbitrary input magnetic field'],
['sm_na', 'i', 5, 'number of macro-electrons to average on each node at parallel (MPI-based) calculation of spectrum in case of arbitrary input magnetic field'],
['sm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons) for intermediate intensity at calculation of multi-electron spectrum in case of arbitrary input magnetic field'],
['sm_type', 'i', 1, 'calculate flux (=1) or flux per unit surface (=2)'],
['sm_pol', 'i', 6, 'polarization component to extract after calculation of multi-e flux or intensity: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['sm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
    ['sm_fn', 's', 'res_spec_me.dat', 'file name for saving calculated multi-e spectrum vs photon energy'],
['sm_pl', 's', '', 'plot the resulting spectrum-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#to add options for the multi-e calculation from "accurate" magnetic field
#Power Density Distribution vs horizontal and vertical position
['pw', '', '', 'calculate SR power density distribution', 'store_true'],
['pw_x', 'f', 0.0, 'central horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_rx', 'f', 0.015, 'range of horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_nx', 'i', 100, 'number of points vs horizontal position for calculation of power density distribution'],
['pw_y', 'f', 0.0, 'central vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ry', 'f', 0.015, 'range of vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ny', 'i', 100, 'number of points vs vertical position for calculation of power density distribution'],
['pw_pr', 'f', 1.0, 'precision factor for calculation of power density distribution'],
['pw_meth', 'i', 1, 'power density computation method (1- "near field", 2- "far field")'],
    ['pw_zst', 'f', 0., 'initial longitudinal position along electron trajectory of power density distribution (effective if pw_zst < pw_zfi)'],
    ['pw_zfi', 'f', 0., 'final longitudinal position along electron trajectory of power density distribution (effective if pw_zst < pw_zfi)'],
['pw_mag', 'i', 1, 'magnetic field to be used for power density calculation: 1- approximate, 2- accurate'],
['pw_fn', 's', 'res_pow.dat', 'file name for saving calculated power density distribution'],
['pw_pl', 's', '', 'plot the resulting power density distribution in a graph: ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
#Single-Electron Intensity distribution vs horizontal and vertical position
['si', '', '', 'calculate single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position', 'store_true'],
#Single-Electron Wavefront Propagation
['ws', '', '', 'calculate single-electron (/ fully coherent) wavefront propagation', 'store_true'],
#Multi-Electron (partially-coherent) Wavefront Propagation
['wm', '', '', 'calculate multi-electron (/ partially coherent) wavefront propagation', 'store_true'],
['w_e', 'f', 9000.0, 'photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ef', 'f', -1.0, 'final photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ne', 'i', 1, 'number of points vs photon energy for calculation of intensity distribution'],
['w_x', 'f', 0.0, 'central horizontal position [m] for calculation of intensity distribution'],
['w_rx', 'f', 0.002, 'range of horizontal position [m] for calculation of intensity distribution'],
['w_nx', 'i', 2048, 'number of points vs horizontal position for calculation of intensity distribution'],
['w_y', 'f', 0.0, 'central vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ry', 'f', 0.002, 'range of vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ny', 'i', 2048, 'number of points vs vertical position for calculation of intensity distribution'],
['w_smpf', 'f', 1.0, 'sampling factor for calculation of intensity distribution vs horizontal and vertical position'],
['w_meth', 'i', 2, 'method to use for calculation of intensity distribution vs horizontal and vertical position: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['w_prec', 'f', 0.01, 'relative precision for calculation of intensity distribution vs horizontal and vertical position'],
['w_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['si_pol', 'i', 6, 'polarization component to extract after calculation of intensity distribution: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['si_type', 'i', 0, 'type of a characteristic to be extracted after calculation of intensity distribution: 0- Single-Electron Intensity, 1- Multi-Electron Intensity, 2- Single-Electron Flux, 3- Multi-Electron Flux, 4- Single-Electron Radiation Phase, 5- Re(E): Real part of Single-Electron Electric Field, 6- Im(E): Imaginary part of Single-Electron Electric Field, 7- Single-Electron Intensity, integrated over Time or Photon Energy'],
['w_mag', 'i', 1, 'magnetic field to be used for calculation of intensity distribution vs horizontal and vertical position: 1- approximate, 2- accurate'],
['si_fn', 's', 'res_int_se.dat', 'file name for saving calculated single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position'],
['si_pl', 's', '', 'plot the input intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['ws_fni', 's', 'res_int_pr_se.dat', 'file name for saving propagated single-e intensity distribution vs horizontal and vertical position'],
['ws_pl', 's', '', 'plot the resulting intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['wm_nm', 'i', 1000, 'number of macro-electrons (coherent wavefronts) for calculation of multi-electron wavefront propagation'],
['wm_na', 'i', 5, 'number of macro-electrons (coherent wavefronts) to average on each node for parallel (MPI-based) calculation of multi-electron wavefront propagation'],
['wm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons / coherent wavefronts) for intermediate intensity at multi-electron wavefront propagation calculation'],
['wm_ch', 'i', 0, 'type of a characteristic to be extracted after calculation of multi-electron wavefront propagation: #0- intensity (s0); 1- four Stokes components; 2- mutual intensity cut vs x; 3- mutual intensity cut vs y; 40- intensity(s0), mutual intensity cuts and degree of coherence vs X & Y'],
['wm_ap', 'i', 0, 'switch specifying representation of the resulting Stokes parameters: coordinate (0) or angular (1)'],
['wm_x0', 'f', 0.0, 'horizontal center position for mutual intensity cut calculation'],
['wm_y0', 'f', 0.0, 'vertical center position for mutual intensity cut calculation'],
['wm_ei', 'i', 0, 'integration over photon energy is required (1) or not (0); if the integration is required, the limits are taken from w_e, w_ef'],
['wm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
['wm_am', 'i', 0, 'multi-electron integration approximation method: 0- no approximation (use the standard 5D integration method), 1- integrate numerically only over e-beam energy spread and use convolution to treat transverse emittance'],
['wm_fni', 's', 'res_int_pr_me.dat', 'file name for saving propagated multi-e intensity distribution vs horizontal and vertical position'],
['wm_ff', 's', 'ascii', 'format of file name for saving propagated multi-e intensity distribution vs horizontal and vertical position (ascii and hdf5 supported)'],
['wm_nmm', 'i', 1, 'number of MPI masters to use'],
['wm_ncm', 'i', 100, 'number of Coherent Modes to calculate'],
['wm_acm', 's', 'SP', 'coherent mode decomposition algorithm to be used (supported algorithms are: "SP" for SciPy, "SPS" for SciPy Sparse, "PM" for Primme, based on names of software packages)'],
['wm_nop', '', '', 'switch forcing to do calculations ignoring any optics defined (by set_optics function)', 'store_true'],
    ['wm_fnmi', 's', '', 'file name of input cross-spectral density / mutual intensity; if this file name is supplied, the initial cross-spectral density (for such operations as coherent mode decomposition) will not be calculated, but rather it will be taken from that file.'],
['wm_fncm', 's', '', 'file name of input coherent modes; if this file name is supplied, the eventual partially-coherent radiation propagation simulation will be done based on propagation of the coherent modes from that file.'],
['wm_fbk', '', '', 'create backup file(s) with propagated multi-e intensity distribution vs horizontal and vertical position and other radiation characteristics', 'store_true'],
# Optics parameters
['op_r', 'f', 20.0, 'longitudinal position of the first optical element [m]'],
# Former appParam:
    ['rs_type', 's', 'g', 'source type, (u) idealized undulator, (t) tabulated undulator, (m) multipole, (g) gaussian beam'],
#---Beamline optics:
# VFM: ellipsoidMirror
['op_VFM_hfn', 's', 'None', 'heightProfileFile'],
['op_VFM_dim', 's', 'x', 'orientation'],
['op_VFM_p', 'f', 50.0, 'firstFocusLength'],
['op_VFM_q', 'f', 0.4, 'focalLength'],
['op_VFM_ang', 'f', 0.003, 'grazingAngle'],
['op_VFM_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_VFM_size_tang', 'f', 0.2, 'tangentialSize'],
['op_VFM_size_sag', 'f', 0.01, 'sagittalSize'],
['op_VFM_nvx', 'f', 0.0, 'normalVectorX'],
['op_VFM_nvy', 'f', 0.999995500003375, 'normalVectorY'],
['op_VFM_nvz', 'f', -0.002999995500002025, 'normalVectorZ'],
['op_VFM_tvx', 'f', 0.0, 'tangentialVectorX'],
['op_VFM_tvy', 'f', -0.002999995500002025, 'tangentialVectorY'],
['op_VFM_x', 'f', 0.0, 'horizontalOffset'],
['op_VFM_y', 'f', 0.0, 'verticalOffset'],
# VFM_HFM: drift
['op_VFM_HFM_L', 'f', 0.20000000000000284, 'length'],
# HFM: ellipsoidMirror
['op_HFM_hfn', 's', 'None', 'heightProfileFile'],
['op_HFM_dim', 's', 'x', 'orientation'],
['op_HFM_p', 'f', 50.0, 'firstFocusLength'],
['op_HFM_q', 'f', 0.2, 'focalLength'],
['op_HFM_ang', 'f', 0.003, 'grazingAngle'],
['op_HFM_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_HFM_size_tang', 'f', 0.2, 'tangentialSize'],
['op_HFM_size_sag', 'f', 0.01, 'sagittalSize'],
['op_HFM_nvx', 'f', 0.999995500003375, 'normalVectorX'],
['op_HFM_nvy', 'f', 0.0, 'normalVectorY'],
['op_HFM_nvz', 'f', -0.002999995500002025, 'normalVectorZ'],
['op_HFM_tvx', 'f', -0.002999995500002025, 'tangentialVectorX'],
['op_HFM_tvy', 'f', 0.0, 'tangentialVectorY'],
['op_HFM_x', 'f', 0.0, 'horizontalOffset'],
['op_HFM_y', 'f', 0.0, 'verticalOffset'],
# HFM_Watchpoint: drift
['op_HFM_Watchpoint_L', 'f', 0.19999999999999574, 'length'],
# Watchpoint_Mask: drift
['op_Watchpoint_Mask_L', 'f', 0.20000000000000284, 'length'],
# Mask: mask
['op_Mask_delta', 'f', 1.0, 'refractiveIndex'],
['op_Mask_atten_len', 'f', 1.0, 'attenuationLength'],
['op_Mask_thick', 'f', 1.0, 'maskThickness'],
['op_Mask_grid_sh', 'f', 0, 'gridShape'],
['op_Mask_grid_dx', 'f', 5e-06, 'horizontalGridDimension'],
['op_Mask_grid_dy', 'f', 5e-06, 'verticalGridDimension'],
['op_Mask_pitch_x', 'f', 2e-05, 'horizontalGridPitch'],
['op_Mask_pitch_y', 'f', 2e-05, 'verticalGridPitch'],
['op_Mask_gridTiltAngle', 'f', 0.4363323129985824, 'gridTiltAngle'],
['op_Mask_hx', 'f', 7.319999999999999e-07, 'horizontalSamplingInterval'],
['op_Mask_hy', 'f', 7.319999999999999e-07, 'verticalSamplingInterval'],
['op_Mask_mask_x0', 'f', 0.0, 'horizontalMaskCoordinate'],
['op_Mask_mask_y0', 'f', 0.0, 'verticalMaskCoordinate'],
['op_Mask_mask_Nx', 'i', 1024, 'horizontalPixelsNumber'],
['op_Mask_mask_Ny', 'i', 1024, 'verticalPixelsNumber'],
['op_Mask_grid_nx', 'i', 21, 'horizontalGridsNumber'],
['op_Mask_grid_ny', 'i', 21, 'verticalGridsNumber'],
#---Propagation parameters
['op_VFM_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'VFM'],
['op_VFM_HFM_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'VFM_HFM'],
['op_HFM_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'HFM'],
['op_HFM_Watchpoint_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'HFM_Watchpoint'],
['op_Watchpoint_Mask_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Watchpoint_Mask'],
['op_Mask_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Mask'],
['op_fin_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'final post-propagation (resize) parameters'],
#[ 0]: Auto-Resize (1) or not (0) Before propagation
#[ 1]: Auto-Resize (1) or not (0) After propagation
#[ 2]: Relative Precision for propagation with Auto-Resizing (1. is nominal)
#[ 3]: Allow (1) or not (0) for semi-analytical treatment of the quadratic (leading) phase terms at the propagation
#[ 4]: Do any Resizing on Fourier side, using FFT, (1) or not (0)
#[ 5]: Horizontal Range modification factor at Resizing (1. means no modification)
#[ 6]: Horizontal Resolution modification factor at Resizing
#[ 7]: Vertical Range modification factor at Resizing
#[ 8]: Vertical Resolution modification factor at Resizing
#[ 9]: Type of wavefront Shift before Resizing (not yet implemented)
#[10]: New Horizontal wavefront Center position after Shift (not yet implemented)
#[11]: New Vertical wavefront Center position after Shift (not yet implemented)
#[12]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Horizontal Coordinate
#[13]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Vertical Coordinate
#[14]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Longitudinal Coordinate
#[15]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Horizontal Coordinate
#[16]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Vertical Coordinate
]
def epilogue():
pass
def main():
v = srwl_bl.srwl_uti_parse_options(srwl_bl.srwl_uti_ext_options(varParam), use_sys_argv=True)
names = ['VFM','VFM_HFM','HFM','HFM_Watchpoint','Watchpoint','Watchpoint_Mask','Mask','Watchpoint2']
op = set_optics(v, names, True)
v.ws = True
v.ws_pl = 'xy'
v.si = True
v.si_pl = 'xy'
srwl_bl.SRWLBeamline(_name=v.name).calc_all(v, op)
main()
epilogue()
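# Example command-line invocation (a sketch; each varParam entry above is
# assumed to be exposed as a --<name> option by srwl_uti_parse_options):
#
#     python mask-example.py --w_e=9000.0 --w_smpf=0.5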
| radiasoft/sirepo | tests/template/srw_generate_data/mask-example.py | Python | apache-2.0 | 26,164 | ["Gaussian"] | 47f0f5f5b57a08613359e65738ca257a8ec02409fa5afc17a8449c58bca651bf |
#!/usr/bin/env python
import os
import sys
import glob
import json
import numpy as np
import pandas as pd
from lxml import objectify
def read_xml_sidecar(filepath):
"""
Read a CMTK xml sidecar file.
Returns
=======
lxml.objectify
"""
abs_path = os.path.abspath(filepath)
with open(abs_path, 'rb') as fi:
lines = fi.readlines()
lines.insert(1, '<root>')
lines.append('</root>')
string = ''.join(lines)
strip_ge = string.replace('dicom:GE:', '')
strip_dicom = strip_ge.replace('dicom:','')
result = objectify.fromstring(strip_dicom)
return result
def get_array(array_string):
"""
Parse an array from XML string
Returns
=======
np.array
"""
    values = array_string.text.split(' ')
    return np.fromiter(values, np.float64)
def get_gradient_table(parsed_sidecar, decimals=None):
"""
Get the bvector table for a single image
Returns
=======
np.array (rounded to 1 decimal)
"""
b_vector = get_array(parsed_sidecar.mr.dwi.bVector)
b_vector_image = get_array(parsed_sidecar.mr.dwi.bVectorImage)
b_vector_standard = get_array(parsed_sidecar.mr.dwi.bVectorStandard)
    if decimals is None:
        decimals = 1
return np.around([b_vector,
b_vector_image,
b_vector_standard],
decimals=decimals)
def get_cases(cases_root, case=None):
"""
Get a list of cases from root dir, optionally for a single case
"""
match = 'NCANDA_S*'
if case:
match = case
return glob.glob(os.path.join(cases_root, match))
def get_dti_stack(case, arm=None, event=None):
if arm:
path = os.path.join(case, arm)
else:
path = os.path.join(case, '*')
if event:
path = os.path.join(path, event)
else:
path = os.path.join(path,'*')
path = os.path.join(path, 'diffusion/native/dti60b1000/*.xml')
return glob.glob(path)
def get_all_gradients(dti_stack, decimals=None):
"""
Parses a list of dti sidecar files for subject.
Returns
=======
list of np.array
"""
    gradients_per_frame = list()
    for xml in dti_stack:
        sidecar = read_xml_sidecar(xml)
        gradients_per_frame.append(get_gradient_table(sidecar,
                                                      decimals=decimals))
    return gradients_per_frame
def get_site_scanner(site):
"""
Returns the "ground truth" case for gradients.
"""
site_scanner = dict(A='Siemens',
B='GE',
C='GE',
D='Siemens',
E='GE')
return site_scanner.get(site)
def get_ground_truth_gradients(args=None):
"""
    Return a dictionary mapping scanner name to its ground-truth gradients
"""
# Choose arbitrary cases for ground truth
test_path = '/fs/ncanda-share/pipeline/cases'
scanner_subject = dict(Siemens='NCANDA_S00061',
GE='NCANDA_S00033')
# Paths to scanner specific gradients
siemens_path = os.path.join(test_path, scanner_subject.get('Siemens'))
ge_path = os.path.join(test_path, scanner_subject.get('GE'))
# Get ground truth for standard baseline
test_arm = 'standard'
test_event = 'baseline'
# Gets files for each scanner
siemens_stack = get_dti_stack(siemens_path, arm=test_arm, event=test_event)
ge_stack = get_dti_stack(ge_path, arm=test_arm, event=test_event)
siemens_stack.sort()
ge_stack.sort()
# Parse the xml files to get scanner specific gradients per frame
siemens_gradients = get_all_gradients(siemens_stack, decimals=args.decimals)
ge_gradients = get_all_gradients(ge_stack, decimals=args.decimals)
return dict(Siemens=siemens_gradients, GE=ge_gradients)
def main(args=None):
# Get the gradient tables for all cases and compare to ground truth
cases = get_cases(args.base_dir, case=args.case)
# Demographics from pipeline to grab case to scanner mapping
demo_path = '/fs/ncanda-share/pipeline/summaries/demographics.csv'
demographics = pd.read_csv(demo_path, index_col=['subject',
'arm',
'visit'])
gradient_map = get_ground_truth_gradients(args=args)
for case in cases:
if args.verbose:
print("Processing: {}".format(case))
# Get the case's site
sid = os.path.basename(case)
site = demographics.loc[sid, args.arm, args.event].site
scanner = get_site_scanner(site)
gradients = gradient_map.get(scanner)
case_dti = os.path.join(args.base_dir, case)
case_stack = get_dti_stack(case_dti, arm=args.arm, event=args.event)
case_stack.sort()
case_gradients = get_all_gradients(case_stack, decimals=args.decimals)
errors = list()
for idx, frame in enumerate(case_gradients):
# if there is a frame that doesn't match, report it.
if not (gradients[idx]==frame).all():
errors.append(idx)
if errors:
key = os.path.join(case, args.arm, args.event, 'diffusion/native/dti60b1000')
result = dict(subject_site_id=key,
frames=errors,
error="Gradient tables do not match for frames.")
print(json.dumps(result, sort_keys=True))
if __name__ == '__main__':
import argparse
formatter = argparse.RawDescriptionHelpFormatter
default = 'default: %(default)s'
parser = argparse.ArgumentParser(prog="check_gradient_tables.py",
description=__doc__,
formatter_class=formatter)
parser.add_argument('-a', '--arm', dest="arm",
help="Study arm. {}".format(default),
default='standard')
parser.add_argument('-b', '--base-dir', dest="base_dir",
help="Study base directory. {}".format(default),
default='/fs/ncanda-share/pipeline/cases')
parser.add_argument('-d', '--decimals', dest="decimals",
help="Number of decimals. {}".format(default),
default=3)
parser.add_argument('-e', '--event', dest="event",
help="Study event. {}".format(default),
default='baseline')
parser.add_argument('-c', '--case', dest="case",
help="Study case. {}".format(default),
default=None)
parser.add_argument('-v', '--verbose', dest="verbose",
help="Turn on verbose", action='store_true')
argv = parser.parse_args()
sys.exit(main(args=argv))
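# Example invocation (a sketch; the paths are illustrative). Any frame whose
# gradient table deviates from the scanner's ground truth is reported as a
# JSON record on stdout:
#
#     python check_gradient_tables.py -b /fs/ncanda-share/pipeline/cases \
#         -a standard -e baseline -d 3 -v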
| abonil91/ncanda-data-integration | scripts/xnat/check_gradient_tables.py | Python | bsd-3-clause | 6,786 | ["VisIt"] | 70b23d9a1bb7347960f26760aed7a3891509e2ef2a2551d6f5d03b36fcc624c5 |
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# Copyright (c) 2013, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from setuptools import setup
from glob import glob
__version__ = "0.2.0-dev"
classes = """
Development Status :: 3 - Alpha
License :: OSI Approved :: BSD License
Topic :: Scientific/Engineering :: Bio-Informatics
Topic :: Software Development :: Libraries :: Application Frameworks
Topic :: Software Development :: Libraries :: Python Modules
Programming Language :: Python
Programming Language :: Python :: 2.7
Programming Language :: Python :: Implementation :: CPython
Operating System :: POSIX :: Linux
Operating System :: MacOS :: MacOS X
"""
with open('README.rst') as f:
long_description = f.read()
classifiers = [s.strip() for s in classes.split('\n') if s]
setup(name='qiita-spots',
version=__version__,
long_description=long_description,
license="BSD",
description='Qiita: Spot Patterns',
author="Qiita development team",
author_email="qiita.help@gmail.com",
url='https://github.com/biocore/qiita',
test_suite='nose.collector',
packages=['qiita_core',
'qiita_db',
'qiita_db/handlers',
'qiita_db/metadata_template',
'qiita_pet',
'qiita_pet/uimodules',
'qiita_pet/handlers',
'qiita_pet/handlers/study_handlers',
'qiita_pet/handlers/api_proxy',
'qiita_ware'
],
include_package_data=True,
package_data={
'qiita_core': [
              'support_files/config_test.cfg',
'support_files/server.crt',
'support_files/server.csr',
'support_files/server.key'
],
'qiita_db': [
'support_files/*.sql',
'support_files/patches/*.sql',
'support_files/patches/python_patches/*.py',
'support_files/test_data/preprocessed_data/*',
'support_files/test_data/processed_data/*',
'support_files/test_data/raw_data/*',
'support_files/test_data/analysis/*',
'support_files/test_data/reference/*',
'support_files/test_data/job/*.txt',
'support_files/test_data/job/2_test_folder/*',
'support_files/test_data/uploads/1/a_folder/*.txt',
'support_files/test_data/uploads/1/.hidden_file.txt',
'support_files/test_data/uploads/1/uploaded_file.txt',
'support_files/test_data/templates/*',
'support_files/work_data/*'],
'qiita_pet': [
'static/css/*.css', 'static/img/*.png',
'static/img/*.gif', 'static/img/*.ico',
'static/js/*.js', 'static/vendor/css/*.css',
'static/vendor/css/images/*.png',
'static/vendor/css/*.png',
'static/vendor/fonts/glyphicons*.*',
'static/vendor/images/*.png',
'static/vendor/js/*.js',
'results/admin/jobname/*.html',
'templates/*.html',
'templates/study_description_templates/*.html',
'support_files/config_portal.cfg',
'support_files/doc/Makefile',
'support_files/doc/README.md',
'support_files/doc/source/conf.py',
'support_files/doc/source/*.rst',
'support_files/doc/source/tutorials/*.rst',
'support_files/doc/source/admin/*.rst',
'support_files/doc/source/qiita-philosophy/*.rst',
'support_files/doc/source/admin/images/*.png',
'support_files/doc/source/tutorials/images/*.png',
'support_files/doc/source/qiita-philosophy/images/*.png',
'support_files/doc/source/_static/*.png'
]},
scripts=glob('scripts/*'),
extras_require={'test': ["nose >= 0.10.1", "pep8", 'mock']},
install_requires=['psycopg2', 'click >= 3.3', 'future',
'bcrypt', 'pandas >= 0.17', 'numpy >= 1.7',
'tornado==3.1.1', 'toredis', 'redis', 'six',
'ipython[all] >= 2.4.1, < 2.5', 'pyparsing',
'h5py >= 2.3.1', 'biom-format', 'natsort', 'networkx',
'scikit-bio >= 0.2.3, < 0.3.0', 'wtforms == 2.0.1',
'qiime >= 1.9.0, < 1.10.0', 'moi',
'sphinx-bootstrap-theme', 'Sphinx >= 1.2.2',
'gitpython'],
classifiers=classifiers
)
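# Typical developer workflow (a sketch, assuming a standard setuptools
# environment; the commands are illustrative):
#
#     pip install -e .[test]   # editable install with the 'test' extra
#     python setup.py test     # runs the suite declared via test_suite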
| squirrelo/qiita | setup.py | Python | bsd-3-clause | 4,890 | ["scikit-bio"] | 15047999cb2be5fd4ab28b12b93b7af065535202349fa201e9db6709bb8eca00 |
"""
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
    scores_ : list of float
        if computed, value of the objective function (to be maximized)
        at each iteration
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
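            # gamma_ below is the effective number of well-determined
            # parameters (MacKay's evidence framework); the lambda_ and
            # alpha_ updates are the standard evidence-maximization
            # re-estimation rules.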
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
    the regression model are assumed to follow Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedure (Evidence Maximization)
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
    scores_ : list of float
        if computed, value of the objective function (to be maximized)
        at each iteration
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
            Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
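# A small usage sketch for ARD pruning (illustrative, synthetic data):
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     X = rng.randn(100, 10)
#     w = np.zeros(10)
#     w[:3] = [1., 2., 3.]              # only three informative features
#     y = np.dot(X, w) + 0.1 * rng.randn(100)
#     clf = ARDRegression(compute_score=True).fit(X, y)
#     # lambda_ grows large for the seven irrelevant features, so their
#     # coefficients are pruned toward zero; scores_ tracks the evidence.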
| valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/sklearn/linear_model/bayes.py | Python | gpl-2.0 | 15,248 | ["Gaussian"] | ad9552c34e7f60c363fed263829c9976e7576c8f14796fc033398048c63b14d4 |
from os.path import abspath, basename, join, exists
from os.path import dirname
from os.path import relpath
from os import listdir, sep
from re import findall
from io import open
from ..staging import COMMAND_VERSION_FILENAME
from ..action_mapper import FileActionMapper
from ..action_mapper import path_type
from ..action_mapper import MessageAction
from ..util import PathHelper
from ..util import directory_files
from logging import getLogger
log = getLogger(__name__)
def submit_job(client, client_job_description, job_config=None):
"""
"""
file_stager = FileStager(client, client_job_description, job_config)
rebuilt_command_line = file_stager.get_command_line()
job_id = file_stager.job_id
launch_kwds = dict(
command_line=rebuilt_command_line,
dependencies_description=client_job_description.dependencies_description,
env=client_job_description.env,
)
if file_stager.job_config:
launch_kwds["job_config"] = file_stager.job_config
remote_staging = {}
remote_staging_actions = file_stager.transfer_tracker.remote_staging_actions
if remote_staging_actions:
remote_staging["setup"] = remote_staging_actions
# Somehow make the following optional.
remote_staging["action_mapper"] = file_stager.action_mapper.to_dict()
remote_staging["client_outputs"] = client_job_description.client_outputs.to_dict()
if remote_staging:
launch_kwds["remote_staging"] = remote_staging
client.launch(**launch_kwds)
return job_id
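# A minimal sketch of how a caller might use submit_job (hypothetical; the
# client and job-description objects are assumed to come from the
# surrounding LWR client code):
#
#     job_id = submit_job(client, client_job_description)
#     # ... later, poll the client for status and stage outputs back.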
class FileStager(object):
"""
Objects of the FileStager class interact with an LWR client object to
stage the files required to run jobs on a remote LWR server.
**Parameters**
client : JobClient
LWR client object.
client_job_description : client_job_description
Description of client view of job to stage and execute remotely.
"""
def __init__(self, client, client_job_description, job_config):
"""
"""
self.client = client
self.command_line = client_job_description.command_line
self.config_files = client_job_description.config_files
self.input_files = client_job_description.input_files
self.output_files = client_job_description.output_files
self.tool_id = client_job_description.tool.id
self.tool_version = client_job_description.tool.version
self.tool_dir = abspath(client_job_description.tool.tool_dir)
self.working_directory = client_job_description.working_directory
self.version_file = client_job_description.version_file
self.arbitrary_files = client_job_description.arbitrary_files
self.rewrite_paths = client_job_description.rewrite_paths
        # Set up job inputs; these will need to be rewritten before
        # shipping off to the remote LWR server.
self.job_inputs = JobInputs(self.command_line, self.config_files)
self.action_mapper = FileActionMapper(client)
self.__handle_setup(job_config)
self.transfer_tracker = TransferTracker(client, self.path_helper, self.action_mapper, self.job_inputs, rewrite_paths=self.rewrite_paths)
self.__initialize_referenced_tool_files()
if self.rewrite_paths:
self.__initialize_referenced_arbitrary_files()
self.__upload_tool_files()
self.__upload_input_files()
self.__upload_working_directory_files()
self.__upload_arbitrary_files()
if self.rewrite_paths:
self.__initialize_output_file_renames()
self.__initialize_task_output_file_renames()
self.__initialize_config_file_renames()
self.__initialize_version_file_rename()
self.__handle_rewrites()
self.__upload_rewritten_config_files()
def __handle_setup(self, job_config):
if not job_config:
job_config = self.client.setup(self.tool_id, self.tool_version)
self.new_working_directory = job_config['working_directory']
self.new_outputs_directory = job_config['outputs_directory']
# Default configs_directory to match remote working_directory to mimic
# behavior of older LWR servers.
self.new_configs_directory = job_config.get('configs_directory', self.new_working_directory)
self.remote_separator = self.__parse_remote_separator(job_config)
self.path_helper = PathHelper(self.remote_separator)
# If remote LWR server assigned job id, use that otherwise
# just use local job_id assigned.
galaxy_job_id = self.client.job_id
self.job_id = job_config.get('job_id', galaxy_job_id)
if self.job_id != galaxy_job_id:
# Remote LWR server assigned an id different than the
# Galaxy job id, update client to reflect this.
self.client.job_id = self.job_id
self.job_config = job_config
def __parse_remote_separator(self, job_config):
separator = job_config.get("system_properties", {}).get("separator", None)
if not separator: # Legacy LWR
separator = job_config["path_separator"] # Poorly named
return separator
def __initialize_referenced_tool_files(self):
self.referenced_tool_files = self.job_inputs.find_referenced_subfiles(self.tool_dir)
def __initialize_referenced_arbitrary_files(self):
referenced_arbitrary_path_mappers = dict()
for mapper in self.action_mapper.unstructured_mappers():
mapper_pattern = mapper.to_pattern()
# TODO: Make more sophisticated, allow parent directories,
            # grabbing sibling files based on patterns, etc...
paths = self.job_inputs.find_pattern_references(mapper_pattern)
for path in paths:
if path not in referenced_arbitrary_path_mappers:
referenced_arbitrary_path_mappers[path] = mapper
for path, mapper in referenced_arbitrary_path_mappers.iteritems():
action = self.action_mapper.action(path, path_type.UNSTRUCTURED, mapper)
unstructured_map = action.unstructured_map(self.path_helper)
self.arbitrary_files.update(unstructured_map)
def __upload_tool_files(self):
for referenced_tool_file in self.referenced_tool_files:
self.transfer_tracker.handle_transfer(referenced_tool_file, path_type.TOOL)
def __upload_arbitrary_files(self):
for path, name in self.arbitrary_files.iteritems():
self.transfer_tracker.handle_transfer(path, path_type.UNSTRUCTURED, name=name)
def __upload_input_files(self):
for input_file in self.input_files:
self.__upload_input_file(input_file)
self.__upload_input_extra_files(input_file)
def __upload_input_file(self, input_file):
if self.__stage_input(input_file):
if exists(input_file):
self.transfer_tracker.handle_transfer(input_file, path_type.INPUT)
else:
message = "LWR: __upload_input_file called on empty or missing dataset." + \
" So such file: [%s]" % input_file
log.debug(message)
def __upload_input_extra_files(self, input_file):
files_path = "%s_files" % input_file[0:-len(".dat")]
if exists(files_path) and self.__stage_input(files_path):
for extra_file_name in directory_files(files_path):
extra_file_path = join(files_path, extra_file_name)
remote_name = self.path_helper.remote_name(relpath(extra_file_path, dirname(files_path)))
self.transfer_tracker.handle_transfer(extra_file_path, path_type.INPUT, name=remote_name)
def __upload_working_directory_files(self):
# Task manager stages files into working directory, these need to be
# uploaded if present.
working_directory_files = listdir(self.working_directory) if exists(self.working_directory) else []
for working_directory_file in working_directory_files:
path = join(self.working_directory, working_directory_file)
self.transfer_tracker.handle_transfer(path, path_type.WORKDIR)
def __initialize_version_file_rename(self):
version_file = self.version_file
if version_file:
remote_path = self.path_helper.remote_join(self.new_outputs_directory, COMMAND_VERSION_FILENAME)
self.transfer_tracker.register_rewrite(version_file, remote_path, path_type.OUTPUT)
def __initialize_output_file_renames(self):
for output_file in self.output_files:
remote_path = self.path_helper.remote_join(self.new_outputs_directory, basename(output_file))
self.transfer_tracker.register_rewrite(output_file, remote_path, path_type.OUTPUT)
def __initialize_task_output_file_renames(self):
for output_file in self.output_files:
name = basename(output_file)
task_file = join(self.working_directory, name)
remote_path = self.path_helper.remote_join(self.new_working_directory, name)
self.transfer_tracker.register_rewrite(task_file, remote_path, path_type.OUTPUT_WORKDIR)
def __initialize_config_file_renames(self):
for config_file in self.config_files:
remote_path = self.path_helper.remote_join(self.new_configs_directory, basename(config_file))
self.transfer_tracker.register_rewrite(config_file, remote_path, path_type.CONFIG)
def __handle_rewrites(self):
"""
        For each file that has been transferred and renamed, update the
        command_line and config files to reflect that rewrite.
"""
self.transfer_tracker.rewrite_input_paths()
def __upload_rewritten_config_files(self):
for config_file, new_config_contents in self.job_inputs.config_files.items():
self.transfer_tracker.handle_transfer(config_file, type=path_type.CONFIG, contents=new_config_contents)
def get_command_line(self):
"""
Returns the rewritten version of the command line to execute suitable
for remote host.
"""
return self.job_inputs.command_line
def __stage_input(self, file_path):
# If we have disabled path rewriting, just assume everything needs to be transferred,
# else check to ensure the file is referenced before transferring it.
return (not self.rewrite_paths) or self.job_inputs.path_referenced(file_path)
class JobInputs(object):
"""
Abstractions over dynamic inputs created for a given job (namely the command to
execute and created configfiles).
**Parameters**
command_line : str
Local command to execute for this job. (To be rewritten.)
    config_files : list of str
Config files created for this job. (To be rewritten.)
>>> import tempfile
>>> tf = tempfile.NamedTemporaryFile()
>>> def setup_inputs(tf):
... open(tf.name, "w").write(u"world /path/to/input the rest")
... inputs = JobInputs(u"hello /path/to/input", [tf.name])
... return inputs
>>> inputs = setup_inputs(tf)
>>> inputs.rewrite_paths(u"/path/to/input", u'C:\\input')
>>> inputs.command_line == u'hello C:\\\\input'
True
>>> inputs.config_files[tf.name] == u'world C:\\\\input the rest'
True
>>> tf.close()
>>> tf = tempfile.NamedTemporaryFile()
>>> inputs = setup_inputs(tf)
>>> inputs.find_referenced_subfiles('/path/to') == [u'/path/to/input']
True
>>> inputs.path_referenced('/path/to')
True
>>> inputs.path_referenced(u'/path/to')
True
>>> inputs.path_referenced('/path/to/input')
True
>>> inputs.path_referenced('/path/to/notinput')
False
>>> tf.close()
"""
def __init__(self, command_line, config_files):
self.command_line = command_line
self.config_files = {}
for config_file in config_files or []:
config_contents = _read(config_file)
self.config_files[config_file] = config_contents
def find_pattern_references(self, pattern):
referenced_files = set()
for input_contents in self.__items():
referenced_files.update(findall(pattern, input_contents))
return list(referenced_files)
def find_referenced_subfiles(self, directory):
"""
Return list of files below specified `directory` in job inputs. Could
use more sophisticated logic (match quotes to handle spaces, handle
subdirectories, etc...).
**Parameters**
directory : str
Full path to directory to search.
"""
pattern = r"(%s%s\S+)" % (directory, sep)
return self.find_pattern_references(pattern)
def path_referenced(self, path):
pattern = r"%s" % path
found = False
for input_contents in self.__items():
if findall(pattern, input_contents):
found = True
break
return found
def rewrite_paths(self, local_path, remote_path):
"""
Rewrite references to `local_path` with `remote_path` in job inputs.
"""
self.__rewrite_command_line(local_path, remote_path)
self.__rewrite_config_files(local_path, remote_path)
def __rewrite_command_line(self, local_path, remote_path):
self.command_line = self.command_line.replace(local_path, remote_path)
def __rewrite_config_files(self, local_path, remote_path):
for config_file, contents in self.config_files.items():
self.config_files[config_file] = contents.replace(local_path, remote_path)
def __items(self):
items = [self.command_line]
items.extend(self.config_files.values())
return items
class TransferTracker(object):
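    """
    Tracks files transferred to the remote LWR server and the path rewrites
    those transfers imply: `handle_transfer` stages individual paths, and
    `rewrite_input_paths` applies every recorded rename to the job inputs.
    """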
def __init__(self, client, path_helper, action_mapper, job_inputs, rewrite_paths):
self.client = client
self.path_helper = path_helper
self.action_mapper = action_mapper
self.job_inputs = job_inputs
self.rewrite_paths = rewrite_paths
self.file_renames = {}
self.remote_staging_actions = []
def handle_transfer(self, path, type, name=None, contents=None):
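        """
        Stage `path` of the given `type`: either push it to the server now
        (local staging action) or record a remote staging action for the
        server to perform later, registering any path rewrite that results.
        """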
action = self.__action_for_transfer(path, type, contents)
if action.staging_needed:
local_action = action.staging_action_local
if local_action:
response = self.client.put_file(path, type, name=name, contents=contents)
get_path = lambda: response['path']
else:
job_directory = self.client.job_directory
assert job_directory, "job directory required for action %s" % action
if not name:
name = basename(path)
self.__add_remote_staging_input(action, name, type)
get_path = lambda: job_directory.calculate_path(name, type)
register = self.rewrite_paths or type == 'tool' # Even if inputs not rewritten, tool must be.
if register:
self.register_rewrite(path, get_path(), type, force=True)
elif self.rewrite_paths:
path_rewrite = action.path_rewrite(self.path_helper)
if path_rewrite:
self.register_rewrite(path, path_rewrite, type, force=True)
# else: # No action for this file
def __add_remote_staging_input(self, action, name, type):
input_dict = dict(
name=name,
type=type,
action=action.to_dict(),
)
self.remote_staging_actions.append(input_dict)
def __action_for_transfer(self, path, type, contents):
if contents:
# If contents loaded in memory, no need to write out file and copy,
# just transfer.
action = MessageAction(contents=contents, client=self.client)
else:
if not exists(path):
message = "handle_tranfer called on non-existent file - [%s]" % path
log.warn(message)
raise Exception(message)
action = self.__action(path, type)
return action
def register_rewrite(self, local_path, remote_path, type, force=False):
action = self.__action(local_path, type)
if action.staging_needed or force:
self.file_renames[local_path] = remote_path
def rewrite_input_paths(self):
"""
        For each file that has been transferred and renamed, update the
        command_line and config files to reflect that rewrite.
"""
for local_path, remote_path in self.file_renames.items():
self.job_inputs.rewrite_paths(local_path, remote_path)
def __action(self, path, type):
return self.action_mapper.action(path, type)
def _read(path):
    """
    Utility method to quickly read small files (config files and tool
    wrappers) into memory as unicode text.
    """
    from io import open  # Python 2's builtin open() lacks the encoding argument
    input = open(path, "r", encoding="utf-8")
    try:
        return input.read()
    finally:
        input.close()
__all__ = ['submit_job']
| jmchilton/lwr | lwr/lwr_client/staging/up.py | Python | apache-2.0 | 17,093 | [
"Galaxy"
] | 752112fd069692b7edfb514328c5266e33d61218f80281fe2120c0495fb34647 |
"""
network/shape.py
Network class methods to deal with the each population's cell shape (morphology)
Contributors: salvadordura@gmail.com
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from neuron import h
# -----------------------------------------------------------------------------
# Calculate segment coordinates from 3d point coordinates
# -----------------------------------------------------------------------------
def calcSegCoords(self):
from .. import sim
if sim.cfg.createNEURONObj:
# Calculate relative seg coords for 1 cell per pop,
for pop in list(self.pops.values()):
if pop.cellModelClass == sim.CompartCell:
pop.calcRelativeSegCoords()
# Calculate abs seg coords for all cells
for cell in sim.net.compartCells:
cell.calcAbsSegCoords()
# -----------------------------------------------------------------------------
# Add 3D points to sections with simplified geometry
# -----------------------------------------------------------------------------
def defineCellShapes(self):
from .. import sim
if sim.cfg.createNEURONObj:
sim.net.compartCells = [c for c in sim.net.cells if type(c) is sim.CompartCell]
h.define_shape()
for cell in sim.net.compartCells:
cell.updateShape()
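
# A minimal usage sketch (assumes an initialized sim with a created network;
# these module-level functions are bound as Network methods):
#   sim.net.defineCellShapes()  # add 3D points to sections with stylized geometry
#   sim.net.calcSegCoords()     # compute per-segment coordinates from them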
| thekerrlab/netpyne | netpyne/network/shape.py | Python | mit | 1,508 | [
"NEURON"
] | ce59e3c0bd1291284c76cd98d4b5d56fa60f2c77edb897ce0a97e7d324bee29e |
from __future__ import print_function
from caffe import layers as L, params as P, to_proto
from caffe.proto import caffe_pb2
import caffe
def res_group(bottom, ks, nout, stride, pad, dropout, weight_filler=dict(type='msra'), project=False):
# branch 1
branch1 = bottom
    if project:
branch1 = L.Convolution(bottom, kernel_size=1, stride=1, num_output=nout, pad=0, bias_term=False, weight_filler=weight_filler)
branch1 = L.BatchNorm(branch1, in_place=False, param=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)])
branch1 = L.Scale(branch1, bias_term=True, in_place=True, filler=dict(value=1), bias_filler=dict(value=0))
branch1 = L.ReLU(branch1, in_place=True)
branch1 = L.Dropout(branch1, dropout_ratio=dropout)
# branch 2
branch2 = L.Convolution(bottom, kernel_size=ks, stride=stride, num_output=nout, pad=pad, bias_term=True, weight_filler=weight_filler, bias_filler=dict(type='constant', value=0))
branch2 = L.BatchNorm(branch2, in_place=False, param=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)])
branch2 = L.Scale(branch2, bias_term=True, in_place=True, filler=dict(value=1), bias_filler=dict(value=0))
branch2 = L.ReLU(branch2, in_place=True)
branch2 = L.Dropout(branch2, dropout_ratio=dropout)
branch2 = L.Convolution(branch2, kernel_size=ks, stride=stride, num_output=nout, pad=pad, bias_term=True, weight_filler=weight_filler, bias_filler=dict(type='constant', value=0))
branch2 = L.BatchNorm(branch2, in_place=False, param=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)])
branch2 = L.Scale(branch2, bias_term=True, in_place=True, filler=dict(value=1), bias_filler=dict(value=0))
branch2 = L.ReLU(branch2, in_place=True)
branch2 = L.Dropout(branch2, dropout_ratio=dropout)
# add
fuse = L.Eltwise(branch1, branch2)
return fuse
def dense_group(bottom, ks, nout, stride, pad, dropout, weight_filler=dict(type='msra')):
conv = L.Convolution(bottom, kernel_size=ks, stride=stride, num_output=nout, pad=pad, bias_term=True, weight_filler=weight_filler, bias_filler=dict(type='constant', value=0))
batch_norm = L.BatchNorm(conv, in_place=False, param=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)])
scale = L.Scale(batch_norm, bias_term=True, in_place=True, filler=dict(value=1), bias_filler=dict(value=0))
prelu = L.ReLU(scale, in_place=True)
drop = L.Dropout(prelu, dropout_ratio=dropout)
branch1 = bottom
branch2 = drop
return branch1, branch2
def add_layer(bottom, num_filter, dropout, res=False, project=False):
    if not res:
branch1, branch2 = dense_group(bottom, ks=3, nout=num_filter, stride=1, pad=1, dropout=dropout)
concate = L.Concat(branch1, branch2, axis=1)
return concate
else:
return res_group(bottom, ks=3, nout=num_filter, stride=1, pad=1, dropout=dropout, project=project)
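# A rough sketch of how the helpers above compose into one dense block
# (blob and variable names here are illustrative only):
#   model = bottom_blob
#   for _ in range(N):                              # N dense layers
#       model = add_layer(model, growth_rate, dropout)
#   model = transition(model, nchannels, dropout)   # 1x1 conv after the block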
def transition(bottom, num_filter, dropout, weight_filler=dict(type='msra')):
conv = L.Convolution(bottom, kernel_size=1, stride=1,
num_output=num_filter, pad=0, bias_term=False, weight_filler=weight_filler, bias_filler=dict(type='constant', value=0))
batch_norm = L.BatchNorm(conv, in_place=False, param=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)])
scale = L.Scale(batch_norm, bias_term=True, in_place=True, filler=dict(value=1), bias_filler=dict(value=0))
prelu = L.ReLU(scale, in_place=True)
if dropout > 1e-6:
drop = L.Dropout(prelu, dropout_ratio=dropout)
return drop
return prelu
def upsampleVGG(bottom, dropout, nout, upsample):
# weight_filler = dict(type='gaussian', std=0.01)
weight_filler = dict(type='msra')
if upsample <= 1:
if upsample == 1:
s, k, p = 1, 3, 1
else:
s, k, p = 2, 4, 1
conv = L.Convolution(bottom, kernel_size=k, stride=s, num_output=nout, pad=p, bias_term=True, weight_filler=weight_filler)
batch_norm = L.BatchNorm(conv, in_place=False, param=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)])
scale = L.Scale(batch_norm, bias_term=True, in_place=True, filler=dict(value=1), bias_filler=dict(value=0))
prelu = L.ReLU(scale, in_place=True)
drop = L.Dropout(prelu, dropout_ratio=dropout)
return drop
k = upsample * 2
s = upsample
conv = L.Convolution(bottom, kernel_size=1, stride=1, num_output=nout, pad=0, bias_term=False, weight_filler=weight_filler)
batch_norm = L.BatchNorm(conv, in_place=False, param=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)])
scale = L.Scale(batch_norm, bias_term=True, in_place=True, filler=dict(value=1), bias_filler=dict(value=0))
prelu = L.ReLU(scale, in_place=True)
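    # With k = 2*upsample, s = upsample and pad = (k-s)/2, the grouped
    # deconvolution below acts as fixed (lr_mult=0) bilinear upsampling by
    # `upsample`, one group per channel.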
deconv = L.Deconvolution(prelu, convolution_param=dict(kernel_size=k, stride=s, num_output=nout, group=nout, pad=(k-s)/2, bias_term=False, weight_filler=dict(type='bilinear')), param=[dict(lr_mult=0, decay_mult=0)])
drop = L.Dropout(deconv, dropout_ratio=dropout)
return drop
def make_VGG(bottom):
# stage 1
nout=64
conv1_1 = L.Convolution(bottom, name='conv1_1', kernel_size=3,num_output=nout, pad=1)
relu1_1 = L.ReLU(conv1_1, name='relu1_1')
conv1_2 = L.Convolution(relu1_1, name='conv1_2', kernel_size=3,num_output=nout, pad=1)
relu1_2 = L.ReLU(conv1_2, name='relu1_2')
pool1 = L.Pooling(relu1_2, name='pool1', pooling_param=dict(pool=P.Pooling.MAX, kernel_size=2, stride=2))
# stage 2
nout=128
conv2_1 = L.Convolution(pool1, name='conv2_1', kernel_size=3,num_output=nout, pad=1)
relu2_1 = L.ReLU(conv2_1, name='relu2_1')
conv2_2 = L.Convolution(relu2_1, name='conv2_2', kernel_size=3,num_output=nout, pad=1)
relu2_2 = L.ReLU(conv2_2, name='relu2_2')
pool2 = L.Pooling(relu2_2, name='pool2', pooling_param=dict(pool=P.Pooling.MAX, kernel_size=2, stride=2))
# stage 3
nout=256
conv3_1 = L.Convolution(pool2, name='conv3_1', kernel_size=3,num_output=nout, pad=1)
relu3_1 = L.ReLU(conv3_1, name='relu3_1')
conv3_2 = L.Convolution(relu3_1, name='conv3_2', kernel_size=3,num_output=nout, pad=1)
relu3_2 = L.ReLU(conv3_2, name='relu3_2')
conv3_3 = L.Convolution(relu3_2, name='conv3_3', kernel_size=3,num_output=nout, pad=1)
    relu3_3 = L.ReLU(conv3_3, name='relu3_3')
    pool3 = L.Pooling(relu3_3, name='pool3', pooling_param=dict(pool=P.Pooling.MAX, kernel_size=2, stride=2))
# stage 4
nout=512
conv4_1 = L.Convolution(pool3, name='conv4_1', kernel_size=3,num_output=nout, pad=1)
relu4_1 = L.ReLU(conv4_1, name='relu4_1')
conv4_2 = L.Convolution(relu4_1, name='conv4_2', kernel_size=3,num_output=nout, pad=1)
relu4_2 = L.ReLU(conv4_2, name='relu4_2')
conv4_3 = L.Convolution(relu4_2, name='conv4_3', kernel_size=3,num_output=nout, pad=1)
    relu4_3 = L.ReLU(conv4_3, name='relu4_3')
    pool4 = L.Pooling(relu4_3, name='pool4', pooling_param=dict(pool=P.Pooling.MAX, kernel_size=2, stride=2))
# stage 5
nout=512
conv5_1 = L.Convolution(pool4, name='conv5_1', kernel_size=3,num_output=nout, pad=1)
relu5_1 = L.ReLU(conv5_1, name='relu5_1')
conv5_2 = L.Convolution(relu5_1, name='conv5_2', kernel_size=3,num_output=nout, pad=1)
relu5_2 = L.ReLU(conv5_2, name='relu5_2')
conv5_3 = L.Convolution(relu5_2, name='conv5_3', kernel_size=3,num_output=nout, pad=1)
    relu5_3 = L.ReLU(conv5_3, name='relu5_3')
    pool5 = L.Pooling(relu5_3, name='pool5', pooling_param=dict(pool=P.Pooling.MAX, kernel_size=2, stride=2))
return pool1, pool2, pool3, pool4, pool5
def AbdNet():
growth_rate = 16
dropout = 0.2
vgg_nout = 64
N = 5
nchannels = 16
imsize = 256
msra = dict(type='msra')
gs_1e_2 = dict(type='gaussian', std=0.01)
# n = caffe.NetSpec()
data, data2, albedo_diff_gt, albedo_gt = L.Python(ntop=4, \
python_param=dict(\
module='image_layer3_gradient',\
layer='ImageLayer3',\
param_str="{{'data_dir': '/home/albertxavier/dataset/sintel/images/', 'tops': ['data', 'data2', 'albedo_diff_gt', 'albedo_gt'],'seed': 1337,'split': 'train', 'list_file':'train_two_folds_split_scene.txt', 'mean_bgr': (104.00699, 116.66877, 122.67892), 'crop_size':({imsize},{imsize})}}".format(imsize=imsize)\
)\
)
pool1, pool2, pool3, pool4, pool5 = make_VGG(data)
# scale 2
model = L.Convolution(data2, kernel_size=4, stride=2,
num_output=96, pad=1, bias_term=True, weight_filler=msra, bias_filler=dict(type='constant', value=0))
model = L.BatchNorm(model, in_place=False, param=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)])
model = L.Scale(model, bias_term=True, in_place=True, filler=dict(value=1), bias_filler=dict(value=0))
model = L.ReLU(model, in_place=True)
model = L.Pooling(model, pooling_param=dict(pool=P.Pooling.MAX, kernel_size=2, stride=2))
model = L.Dropout(model, dropout_ratio=dropout)
# concat VGG
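    # Note: Python 2 integer division is relied upon below; e.g. 2/4 == 0
    # selects the stride-2 downsampling branch of upsampleVGG for pool1.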
vgg1 = upsampleVGG(pool1, upsample = 2/4, dropout=dropout, nout=vgg_nout)
vgg2 = upsampleVGG(pool2, upsample = 4/4, dropout=dropout, nout=vgg_nout)
vgg3 = upsampleVGG(pool3, upsample = 8/4, dropout=dropout, nout=vgg_nout)
vgg4 = upsampleVGG(pool4, upsample = 16/4, dropout=dropout, nout=vgg_nout)
vgg5 = upsampleVGG(pool5, upsample = 32/4, dropout=dropout, nout=vgg_nout)
model = L.Concat(model, vgg1, vgg2, vgg3, vgg4, vgg5, axis=1)
# block 1: dense
for i in range(N):
model = add_layer(model, growth_rate, dropout)
nchannels += growth_rate
model = transition(model, nchannels, dropout, weight_filler=msra)
# block 2: dense
for i in range(N):
model = add_layer(model, growth_rate, dropout)
nchannels += growth_rate
model = transition(model, nchannels, dropout, weight_filler=msra)
# block 3: res
# nchannels = int(nchannels * 0.6)
# for i in range(N):
# if i == 0: project = True
# else: project = False
# model = add_layer(bottom, nchannels, dropout, project=project)
    # block 3: dense
for i in range(N):
model = add_layer(model, growth_rate, dropout)
nchannels += growth_rate
model = transition(model, nchannels, dropout, weight_filler=msra)
# deep supervision
model_deep = L.Convolution(model, kernel_size=1, stride=1, num_output=96, pad=0, bias_term=False, weight_filler=gs_1e_2, param=[dict(lr_mult=1, decay_mult=1)])
model_deep = L.Deconvolution(model_deep, convolution_param=dict(kernel_size=8, stride=4, num_output=3, pad=2, bias_term=True, weight_filler=dict(type='gaussian', std=0.001), bias_filler=dict(type='constant', value=0)), param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)])
loss_deep = L.Python(\
model_deep, albedo_gt,\
loss_weight=1.0, ntop=1,\
python_param=dict(\
module='l2loss',\
layer='L2LossLayer',\
)\
)
# model = L.Concat(model, model_deep, propagate_down=[True, False])
# block 4
for i in range(N):
model = add_layer(model, growth_rate, dropout)
nchannels += growth_rate
model = transition(model, nchannels, dropout=0., weight_filler=msra)
# fuse feature
model = L.Convolution(model, kernel_size=1, stride=1, num_output=96, pad=0, bias_term=False, weight_filler=gs_1e_2, bias_filler=dict(type='constant'))
# upsample
model = L.Deconvolution(model, convolution_param=dict(kernel_size=8, stride=4, num_output=6, pad=2, bias_term=True, weight_filler=dict(type='gaussian', std=0.001), bias_filler=dict(type='constant', value=0)), param=[dict(lr_mult=10, decay_mult=1), dict(lr_mult=20, decay_mult=0)])
# loss
loss = L.Python(\
model, albedo_diff_gt,\
loss_weight=1.0, ntop=1,\
python_param=dict(\
module='l2loss-gradient-hist',\
layer='L2LossLayer',\
param_str="{'display': True}"\
)\
)
return to_proto(loss, loss_deep)
def make_net(suffix=""):
# with open('vgg.txt', 'r') as f:
# vgg = f.read()
with open('train_albedonet_{}.prototxt'.format(suffix), 'w') as f:
f.write(str(AbdNet()))
def make_solver(suffix=""):
s = caffe_pb2.SolverParameter()
s.random_seed = 0xCAFFE
s.train_net = 'train_albedonet_{}.prototxt'.format(suffix)
# s.test_net.append('test_densenet.prototxt')
# s.test_interval = 800
# s.test_iter.append(200)
s.max_iter = 100000
s.type = 'Nesterov'
s.display = 1
s.base_lr = 0.02
s.momentum = 0.9
s.weight_decay = 1e-4
s.iter_size = 2
s.lr_policy='multistep'
s.gamma = 0.1
s.stepvalue.append(int(0.15 * s.max_iter))
s.stepvalue.append(int(0.30 * s.max_iter))
s.stepvalue.append(int(0.80 * s.max_iter))
s.solver_mode = caffe_pb2.SolverParameter.GPU
s.snapshot_prefix = './snapshot/albedonet_{}'.format(suffix)
s.snapshot = 5000
solver_path = 'solver_{}.prototxt'.format(suffix)
with open(solver_path, 'w') as f:
f.write(str(s))
def make_train_bash(suffix=''):
s = \
"""#!/usr/bin/env sh
$CAFFE_ROOT/build/tools/caffe train \\
-solver solver_{}.prototxt \\
-weights /home/albertxavier/caffe_model/vgg16.caffemodel \\
-gpu 0
""".format(suffix)
path = 'train_{}.sh'.format(suffix)
with open(path, 'w') as f:
f.write(str(s))
if __name__ == '__main__':
suffix = "deep_supervision"
make_net(suffix)
make_solver(suffix)
make_train_bash(suffix) | albertxavier001/graduation-project | caffe/3 deep supervision pure dense/make_albedo_net.py | Python | mit | 13,847 | [
"Gaussian"
] | 694f4a578d4c310aed94a77cab03dae3d308a00e55bf3eb52f06fc0089f7cf6c |
"""Basic UI test that checks if the stack analysis is visible for the newly created project."""
from splinter import Browser
import time
import os
from urllib.parse import urljoin
SLEEP_BETWEEN_PAGES = 15
SLEEP_BEFORE_CLICK = 15
class Context:
"""Class that holds context for the UI tests."""
def __init__(self, server, username, password):
"""Initialize the attributes holding server URL, user name, and password."""
self.browser = None
self.space_name = None
self.server = server
self.username = username
self.password = password
def check_env_variable(env_var_name):
"""Check if the given environment variable is present."""
assert os.environ.get(env_var_name), \
'The environment variable {v} should be set properly'.format(
v=env_var_name)
def check_setup():
"""Check if all required environment variables are present."""
check_env_variable('TARGET_SERVER')
check_env_variable('OPENSHIFT_USERNAME')
check_env_variable('OPENSHIFT_PASSWORD')
def front_page(context):
"""Go to the Openshift.io front page and click the Login button."""
print("Front page")
url = context.server
context.browser.visit(url)
time.sleep(SLEEP_BEFORE_CLICK)
login_button = context.browser.find_by_css('button#login').first
assert login_button.visible
assert login_button.value == 'LOG IN'
time.sleep(SLEEP_BEFORE_CLICK)
login_button.click()
time.sleep(SLEEP_BETWEEN_PAGES)
def login_page(context):
"""Login into the Openshift.io using the provided username and password."""
print("Login page")
username_input = context.browser.find_by_id('username').first
password_input = context.browser.find_by_id('password').first
assert username_input.visible
assert password_input.visible
context.browser.fill('username', context.username)
context.browser.fill('password', context.password)
login_button = context.browser.find_by_id('kc-login').first
assert login_button.visible
time.sleep(SLEEP_BEFORE_CLICK)
login_button.click()
time.sleep(SLEEP_BETWEEN_PAGES)
def get_all_existing_space_names(browser):
"""Return list of names of Spaces."""
spaces = browser.find_by_xpath("//div[@class='space-item']/h2/a")
assert spaces is not None
names = [space.value for space in spaces]
print("Already created Spaces")
print(" ".join(names))
return names
def generate_space_prefix():
"""Generate prefix for the new Space.
The prefix is based on the current local time, so very probably it will be
unique for given user (if not, the index will be updated).
"""
    localtime = time.localtime()
    return time.strftime("test%Y-%m-%d-", localtime)
def space_name(prefix, index):
"""Construct name of space from the prefix and its index."""
return "{p}{i}".format(p=prefix, i=index)
def is_space_name_unique(prefix, index, space_names):
"""Check if the name of the Space is unique."""
name = space_name(prefix, index)
return name not in space_names
def generate_unique_space_name(space_names):
"""Generate a name for a Space.
The name is based on current date and is unique (by adding a small index to the date).
"""
prefix = generate_space_prefix()
index = 1
while not is_space_name_unique(prefix, index, space_names):
index += 1
return space_name(prefix, index)
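# Example (illustrative): if run on 2018-01-02 and 'test2018-01-02-1' is
# already taken, generate_unique_space_name returns 'test2018-01-02-2'.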
def create_new_space_step_1(context):
"""Perform the first step to create new Space."""
print('Create new Space: step 1')
new_space_button = context.browser.find_by_text('New').first
assert new_space_button is not None
time.sleep(SLEEP_BEFORE_CLICK)
new_space_button.click()
name_input = context.browser.find_by_id('name').first
assert name_input.visible
context.browser.fill('name', context.space_name)
create_space_button = context.browser.find_by_id('createSpaceButton').first
assert create_space_button.visible
time.sleep(SLEEP_BEFORE_CLICK)
create_space_button.click()
time.sleep(SLEEP_BETWEEN_PAGES)
def create_new_space_step_2(context):
"""Perform the second step to create new Space."""
print('Create new Space: step 2')
time.sleep(15)
quick_start_button = context.browser.find_by_text('Quickstart').first
assert quick_start_button is not None
time.sleep(SLEEP_BEFORE_CLICK)
quick_start_button.mouse_over()
time.sleep(SLEEP_BEFORE_CLICK)
quick_start_button.click()
time.sleep(SLEEP_BETWEEN_PAGES)
def create_new_space_step_3(context):
"""Perform third step to create new Space."""
print('Create new Space: step 3')
time.sleep(15)
next_button = context.browser.find_by_id('forge-next-button').first
assert next_button is not None
print(next_button.text)
time.sleep(SLEEP_BEFORE_CLICK)
next_button.click()
time.sleep(SLEEP_BETWEEN_PAGES)
def create_new_space_step_4(context):
"""Perform fourth step to create new Space."""
print('Create new Space: step 4')
release_radio = context.browser.find_by_value('Release').first
assert release_radio is not None
time.sleep(SLEEP_BEFORE_CLICK)
release_radio.click()
next_button = context.browser.find_by_id('forge-next-button').first
assert next_button is not None
print(next_button.text)
time.sleep(SLEEP_BEFORE_CLICK)
next_button.click()
time.sleep(SLEEP_BETWEEN_PAGES)
def create_new_space_step_5(context):
"""Perform fifth step to create new Space."""
print('Create new Space: step 5')
next_button = context.browser.find_by_id('forge-next-button').first
assert next_button is not None
print(next_button.text)
time.sleep(SLEEP_BEFORE_CLICK)
next_button.click()
time.sleep(SLEEP_BETWEEN_PAGES)
def create_new_space_step_6(context):
"""Perform sixth step to create new Space."""
print('Create new Space: step 6')
finish_button = context.browser.find_by_id('forge-finish-button').first
assert finish_button is not None
print(finish_button.text)
time.sleep(SLEEP_BEFORE_CLICK)
finish_button.click()
time.sleep(SLEEP_BETWEEN_PAGES)
def create_new_space_step_7(context):
"""Perform the last step to create new Space.
Click in the OK button on the last past of the forge wizard.
This step is needed so the repo will be shown on the Space page!
"""
print('Create new Space: step 7')
finish_button = context.browser.find_by_id('forge-ok-button').first
assert finish_button is not None
print(finish_button.text)
time.sleep(SLEEP_BEFORE_CLICK)
finish_button.click()
time.sleep(SLEEP_BETWEEN_PAGES)
def spaces_page(context):
"""Go to the Spaces page with list of available Spaces."""
print("Spaces page")
url = urljoin(context.server, context.username + "/_spaces")
context.browser.visit(url)
space_names = get_all_existing_space_names(context.browser)
new_space_name = generate_unique_space_name(space_names)
context.space_name = new_space_name
print("Unique name for new Space\n " + new_space_name)
create_new_space_step_1(context)
create_new_space_step_2(context)
create_new_space_step_3(context)
create_new_space_step_4(context)
create_new_space_step_5(context)
create_new_space_step_6(context)
create_new_space_step_7(context)
def check_text_presence(context, text):
"""Check if the given text is present on the current web page."""
tag = context.browser.find_by_text(text).first
assert tag is not None
print(" The text '{t}' is found on the page".format(t=text))
def stack_recommendation_on_space_page(context):
"""Check the presence of stack recommendation on the Space page."""
url = urljoin(context.server, context.username + "/" + context.space_name)
print("Going to the Space {s}".format(s=context.space_name))
context.browser.visit(url)
time.sleep(SLEEP_BEFORE_CLICK)
recommendation1 = 'Recommendation: Change io.vertx:vertx-web : 3.4.1'
check_text_presence(context, recommendation1)
recommendation2 = 'Recommendation: Change io.vertx:vertx-core : 3.4.1'
check_text_presence(context, recommendation2)
time.sleep(SLEEP_BETWEEN_PAGES)
def stack_recommendation_on_pipelines_page(context):
"""Check the presence of stack recommendation on the Pipelines page."""
url = urljoin(context.server, context.username + "/" + context.space_name +
"/create/pipelines")
print("Going to the pipeline page for the Space {s}".format(
s=context.space_name))
context.browser.visit(url)
time.sleep(SLEEP_BEFORE_CLICK)
check_text_presence(context, "Stack Reports")
link = context.browser.find_by_text("Stack Reports")
link.click()
time.sleep(SLEEP_BETWEEN_PAGES)
# TODO - ask why the text is different: Recommendation/Recommended
recommendation1 = 'Recommended - Change io.vertx:vertx-web : 3.4.1'
check_text_presence(context, recommendation1)
recommendation2 = 'Recommended - Change io.vertx:vertx-core : 3.4.1'
check_text_presence(context, recommendation2)
time.sleep(SLEEP_BETWEEN_PAGES)
def stack_recommendation(context):
"""Check the presence of stack recommendation on all relevant pages on OpenShift.io."""
stack_recommendation_on_space_page(context)
    stack_recommendation_on_pipelines_page(context)
def run_tests(engine, server, username, password):
"""Start all UI tests."""
context = Context(server, username, password)
with Browser(engine) as browser:
context.browser = browser
front_page(context)
login_page(context)
spaces_page(context)
# it is really needed to wait for > 10 minutes here
time.sleep(60 * 10)
stack_recommendation(context)
def main():
"""Start all UI tests by using the provided environment variables."""
check_setup()
server = os.environ.get('TARGET_SERVER')
username = os.environ.get('OPENSHIFT_USERNAME')
password = os.environ.get('OPENSHIFT_PASSWORD')
engine = os.environ.get('BROWSER_ENGINE', 'chrome')
print("Using the following browser engine {e}".format(e=engine))
run_tests(engine, server, username, password)
if __name__ == "__main__":
main()
| jpopelka/fabric8-analytics-common | ui-tests/test.py | Python | apache-2.0 | 10,288 | [
"VisIt"
] | 87c02e91f8e598bf45ceef7d9313ad670e051952617c13aeb56f1d9e4b8608a0 |
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from PyQt4 import QtCore
from PyQt4.QtCore import Qt
from customdelegate import CustomDelegate, DocumentationMetaclass
from camelot.view.controls import editors
from camelot.core import constants
from camelot.core.utils import variant_to_pyobject
from camelot.view.proxy import ValueLoading
class FloatDelegate( CustomDelegate ):
"""Custom delegate for float values"""
__metaclass__ = DocumentationMetaclass
editor = editors.FloatEditor
def __init__( self,
minimum=constants.camelot_minfloat,
maximum=constants.camelot_maxfloat,
precision=2,
parent=None,
unicode_format=None,
**kwargs ):
"""
:param precision: The number of digits after the decimal point displayed. This defaults
to the precision specified in the definition of the Field.
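
        A minimal usage sketch (the attribute values are illustrative only)::

            delegate = FloatDelegate( precision = 3,
                                      unicode_format = lambda f: '%.3f km' % f )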
"""
CustomDelegate.__init__(self,
parent=parent,
precision=precision,
minimum=minimum, maximum=maximum,
**kwargs )
self.minimum = minimum
self.maximum = maximum
self.precision = precision
self.unicode_format = unicode_format
def paint( self, painter, option, index ):
painter.save()
self.drawBackground(painter, option, index)
value = variant_to_pyobject(index.model().data(index, Qt.EditRole))
if value in (None, ValueLoading):
value_str = ''
elif self.unicode_format:
value_str = self.unicode_format(value)
else:
value_str = QtCore.QString("%L1").arg(float(value),0,'f',self.precision)
self.paint_text( painter, option, index, value_str, horizontal_align=Qt.AlignRight )
painter.restore()
| kurtraschke/camelot | camelot/view/controls/delegates/floatdelegate.py | Python | gpl-2.0 | 2,971 | [
"VisIt"
] | 66beec19b22d6b749541931bd58f6adc6755fe40c99eee01a2b8ab5218743228 |
""" ResourceManagementClient
Client to interact with the ResourceManagementDB.
"""
__RCSID__ = '$Id$'
from DIRAC.Core.DISET.RPCClient import RPCClient
def uppercase_first_letter(key):
""" a method that makes the first letter uppercase only (and leaves the remaining letters unaffected)
"""
return key[0].upper() + key[1:]
class ResourceManagementClient(object):
"""
The :class:`ResourceManagementClient` class exposes the :mod:`DIRAC.ResourceManagement`
API. All functions you need are on this client.
It has the 'direct-db-access' functions, the ones of the type:
- insert
- update
- select
- delete
that return parts of the RSSConfiguration stored on the CS, and used everywhere
on the RSS module. Finally, and probably more interesting, it exposes a set
of functions, badly called 'boosters'. They are 'home made' functions using the
basic database functions that are interesting enough to be exposed.
The client will ALWAYS try to connect to the DB, and in case of failure, to the
XML-RPC server ( namely :class:`ResourceManagementDB` and
  :class:`ResourceManagementHandler` ).
  You can use this client in the following way
  >>> from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
>>> rsClient = ResourceManagementClient()
  All functions calling methods exposed on the database or on the booster are
  making use of some syntactic sugar, in this case the `_prepare` helper,
  which simplifies the client considerably.
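
  For example ( the arguments below are illustrative only ):

  >>> rsClient.selectDowntimeCache( element = 'Site' )
  >>> rsClient.addOrModifyJobCache( site = 'LCG.CERN.ch', maskStatus = 'Active' )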
"""
def _prepare(self, sendDict):
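    """ Prepares the `locals()` dict of a caller for the RPC call: drops
    `self`, discards parameters that evaluate false (e.g. None) and
    uppercases the first letter of each remaining key so that keys match the
    database column names (e.g. `lastCheckTime` becomes `LastCheckTime`).
    """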
# remove unnecessary key generated by locals()
del sendDict['self']
# make each key name uppercase to match database column names (case sensitive)
for key, value in sendDict.items():
del sendDict[key]
if value:
sendDict.update({uppercase_first_letter(key): value})
return sendDict
# AccountingCache Methods ....................................................
def selectAccountingCache(self, name=None, plotType=None, plotName=None,
result=None, dateEffective=None, lastCheckTime=None, meta=None):
'''
Gets from PolicyResult all rows that match the parameters given.
:Parameters:
**name** - `[, string, list]`
name of an individual of the grid topology
**plotType** - `[, string, list]`
the plotType name (e.g. 'Pilot')
**plotName** - `[, string, list]`
the plot name
**result** - `[, string, list]`
command result
**dateEffective** - `[, datetime, list]`
time-stamp from which the result is effective
**lastCheckTime** - `[, datetime, list]`
time-stamp setting last time the result was checked
**meta** - `dict`
metadata for the mysql query. Currently it is being used only for column selection.
For example: meta = { 'columns' : [ 'Name' ] } will return only the 'Name' column.
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").select('AccountingCache', self._prepare(locals()))
def addOrModifyAccountingCache(self, name=None, plotType=None, plotName=None,
result=None, dateEffective=None, lastCheckTime=None):
'''
Adds or updates-if-duplicated to AccountingCache. Using `name`, `plotType`
and `plotName` to query the database, decides whether to insert or update the
table.
:Parameters:
**name** - `string`
name of an individual of the grid topology
**plotType** - `string`
the plotType name (e.g. 'Pilot')
**plotName** - `string`
the plot name
**result** - `string`
command result
**dateEffective** - `datetime`
time-stamp from which the result is effective
**lastCheckTime** - `datetime`
time-stamp setting last time the result was checked
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").addOrModify('AccountingCache', self._prepare(locals()))
def deleteAccountingCache(self, name=None, plotType=None, plotName=None,
result=None, dateEffective=None, lastCheckTime=None):
'''
Deletes from AccountingCache all rows that match the parameters given.
:Parameters:
**name** - `string`
name of an individual of the grid topology
**plotType** - `string`
the plotType name (e.g. 'Pilot')
**plotName** - `string`
the plot name
**result** - `string`
command result
**dateEffective** - `datetime`
time-stamp from which the result is effective
**lastCheckTime** - `datetime`
time-stamp setting last time the result was checked
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").delete('AccountingCache', self._prepare(locals()))
# GGUSTicketsCache Methods ...................................................
def selectGGUSTicketsCache(self, gocSite=None, link=None, openTickets=None,
tickets=None, lastCheckTime=None, meta=None):
'''
Gets from GGUSTicketsCache all rows that match the parameters given.
:Parameters:
**gocSite** - `string`
**link** - `string`
url to the details
**openTickets** - `integer`
**tickets** - `string`
**lastCheckTime** - `datetime`
time-stamp setting last time the result was checked
**meta** - `dict`
metadata for the mysql query. Currently it is being used only for column selection.
For example: meta = { 'columns' : [ 'Name' ] } will return only the 'Name' column.
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").select('GGUSTicketsCache', self._prepare(locals()))
def deleteGGUSTicketsCache(self, gocSite=None, link=None, openTickets=None,
tickets=None, lastCheckTime=None):
'''
Deletes from GGUSTicketsCache all rows that match the parameters given.
:Parameters:
**gocSite** - `string`
**link** - `string`
url to the details
**openTickets** - `integer`
**tickets** - `string`
**lastCheckTime** - `datetime`
time-stamp setting last time the result was checked
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").delete('GGUSTicketsCache', self._prepare(locals()))
def addOrModifyGGUSTicketsCache(self, gocSite=None, link=None, openTickets=None,
tickets=None, lastCheckTime=None):
'''
Adds or updates-if-duplicated to GGUSTicketsCache all rows that match the parameters given.
:Parameters:
**gocSite** - `string`
**link** - `string`
url to the details
**openTickets** - `integer`
**tickets** - `string`
**lastCheckTime** - `datetime`
time-stamp setting last time the result was checked
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").addOrModify('GGUSTicketsCache', self._prepare(locals()))
# DowntimeCache Methods ......................................................
def selectDowntimeCache(self, downtimeID=None, element=None, name=None,
startDate=None, endDate=None, severity=None,
description=None, link=None, dateEffective=None,
lastCheckTime=None, gOCDBServiceType=None, meta=None):
'''
Gets from DowntimeCache all rows that match the parameters given.
:Parameters:
**downtimeID** - [, `string`, `list`]
unique id for the downtime
**element** - [, `string`, `list`]
valid element in the topology ( Site, Resource, Node )
**name** - [, `string`, `list`]
name of the element where the downtime applies
**startDate** - [, `datetime`, `list`]
starting time for the downtime
**endDate** - [, `datetime`, `list`]
ending time for the downtime
**severity** - [, `string`, `list`]
severity assigned by the gocdb
**description** - [, `string`, `list`]
brief description of the downtime
**link** - [, `string`, `list`]
url to the details
**dateEffective** - [, `datetime`, `list`]
time when the entry was created in this database
**lastCheckTime** - [, `datetime`, `list`]
time-stamp setting last time the result was checked
**gOCDBServiceType** - `string`
service type assigned by gocdb
**meta** - `dict`
metadata for the mysql query. Currently it is being used only for column selection.
For example: meta = { 'columns' : [ 'Name' ] } will return only the 'Name' column.
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").select('DowntimeCache', self._prepare(locals()))
def deleteDowntimeCache(self, downtimeID=None, element=None, name=None,
startDate=None, endDate=None, severity=None,
description=None, link=None, dateEffective=None,
lastCheckTime=None, gOCDBServiceType=None):
'''
Deletes from DowntimeCache all rows that match the parameters given.
:Parameters:
**downtimeID** - [, `string`, `list`]
unique id for the downtime
**element** - [, `string`, `list`]
valid element in the topology ( Site, Resource, Node )
**name** - [, `string`, `list`]
name of the element where the downtime applies
**startDate** - [, `datetime`, `list`]
starting time for the downtime
**endDate** - [, `datetime`, `list`]
ending time for the downtime
**severity** - [, `string`, `list`]
severity assigned by the gocdb
**description** - [, `string`, `list`]
brief description of the downtime
**link** - [, `string`, `list`]
url to the details
**dateEffective** - [, `datetime`, `list`]
time when the entry was created in this database
**lastCheckTime** - [, `datetime`, `list`]
time-stamp setting last time the result was checked
**gOCDBServiceType** - `string`
service type assigned by gocdb
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").delete('DowntimeCache', self._prepare(locals()))
def addOrModifyDowntimeCache(self, downtimeID=None, element=None, name=None,
startDate=None, endDate=None, severity=None,
description=None, link=None, dateEffective=None,
lastCheckTime=None, gOCDBServiceType=None):
'''
Adds or updates-if-duplicated to DowntimeCache. Using `downtimeID` to query
the database, decides whether to insert or update the table.
:Parameters:
**downtimeID** - `string`
unique id for the downtime
**element** - `string`
valid element in the topology ( Site, Resource, Node )
**name** - `string`
name of the element where the downtime applies
**startDate** - `datetime`
starting time for the downtime
**endDate** - `datetime`
ending time for the downtime
**severity** - `string`
severity assigned by the gocdb
**description** - `string`
brief description of the downtime
**link** - `string`
url to the details
**dateEffective** - `datetime`
time when the entry was created in this database
**lastCheckTime** - `datetime`
time-stamp setting last time the result was checked
**gOCDBServiceType** - `string`
service type assigned by gocdb
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").addOrModify('DowntimeCache', self._prepare(locals()))
# JobCache Methods ...........................................................
def selectJobCache(self, site=None, maskStatus=None, efficiency=None,
status=None, lastCheckTime=None, meta=None):
'''
Gets from JobCache all rows that match the parameters given.
:Parameters:
**site** - `[, string, list ]`
name of the site element
**maskStatus** - `[, string, list ]`
maskStatus for the site
**efficiency** - `[, float, list ]`
job efficiency ( successful / total )
**status** - `[, string, list ]`
status for the site computed
**lastCheckTime** - `[, datetime, list ]`
time-stamp setting last time the result was checked
**meta** - `dict`
metadata for the mysql query. Currently it is being used only for column selection.
For example: meta = { 'columns' : [ 'Name' ] } will return only the 'Name' column.
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").select('JobCache', self._prepare(locals()))
def deleteJobCache(self, site=None, maskStatus=None, efficiency=None,
status=None, lastCheckTime=None):
'''
Deletes from JobCache all rows that match the parameters given.
:Parameters:
**site** - `[, string, list ]`
name of the site element
**maskStatus** - `[, string, list ]`
maskStatus for the site
**efficiency** - `[, float, list ]`
job efficiency ( successful / total )
**status** - `[, string, list ]`
status for the site computed
**lastCheckTime** - `[, datetime, list ]`
time-stamp setting last time the result was checked
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").delete('JobCache', self._prepare(locals()))
def addOrModifyJobCache(self, site=None, maskStatus=None, efficiency=None,
status=None, lastCheckTime=None):
'''
Adds or updates-if-duplicated to JobCache. Using `site` to query
the database, decides whether to insert or update the table.
:Parameters:
**site** - `[, string, list ]`
name of the site element
**maskStatus** - `[, string, list ]`
maskStatus for the site
**efficiency** - `[, float, list ]`
job efficiency ( successful / total )
**status** - `[, string, list ]`
status for the site computed
**lastCheckTime** - `[, datetime, list ]`
time-stamp setting last time the result was checked
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").addOrModify('JobCache', self._prepare(locals()))
# TransferCache Methods ......................................................
def selectTransferCache(self, sourceName=None, destinationName=None, metric=None,
value=None, lastCheckTime=None, meta=None):
'''
Gets from TransferCache all rows that match the parameters given.
:Parameters:
      **sourceName** - `[, string, list ]`
        name of the element taken as source of the transfer
      **destinationName** - `[, string, list ]`
        name of the element taken as destination of the transfer
**metric** - `[, string, list ]`
measured quality of failed transfers
**value** - `[, float, list ]`
percentage
**lastCheckTime** - `[, float, list ]`
time-stamp setting last time the result was checked
**meta** - `dict`
metadata for the mysql query. Currently it is being used only for column selection.
For example: meta = { 'columns' : [ 'Name' ] } will return only the 'Name' column.
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").select('TransferCache', self._prepare(locals()))
def deleteTransferCache(self, sourceName=None, destinationName=None, metric=None,
value=None, lastCheckTime=None):
'''
Deletes from TransferCache all rows that match the parameters given.
:Parameters:
      **sourceName** - `[, string, list ]`
        name of the element taken as source of the transfer
      **destinationName** - `[, string, list ]`
        name of the element taken as destination of the transfer
**metric** - `[, string, list ]`
measured quality of failed transfers
**value** - `[, float, list ]`
percentage
**lastCheckTime** - `[, float, list ]`
time-stamp setting last time the result was checked
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").delete('TransferCache', self._prepare(locals()))
def addOrModifyTransferCache(self, sourceName=None, destinationName=None, metric=None,
value=None, lastCheckTime=None):
'''
    Adds or updates-if-duplicated to TransferCache. Using `sourceName`, `destinationName`
    and `metric` to query the database, decides whether to insert or update the table.
:Parameters:
      **sourceName** - `string`
        name of the element taken as source of the transfer
      **destinationName** - `string`
        name of the element taken as destination of the transfer
**metric** - `string`
measured quality of failed transfers
**value** - `float`
percentage
**lastCheckTime** - `datetime`
time-stamp setting last time the result was checked
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").addOrModify('TransferCache', self._prepare(locals()))
# PilotCache Methods .........................................................
def selectPilotCache(self, site=None, cE=None, pilotsPerJob=None,
pilotJobEff=None, status=None, lastCheckTime=None, meta=None):
'''
Gets from TransferCache all rows that match the parameters given.
:Parameters:
**site** - `[, string, list ]`
name of the site
**cE** - `[, string, list ]`
name of the CE of 'Multiple' if all site CEs are considered
**pilotsPerJob** - `[, float, list ]`
measure calculated
**pilotJobEff** - `[, float, list ]`
percentage
**status** - `[, float, list ]`
status of the CE / Site
**lastCheckTime** - `[, datetime, list ]`
measure calculated
**meta** - `dict`
metadata for the mysql query. Currently it is being used only for column selection.
For example: meta = { 'columns' : [ 'Name' ] } will return only the 'Name' column.
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").select('PilotCache', self._prepare(locals()))
def deletePilotCache(self, site=None, cE=None, pilotsPerJob=None,
pilotJobEff=None, status=None, lastCheckTime=None):
'''
Deletes from TransferCache all rows that match the parameters given.
:Parameters:
**site** - `[, string, list ]`
name of the site
**cE** - `[, string, list ]`
name of the CE of 'Multiple' if all site CEs are considered
**pilotsPerJob** - `[, float, list ]`
measure calculated
**pilotJobEff** - `[, float, list ]`
percentage
**status** - `[, float, list ]`
status of the CE / Site
**lastCheckTime** - `[, datetime, list ]`
measure calculated
    :return: S_OK() || S_ERROR()
    '''
return RPCClient("ResourceStatus/ResourceManagement").delete('PilotCache', self._prepare(locals()))
def addOrModifyPilotCache(self, site=None, cE=None, pilotsPerJob=None,
pilotJobEff=None, status=None, lastCheckTime=None):
'''
Adds or updates-if-duplicated to PilotCache. Using `site` and `cE`
to query the database, decides whether to insert or update the table.
:Parameters:
**site** - `string`
name of the site
**cE** - `string`
name of the CE of 'Multiple' if all site CEs are considered
**pilotsPerJob** - `float`
measure calculated
**pilotJobEff** - `float`
percentage
**status** - `string`
status of the CE / Site
**lastCheckTime** - `datetime`
measure calculated
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").addOrModify('PilotCache', self._prepare(locals()))
# PolicyResult Methods .......................................................
def selectPolicyResult(self, element=None, name=None, policyName=None,
statusType=None, status=None, reason=None,
lastCheckTime=None, meta=None):
'''
Gets from PolicyResult all rows that match the parameters given.
:Parameters:
      **element** - `[, string, list]`
it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Service` | `Resource` | `StorageElement`
**name** - `[, string, list]`
name of the element
**policyName** - `[, string, list]`
name of the policy
**statusType** - `[, string, list]`
        it has to be a valid status type for the given element
**status** - `[, string, list]`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**reason** - `[, string, list]`
decision that triggered the assigned status
**lastCheckTime** - `[, datetime, list]`
time-stamp setting last time the policy result was checked
**meta** - `dict`
metadata for the mysql query. Currently it is being used only for column selection.
For example: meta = { 'columns' : [ 'Name' ] } will return only the 'Name' column.
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").select('PolicyResult', self._prepare(locals()))
def deletePolicyResult(self, element=None, name=None, policyName=None,
statusType=None, status=None, reason=None,
dateEffective=None, lastCheckTime=None):
'''
Deletes from PolicyResult all rows that match the parameters given.
:Parameters:
      **element** - `[, string, list]`
it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Service` | `Resource` | `StorageElement`
**name** - `[, string, list]`
name of the element
**policyName** - `[, string, list]`
name of the policy
**statusType** - `[, string, list]`
        it has to be a valid status type for the given element
**status** - `[, string, list]`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**reason** - `[, string, list]`
decision that triggered the assigned status
**dateEffective** - `datetime`
time-stamp from which the policy result is effective
**lastCheckTime** - `[, datetime, list]`
time-stamp setting last time the policy result was checked
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").delete('PolicyResult', self._prepare(locals()))
def addOrModifyPolicyResult(self, element=None, name=None, policyName=None,
statusType=None, status=None, reason=None,
dateEffective=None, lastCheckTime=None):
'''
Adds or updates-if-duplicated to PolicyResult. Using `name`, `policyName` and
`statusType` to query the database, decides whether to insert or update the table.
:Parameters:
**element** - `string`
it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Service` | `Resource` | `StorageElement`
**name** - `string`
name of the element
**policyName** - `string`
name of the policy
**statusType** - `string`
it has to be a valid status type for the given element
**status** - `string`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**reason** - `string`
decision that triggered the assigned status
**dateEffective** - `datetime`
time-stamp from which the policy result is effective
**lastCheckTime** - `datetime`
time-stamp setting last time the policy result was checked
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").addOrModify('PolicyResult', self._prepare(locals()))
# SpaceTokenOccupancyCache Methods ...........................................
def selectSpaceTokenOccupancyCache(self, endpoint=None, token=None,
total=None, guaranteed=None, free=None,
lastCheckTime=None, meta=None):
'''
Gets from SpaceTokenOccupancyCache all rows that match the parameters given.
:Parameters:
**endpoint** - `[, string, list]`
srm endpoint
**token** - `[, string, list]`
name of the token
**total** - `[, integer, list]`
total terabytes
**guaranteed** - `[, integer, list]`
guaranteed terabytes
**free** - `[, integer, list]`
free terabytes
**lastCheckTime** - `[, datetime, list]`
time-stamp from which the result is effective
**meta** - `dict`
metadata for the mysql query. Currently it is being used only for column selection.
For example: meta = { 'columns' : [ 'Name' ] } will return only the 'Name' column.
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").select('SpaceTokenOccupancyCache', self._prepare(locals()))
def deleteSpaceTokenOccupancyCache(self, endpoint=None, token=None,
total=None, guaranteed=None, free=None,
lastCheckTime=None):
'''
Deletes from SpaceTokenOccupancyCache all rows that match the parameters given.
:Parameters:
**endpoint** - `[, string, list]`
srm endpoint
**token** - `[, string, list]`
name of the token
**total** - `[, integer, list]`
total terabytes
**guaranteed** - `[, integer, list]`
guaranteed terabytes
**free** - `[, integer, list]`
free terabytes
**lastCheckTime** - `[, datetime, list]`
time-stamp from which the result is effective
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").delete('SpaceTokenOccupancyCache', self._prepare(locals()))
def addOrModifySpaceTokenOccupancyCache(self, endpoint=None, token=None,
total=None, guaranteed=None, free=None,
lastCheckTime=None):
'''
    Adds or updates-if-duplicated to SpaceTokenOccupancyCache. Using `endpoint` and
    `token` to query the database, decides whether to insert or update the table.
:Parameters:
      **endpoint** - `string`
srm endpoint
**token** - `string`
name of the token
**total** - `integer`
total terabytes
**guaranteed** - `integer`
guaranteed terabytes
**free** - `integer`
free terabytes
**lastCheckTime** - `datetime`
time-stamp from which the result is effective
:return: S_OK() || S_ERROR()
'''
return RPCClient("ResourceStatus/ResourceManagement").addOrModify('SpaceTokenOccupancyCache',
self._prepare(locals()))
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| arrabito/DIRAC | ResourceStatusSystem/Client/ResourceManagementClient.py | Python | gpl-3.0 | 27,731 | [
"DIRAC"
] | a6b0268ea9acd3e77ebb27508642997fe7f9f6e5fc2c5085c0ecce8aa5815402 |
from functools import partial
from client.player import Player
from client.updater import fetchClientUpdate
from config import Settings
import fa
from fa.factions import Factions
# Created on Dec 1, 2011
# @author: thygrrr
from PyQt4 import QtCore, QtGui, QtNetwork, QtWebKit
from PyQt4.QtCore import QDataStream
from types import IntType, FloatType, ListType, DictType
from client import ClientState, GAME_PORT_DEFAULT, LOBBY_HOST, \
LOBBY_PORT, LOCAL_REPLAY_PORT
import logging
logger = logging.getLogger(__name__)
import util
import secondaryServer
import json
import sys
import replays
import time
import os
import random
import notificatation_system as ns
FormClass, BaseClass = util.loadUiType("client/client.ui")
class mousePosition(object):
def __init__(self, parent):
self.parent = parent
self.onLeftEdge = False
self.onRightEdge = False
self.onTopEdge = False
self.onBottomEdge = False
self.cursorShapeChange = False
self.warning_buttons = dict()
def computeMousePosition(self, pos):
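        # A frameless window has no native resize handles; treat the cursor as
        # "on an edge" whenever it is within an 8 px margin of the border.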
self.onLeftEdge = pos.x() < 8
self.onRightEdge = pos.x() > self.parent.size().width() - 8
self.onTopEdge = pos.y() < 8
self.onBottomEdge = pos.y() > self.parent.size().height() - 8
self.onTopLeftEdge = self.onTopEdge and self.onLeftEdge
self.onBottomLeftEdge = self.onBottomEdge and self.onLeftEdge
self.onTopRightEdge = self.onTopEdge and self.onRightEdge
self.onBottomRightEdge = self.onBottomEdge and self.onRightEdge
self.onEdges = self.onLeftEdge or self.onRightEdge or self.onTopEdge or self.onBottomEdge
def resetToFalse(self):
self.onLeftEdge = False
self.onRightEdge = False
self.onTopEdge = False
self.onBottomEdge = False
self.cursorShapeChange = False
def isOnEdge(self):
return self.onEdges
class ClientWindow(FormClass, BaseClass):
'''
This is the main lobby client that manages the FAF-related connection and data,
in particular players, games, ranking, etc.
Its UI also houses all the other UIs for the sub-modules.
'''
topWidget = QtGui.QWidget()
#These signals are emitted when the client is connected or disconnected from FAF
connected = QtCore.pyqtSignal()
disconnected = QtCore.pyqtSignal()
    #This signal is emitted when the client is done resizing
doneresize = QtCore.pyqtSignal()
#These signals notify connected modules of game state changes (i.e. reasons why FA is launched)
viewingReplay = QtCore.pyqtSignal(QtCore.QUrl)
#Game state controls
gameEnter = QtCore.pyqtSignal()
gameExit = QtCore.pyqtSignal()
#These signals propagate important client state changes to other modules
statsInfo = QtCore.pyqtSignal(dict)
tourneyTypesInfo = QtCore.pyqtSignal(dict)
tutorialsInfo = QtCore.pyqtSignal(dict)
tourneyInfo = QtCore.pyqtSignal(dict)
modInfo = QtCore.pyqtSignal(dict)
gameInfo = QtCore.pyqtSignal(dict)
modVaultInfo = QtCore.pyqtSignal(dict)
coopInfo = QtCore.pyqtSignal(dict)
avatarList = QtCore.pyqtSignal(list)
playerAvatarList = QtCore.pyqtSignal(dict)
usersUpdated = QtCore.pyqtSignal(list)
localBroadcast = QtCore.pyqtSignal(str, str)
autoJoin = QtCore.pyqtSignal(list)
channelsUpdated = QtCore.pyqtSignal(list)
replayVault = QtCore.pyqtSignal(dict)
coopLeaderBoard = QtCore.pyqtSignal(dict)
#These signals are emitted whenever a certain tab is activated
showReplays = QtCore.pyqtSignal()
showMaps = QtCore.pyqtSignal()
showGames = QtCore.pyqtSignal()
showTourneys = QtCore.pyqtSignal()
showLadder = QtCore.pyqtSignal()
showChat = QtCore.pyqtSignal()
showMods = QtCore.pyqtSignal()
showCoop = QtCore.pyqtSignal()
joinGameFromURL = QtCore.pyqtSignal(str)
matchmakerInfo = QtCore.pyqtSignal(dict)
def __init__(self, *args, **kwargs):
BaseClass.__init__(self, *args, **kwargs)
logger.debug("Client instantiating")
# Hook to Qt's application management system
QtGui.QApplication.instance().aboutToQuit.connect(self.cleanup)
#Init and wire the TCP Network socket to communicate with faforever.com
# This is the evil stream API.
self.socket = QtNetwork.QTcpSocket()
self.socket.readyRead.connect(self.readFromServer)
self.socket.disconnected.connect(self.disconnectedFromServer)
self.socket.error.connect(self.socketError)
self.blockSize = 0
self.useUPnP = False
self.uniqueId = None
self.sendFile = False
self.progress = QtGui.QProgressDialog()
self.progress.setMinimum(0)
self.progress.setMaximum(0)
#Tray icon
self.tray = QtGui.QSystemTrayIcon()
self.tray.setIcon(util.icon("client/tray_icon.png"))
self.tray.show()
self.state = ClientState.NONE
self.session = None
#Timer for resize events
self.resizeTimer = QtCore.QTimer(self)
self.resizeTimer.timeout.connect(self.resized)
self.preferedSize = 0
#Process used to run Forged Alliance (managed in module fa)
fa.instance.started.connect(self.startedFA)
fa.instance.finished.connect(self.finishedFA)
fa.instance.error.connect(self.errorFA)
self.gameInfo.connect(fa.instance.processGameInfo)
#Local Replay Server (and relay)
self.replayServer = fa.replayserver.ReplayServer(self)
#Local Relay Server
self.relayServer = fa.relayserver.RelayServer(self)
#Local proxy servers
self.proxyServer = fa.proxies.proxies(self)
#stat server
self.statsServer = secondaryServer.SecondaryServer("Statistic", 11002, self)
#create user interface (main window) and load theme
self.setupUi(self)
self.setStyleSheet(util.readstylesheet("client/client.css"))
self.windowsTitleLabel = QtGui.QLabel(self)
self.windowsTitleLabel.setText("FA Forever " + util.VERSION_STRING)
self.windowsTitleLabel.setProperty("titleLabel", True)
self.setWindowTitle("FA Forever " + util.VERSION_STRING)
# Frameless
self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowMinimizeButtonHint)
self.rubberBand = QtGui.QRubberBand(QtGui.QRubberBand.Rectangle)
self.mousePosition = mousePosition(self)
self.installEventFilter(self)
self.minimize = QtGui.QToolButton(self)
self.minimize.setIcon(util.icon("client/minimize-button.png"))
self.maximize = QtGui.QToolButton(self)
self.maximize.setIcon(util.icon("client/maximize-button.png"))
close = QtGui.QToolButton(self)
close.setIcon(util.icon("client/close-button.png"))
self.minimize.setMinimumHeight(10)
close.setMinimumHeight(10)
self.maximize.setMinimumHeight(10)
close.setIconSize(QtCore.QSize(22, 22))
self.minimize.setIconSize(QtCore.QSize(22, 22))
self.maximize.setIconSize(QtCore.QSize(22, 22))
close.setProperty("windowControlBtn", True)
self.maximize.setProperty("windowControlBtn", True)
self.minimize.setProperty("windowControlBtn", True)
self.menu = self.menuBar()
self.topLayout.addWidget(self.menu)
self.topLayout.addWidget(self.windowsTitleLabel)
self.topLayout.addWidget(self.minimize)
self.topLayout.addWidget(self.maximize)
self.topLayout.addWidget(close)
self.topLayout.insertStretch(1, 500)
self.topLayout.setSpacing(0)
self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
self.maxNormal = False
close.clicked.connect(self.close)
self.minimize.clicked.connect(self.showSmall)
self.maximize.clicked.connect(self.showMaxRestore)
self.moving = False
self.dragging = False
self.draggingHover = False
self.offset = None
self.curSize = None
sizeGrip = QtGui.QSizeGrip(self)
self.mainGridLayout.addWidget(sizeGrip, 2, 2)
#Wire all important signals
self.mainTabs.currentChanged.connect(self.mainTabChanged)
self.topTabs.currentChanged.connect(self.vaultTabChanged)
#Verrry important step!
self.loadSettingsPrelogin()
self.players = {} # Players known to the client, contains the player_info messages sent by the server
self.urls = {}
# Handy reference to the Player object representing the logged-in user.
self.me = None
# names of the client's friends
self.friends = set()
# names of the client's foes
self.foes = set()
self.clanlist = set() # members of clients clan
self.power = 0 # current user power
self.id = 0
self.coloredNicknames = False
#Initialize the Menu Bar according to settings etc.
self.initMenus()
#Load the icons for the tabs
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.whatNewTab), util.icon("client/feed.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.chatTab), util.icon("client/chat.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.gamesTab), util.icon("client/games.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.coopTab), util.icon("client/coop.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.vaultsTab), util.icon("client/mods.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.ladderTab), util.icon("client/ladder.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.tourneyTab), util.icon("client/tourney.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.livestreamTab), util.icon("client/twitch.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.replaysTab), util.icon("client/replays.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.tutorialsTab), util.icon("client/tutorials.png"))
QtWebKit.QWebSettings.globalSettings().setAttribute(QtWebKit.QWebSettings.PluginsEnabled, True)
#for moderator
self.modMenu = None
def eventFilter(self, obj, event):
        if event.type() == QtCore.QEvent.HoverMove:
            self.draggingHover = self.dragging
            if self.dragging:
                self.resizeWidget(self.mapToGlobal(event.pos()))
            else:
                if not self.maxNormal:
                    self.mousePosition.computeMousePosition(event.pos())
                else:
                    self.mousePosition.resetToFalse()
self.updateCursorShape(event.pos())
return False
def updateCursorShape(self, pos):
if self.mousePosition.onTopLeftEdge or self.mousePosition.onBottomRightEdge:
self.mousePosition.cursorShapeChange = True
self.setCursor(QtCore.Qt.SizeFDiagCursor)
elif self.mousePosition.onTopRightEdge or self.mousePosition.onBottomLeftEdge:
self.setCursor(QtCore.Qt.SizeBDiagCursor)
self.mousePosition.cursorShapeChange = True
elif self.mousePosition.onLeftEdge or self.mousePosition.onRightEdge:
self.setCursor(QtCore.Qt.SizeHorCursor)
self.mousePosition.cursorShapeChange = True
elif self.mousePosition.onTopEdge or self.mousePosition.onBottomEdge:
self.setCursor(QtCore.Qt.SizeVerCursor)
self.mousePosition.cursorShapeChange = True
else:
            if self.mousePosition.cursorShapeChange:
self.unsetCursor()
self.mousePosition.cursorShapeChange = False
def showSmall(self):
self.showMinimized()
def showMaxRestore(self):
        if self.maxNormal:
self.maxNormal = False
if self.curSize:
self.setGeometry(self.curSize)
else:
self.maxNormal = True
self.curSize = self.geometry()
self.setGeometry(QtGui.QDesktopWidget().availableGeometry(self))
def mouseDoubleClickEvent(self, event):
self.showMaxRestore()
def mouseReleaseEvent(self, event):
self.dragging = False
self.moving = False
if self.rubberBand.isVisible():
self.maxNormal = True
self.curSize = self.geometry()
self.setGeometry(self.rubberBand.geometry())
self.rubberBand.hide()
#self.showMaxRestore()
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
            if self.mousePosition.isOnEdge() and not self.maxNormal:
                self.dragging = True
                return
            else:
                self.dragging = False
self.moving = True
self.offset = event.pos()
def mouseMoveEvent(self, event):
        if self.dragging and not self.draggingHover:
            self.resizeWidget(event.globalPos())
        elif self.moving and self.offset is not None:
desktop = QtGui.QDesktopWidget().availableGeometry(self)
if event.globalPos().y() == 0:
self.rubberBand.setGeometry(desktop)
self.rubberBand.show()
elif event.globalPos().x() == 0:
desktop.setRight(desktop.right() / 2.0)
self.rubberBand.setGeometry(desktop)
self.rubberBand.show()
elif event.globalPos().x() == desktop.right():
desktop.setRight(desktop.right() / 2.0)
desktop.moveLeft(desktop.right())
self.rubberBand.setGeometry(desktop)
self.rubberBand.show()
else:
self.rubberBand.hide()
            if self.maxNormal:
self.showMaxRestore()
self.move(event.globalPos() - self.offset)
def resizeWidget(self, globalMousePos):
if globalMousePos.y() == 0:
self.rubberBand.setGeometry(QtGui.QDesktopWidget().availableGeometry(self))
self.rubberBand.show()
else:
self.rubberBand.hide()
origRect = self.frameGeometry()
left, top, right, bottom = origRect.getCoords()
minWidth = self.minimumWidth()
minHeight = self.minimumHeight()
if self.mousePosition.onTopLeftEdge:
left = globalMousePos.x()
top = globalMousePos.y()
elif self.mousePosition.onBottomLeftEdge:
            left = globalMousePos.x()
            bottom = globalMousePos.y()
elif self.mousePosition.onTopRightEdge:
right = globalMousePos.x()
top = globalMousePos.y()
elif self.mousePosition.onBottomRightEdge:
right = globalMousePos.x()
bottom = globalMousePos.y()
elif self.mousePosition.onLeftEdge:
left = globalMousePos.x()
elif self.mousePosition.onRightEdge:
right = globalMousePos.x()
elif self.mousePosition.onTopEdge:
top = globalMousePos.y()
elif self.mousePosition.onBottomEdge:
bottom = globalMousePos.y()
newRect = QtCore.QRect(QtCore.QPoint(left, top), QtCore.QPoint(right, bottom))
if newRect.isValid():
if minWidth > newRect.width():
                if left != origRect.left():
newRect.setLeft(origRect.left())
else:
newRect.setRight(origRect.right())
            if minHeight > newRect.height():
if top != origRect.top():
newRect.setTop(origRect.top())
else:
newRect.setBottom(origRect.bottom())
self.setGeometry(newRect)
def setup(self):
import chat
import tourneys
import stats
import vault
import games
import tutorials
import modvault
import coop
from chat._avatarWidget import avatarWidget
# Initialize chat
self.chat = chat.Lobby(self)
#build main window with the now active client
self.ladder = stats.Stats(self)
self.games = games.Games(self)
self.tourneys = tourneys.Tourneys(self)
self.vault = vault.MapVault(self)
self.modvault = modvault.ModVault(self)
self.replays = replays.Replays(self)
self.tutorials = tutorials.Tutorials(self)
self.Coop = coop.Coop(self)
self.notificationSystem = ns.NotificationSystem(self)
# set menu states
self.actionNsEnabled.setChecked(self.notificationSystem.settings.enabled)
# Other windows
self.avatarAdmin = self.avatarSelection = avatarWidget(self, None)
# warning setup
self.warning = QtGui.QHBoxLayout()
self.warnPlayer = QtGui.QLabel(self)
self.warnPlayer.setText("A player of your skill level is currently searching for a 1v1 game. Click a faction to join them! ")
self.warnPlayer.setAlignment(QtCore.Qt.AlignHCenter)
self.warnPlayer.setAlignment(QtCore.Qt.AlignVCenter)
self.warnPlayer.setProperty("warning", True)
self.warning.addStretch()
def add_warning_button(faction):
button = QtGui.QToolButton(self)
button.setMaximumSize(25, 25)
button.setIcon(util.icon("games/automatch/%s.png" % faction.to_name()))
button.clicked.connect(self.games.join_ladder_listeners[faction])
self.warning.addWidget(button)
return button
self.warning_buttons = {faction: add_warning_button(faction) for faction in Factions}
self.warning.addStretch()
self.mainGridLayout.addLayout(self.warning, 2, 0)
self.warningHide()
def warningHide(self):
'''
hide the warning bar for matchmaker
'''
self.warnPlayer.hide()
for i in self.warning_buttons.values():
i.hide()
def warningShow(self):
'''
show the warning bar for matchmaker
'''
self.warnPlayer.show()
for i in self.warning_buttons.values():
i.show()
@QtCore.pyqtSlot()
def cleanup(self):
'''
Perform cleanup before the UI closes
'''
self.state = ClientState.SHUTDOWN
self.progress.setWindowTitle("FAF is shutting down")
self.progress.setMinimum(0)
self.progress.setMaximum(0)
self.progress.setValue(0)
self.progress.setCancelButton(None)
self.progress.show()
#Important: If a game is running, offer to terminate it gently
self.progress.setLabelText("Closing ForgedAllianceForever.exe")
if fa.instance.running():
fa.instance.close()
#Terminate Lobby Server connection
if self.socket.state() == QtNetwork.QTcpSocket.ConnectedState:
self.progress.setLabelText("Closing main connection.")
self.socket.disconnectFromHost()
# Clear UPnP Mappings...
if self.useUPnP:
self.progress.setLabelText("Removing UPnP port mappings")
fa.upnp.removePortMappings()
#Terminate local ReplayServer
if self.replayServer:
self.progress.setLabelText("Terminating local replay server")
self.replayServer.close()
self.replayServer = None
#Terminate local ReplayServer
if self.relayServer:
self.progress.setLabelText("Terminating local relay server")
self.relayServer.close()
self.relayServer = None
#Clean up Chat
if self.chat:
self.progress.setLabelText("Disconnecting from IRC")
self.chat.disconnect()
self.chat = None
# Get rid of the Tray icon
if self.tray:
self.progress.setLabelText("Removing System Tray icon")
self.tray.deleteLater()
self.tray = None
#Terminate UI
if self.isVisible():
self.progress.setLabelText("Closing main window")
self.close()
self.progress.close()
def closeEvent(self, event):
logger.info("Close Event for Application Main Window")
self.saveWindow()
if fa.instance.running():
if QtGui.QMessageBox.question(self, "Are you sure?", "Seems like you still have Forged Alliance running!<br/><b>Close anyway?</b>", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) == QtGui.QMessageBox.No:
event.ignore()
return
return QtGui.QMainWindow.closeEvent(self, event)
def resizeEvent(self, size):
self.resizeTimer.start(400)
def resized(self):
self.resizeTimer.stop()
self.doneresize.emit()
def initMenus(self):
self.actionLinkMumble.triggered.connect(partial(self.open_url, Settings.get("MUMBLE_URL").format(login=self.login)))
self.actionLink_account_to_Steam.triggered.connect(partial(self.open_url, Settings.get("STEAMLINK_URL")))
self.actionLinkWebsite.triggered.connect(partial(self.open_url, Settings.get("WEBSITE_URL")))
self.actionLinkWiki.triggered.connect(partial(self.open_url, Settings.get("WIKI_URL")))
self.actionLinkForums.triggered.connect(partial(self.open_url, Settings.get("FORUMS_URL")))
self.actionLinkUnitDB.triggered.connect(partial(self.open_url, Settings.get("UNITDB_URL")))
self.actionNsSettings.triggered.connect(lambda : self.notificationSystem.on_showSettings())
self.actionNsEnabled.triggered.connect(lambda enabled : self.notificationSystem.setNotificationEnabled(enabled))
self.actionWiki.triggered.connect(partial(self.open_url, Settings.get("WIKI_URL")))
self.actionReportBug.triggered.connect(partial(self.open_url, Settings.get("TICKET_URL")))
self.actionShowLogs.triggered.connect(self.linkShowLogs)
self.actionTechSupport.triggered.connect(partial(self.open_url, Settings.get("SUPPORT_URL")))
self.actionAbout.triggered.connect(self.linkAbout)
self.actionClearCache.triggered.connect(self.clearCache)
self.actionClearSettings.triggered.connect(self.clearSettings)
self.actionClearGameFiles.triggered.connect(self.clearGameFiles)
self.actionSetGamePath.triggered.connect(self.switchPath)
self.actionSetGamePort.triggered.connect(self.switchPort)
self.actionSetMumbleOptions.triggered.connect(self.setMumbleOptions)
#Toggle-Options
self.actionSetAutoLogin.triggered.connect(self.updateOptions)
self.actionSetSoundEffects.triggered.connect(self.updateOptions)
self.actionSetOpenGames.triggered.connect(self.updateOptions)
self.actionSetJoinsParts.triggered.connect(self.updateOptions)
self.actionSetLiveReplays.triggered.connect(self.updateOptions)
self.actionSaveGamelogs.triggered.connect(self.updateOptions)
self.actionColoredNicknames.triggered.connect(self.updateOptions)
self.actionActivateMumbleSwitching.triggered.connect(self.saveMumbleSwitching)
#Init themes as actions.
themes = util.listThemes()
for theme in themes:
action = self.menuTheme.addAction(str(theme))
action.triggered.connect(self.switchTheme)
action.theme = theme
action.setCheckable(True)
if util.getTheme() == theme:
action.setChecked(True)
# Nice helper for the developers
self.menuTheme.addSeparator()
self.menuTheme.addAction("Reload Stylesheet", lambda: self.setStyleSheet(util.readstylesheet("client/client.css")))
@QtCore.pyqtSlot()
def updateOptions(self):
self.autologin = self.actionSetAutoLogin.isChecked()
self.soundeffects = self.actionSetSoundEffects.isChecked()
self.opengames = self.actionSetOpenGames.isChecked()
self.joinsparts = self.actionSetJoinsParts.isChecked()
self.livereplays = self.actionSetLiveReplays.isChecked()
self.gamelogs = self.actionSaveGamelogs.isChecked()
self.coloredNicknames = self.actionColoredNicknames.isChecked()
self.saveChat()
self.saveCredentials()
@QtCore.pyqtSlot()
def switchTheme(self):
util.setTheme(self.sender().theme, True)
@QtCore.pyqtSlot()
def switchPath(self):
fa.wizards.Wizard(self).exec_()
@QtCore.pyqtSlot()
def switchPort(self):
import loginwizards
loginwizards.gameSettingsWizard(self).exec_()
@QtCore.pyqtSlot()
def setMumbleOptions(self):
import loginwizards
loginwizards.mumbleOptionsWizard(self).exec_()
@QtCore.pyqtSlot()
def clearSettings(self):
result = QtGui.QMessageBox.question(None, "Clear Settings", "Are you sure you wish to clear all settings, login info, etc. used by this program?", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if (result == QtGui.QMessageBox.Yes):
util.settings.clear()
util.settings.sync()
QtGui.QMessageBox.information(None, "Restart Needed", "FAF will quit now.")
QtGui.QApplication.quit()
@QtCore.pyqtSlot()
def clearGameFiles(self):
util.clearDirectory(util.BIN_DIR)
util.clearDirectory(util.GAMEDATA_DIR)
@QtCore.pyqtSlot()
def clearCache(self):
changed = util.clearDirectory(util.CACHE_DIR)
if changed:
QtGui.QMessageBox.information(None, "Restart Needed", "FAF will quit now.")
QtGui.QApplication.quit()
@QtCore.pyqtSlot()
def open_url(self, url):
QtGui.QDesktopServices.openUrl(url)
@QtCore.pyqtSlot()
def linkShowLogs(self):
util.showInExplorer(util.LOG_DIR)
@QtCore.pyqtSlot()
def linkAbout(self):
dialog = util.loadUi("client/about.ui")
dialog.exec_()
def saveCredentials(self):
util.settings.beginGroup("user")
util.settings.setValue("user/remember", self.remember) #always remember to remember
if self.remember:
util.settings.setValue("user/login", self.login)
util.settings.setValue("user/password", self.password)
util.settings.setValue("user/autologin", self.autologin) #only autologin if remembering
else:
util.settings.setValue("user/login", None)
util.settings.setValue("user/password", None)
util.settings.setValue("user/autologin", False)
util.settings.endGroup()
util.settings.sync()
def clearAutologin(self):
self.autologin = False
self.actionSetAutoLogin.setChecked(False)
util.settings.beginGroup("user")
util.settings.setValue("user/autologin", False)
util.settings.endGroup()
util.settings.sync()
def saveWindow(self):
util.settings.beginGroup("window")
util.settings.setValue("geometry", self.saveGeometry())
util.settings.endGroup()
util.settings.beginGroup("ForgedAlliance")
util.settings.setValue("app/falogs", self.gamelogs)
util.settings.endGroup()
def savePort(self):
util.settings.beginGroup("ForgedAlliance")
util.settings.setValue("app/gameport", self.gamePort)
util.settings.setValue("app/upnp", self.useUPnP)
util.settings.endGroup()
util.settings.sync()
def saveMumble(self):
util.settings.beginGroup("Mumble")
util.settings.setValue("app/mumble", self.enableMumble)
util.settings.endGroup()
util.settings.sync()
@QtCore.pyqtSlot()
def saveMumbleSwitching(self):
self.activateMumbleSwitching = self.actionActivateMumbleSwitching.isChecked()
util.settings.beginGroup("Mumble")
util.settings.setValue("app/activateMumbleSwitching", self.activateMumbleSwitching)
util.settings.endGroup()
util.settings.sync()
def saveChat(self):
util.settings.beginGroup("chat")
util.settings.setValue("soundeffects", self.soundeffects)
util.settings.setValue("livereplays", self.livereplays)
util.settings.setValue("opengames", self.opengames)
util.settings.setValue("joinsparts", self.joinsparts)
util.settings.setValue("coloredNicknames", self.coloredNicknames)
util.settings.endGroup()
def loadSettingsPrelogin(self):
util.settings.beginGroup("user")
self.login = util.settings.value("user/login")
self.password = util.settings.value("user/password")
self.remember = (util.settings.value("user/remember") == "true")
# This is the new way we do things.
self.autologin = (util.settings.value("user/autologin") == "true")
self.actionSetAutoLogin.setChecked(self.autologin)
util.settings.endGroup()
def loadSettings(self):
#Load settings
util.settings.beginGroup("window")
geometry = util.settings.value("geometry", None)
if geometry:
self.restoreGeometry(geometry)
util.settings.endGroup()
util.settings.beginGroup("ForgedAlliance")
self.gamePort = int(util.settings.value("app/gameport", GAME_PORT_DEFAULT))
self.useUPnP = (util.settings.value("app/upnp", "true") == "true")
self.gamelogs = (util.settings.value("app/falogs", "false") == "true")
self.actionSaveGamelogs.setChecked(self.gamelogs)
util.settings.endGroup()
util.settings.beginGroup("Mumble")
if util.settings.value("app/mumble", "firsttime") == "firsttime":
# The user has never configured mumble before. Be a little intrusive and ask him if he wants to use it.
if QtGui.QMessageBox.question(self, "Enable Voice Connector?", "FA Forever can connect with <a href=\"http://mumble.sourceforge.net/\">Mumble</a> to support the automatic setup of voice connections between you and your team mates. Would you like to enable this feature? You can change the setting at any time by going to options -> settings -> Voice", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) == QtGui.QMessageBox.Yes:
util.settings.setValue("app/mumble", "true")
else:
util.settings.setValue("app/mumble", "false")
if util.settings.value("app/activateMumbleSwitching", "firsttime") == "firsttime":
util.settings.setValue("app/activateMumbleSwitching", "true")
self.enableMumble = (util.settings.value("app/mumble", "false") == "true")
self.activateMumbleSwitching = (util.settings.value("app/activateMumbleSwitching", "false") == "true")
util.settings.endGroup()
self.actionActivateMumbleSwitching.setChecked(self.activateMumbleSwitching)
self.loadChat()
def loadChat(self):
try:
util.settings.beginGroup("chat")
self.soundeffects = (util.settings.value("soundeffects", "true") == "true")
self.opengames = (util.settings.value("opengames", "true") == "true")
self.joinsparts = (util.settings.value("joinsparts", "false") == "true")
self.livereplays = (util.settings.value("livereplays", "true") == "true")
self.coloredNicknames = (util.settings.value("coloredNicknames", "false") == "true")
util.settings.endGroup()
self.actionColoredNicknames.setChecked(self.coloredNicknames)
self.actionSetSoundEffects.setChecked(self.soundeffects)
self.actionSetLiveReplays.setChecked(self.livereplays)
self.actionSetOpenGames.setChecked(self.opengames)
self.actionSetJoinsParts.setChecked(self.joinsparts)
except:
pass
def doConnect(self):
if not self.replayServer.doListen(LOCAL_REPLAY_PORT):
return False
if not self.relayServer.doListen():
return False
self.progress.setCancelButtonText("Cancel")
self.progress.setWindowFlags(QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowTitleHint)
self.progress.setAutoClose(False)
self.progress.setAutoReset(False)
self.progress.setModal(1)
self.progress.setWindowTitle("Connecting...")
self.progress.setLabelText("Establishing connection to {}:{}".format(LOBBY_HOST, LOBBY_PORT))
self.progress.show()
# Begin connecting.
self.socket.setSocketOption(QtNetwork.QTcpSocket.KeepAliveOption, 1)
self.socket.connectToHost(LOBBY_HOST, LOBBY_PORT)
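        # Busy-wait while pumping the Qt event loop so the modal progress
        # dialog stays responsive (and cancellable) during connection.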
while (self.socket.state() != QtNetwork.QAbstractSocket.ConnectedState) and self.progress.isVisible():
QtGui.QApplication.processEvents()
self.state = ClientState.NONE
self.localIP = str(self.socket.localAddress().toString())
# #Perform Version Check first
if not self.socket.state() == QtNetwork.QAbstractSocket.ConnectedState:
self.progress.close() # in case it was still showing...
# We either cancelled or had a TCP error, meaning the connection failed..
if self.progress.wasCanceled():
logger.warn("doConnect() aborted by user.")
else:
logger.error("doConnect() failed with clientstate " + str(self.state) + ", socket errorstring: " + self.socket.errorString())
return False
else:
return True
def reconnect(self):
''' try to reconnect to the server'''
self.socket.disconnected.disconnect(self.disconnectedFromServer)
self.socket.disconnectFromHost()
self.socket.disconnected.connect(self.disconnectedFromServer)
self.progress.setCancelButtonText("Cancel")
self.progress.setWindowFlags(QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowTitleHint)
self.progress.setAutoClose(False)
self.progress.setAutoReset(False)
self.progress.setModal(1)
self.progress.setWindowTitle("Re-connecting...")
self.progress.setLabelText("Re-establishing connection ...")
self.progress.show()
# Begin connecting.
self.socket.setSocketOption(QtNetwork.QTcpSocket.KeepAliveOption, 1)
self.socket.connectToHost(LOBBY_HOST, LOBBY_PORT)
while (self.socket.state() != QtNetwork.QAbstractSocket.ConnectedState) and self.progress.isVisible():
QtGui.QApplication.processEvents()
self.state = ClientState.NONE
self.localIP = str(self.socket.localAddress().toString())
# #Perform Version Check first
if not self.socket.state() == QtNetwork.QAbstractSocket.ConnectedState:
self.progress.close() # in case it was still showing...
# We either cancelled or had a TCP error, meaning the connection failed..
if self.progress.wasCanceled():
                logger.warn("reconnect() aborted by user.")
else:
                logger.error("reconnect() failed with clientstate " + str(self.state) + ", socket errorstring: " + self.socket.errorString())
return False
else:
self.send(dict(command="hello", version=0, login=self.login, password=self.password, unique_id=self.uniqueId, local_ip=self.localIP, session=self.session))
return True
def waitSession(self):
self.progress.setLabelText("Setting up Session...")
self.send(dict(command="ask_session"))
start = time.time()
        while self.session is None and self.progress.isVisible():
            QtGui.QApplication.processEvents()
            if time.time() - start > 15:
                break
        if not self.session:
            if self.progress.wasCanceled():
                logger.warn("waitSession() aborted by user.")
            else:
logger.error("waitSession() failed with clientstate " + str(self.state) + ", socket errorstring: " + self.socket.errorString())
QtGui.QMessageBox.critical(self, "Notice from Server", "Unable to get a session : <br> Server under maintenance.<br><br>Please retry in some minutes.")
return False
self.uniqueId = util.uniqueID(self.login, self.session)
self.loadSettings()
#
        # Voice connector (This isn't supposed to be here, but I need the settings to be loaded before I can determine if we can hook in the mumbleConnector)
#
if self.enableMumble:
self.progress.setLabelText("Setting up Mumble...")
import mumbleconnector
self.mumbleConnector = mumbleconnector.MumbleConnector(self)
return True
def doLogin(self):
#Determine if a login wizard needs to be displayed and do so
if not self.autologin or not self.password or not self.login:
import loginwizards
if not loginwizards.LoginWizard(self).exec_():
                return False
self.progress.setLabelText("Logging in...")
self.progress.reset()
self.progress.show()
self.login = self.login.strip()
logger.info("Attempting to login as: " + str(self.login))
self.state = ClientState.NONE
        if not self.uniqueId:
QtGui.QMessageBox.warning(QtGui.QApplication.activeWindow(), "Unable to login", "It seems that you miss some important DLL.<br>Please install :<br><a href =\"http://www.microsoft.com/download/en/confirmation.aspx?id=8328\">http://www.microsoft.com/download/en/confirmation.aspx?id=8328</a> and <a href = \"http://www.microsoft.com/en-us/download/details.aspx?id=17851\">http://www.microsoft.com/en-us/download/details.aspx?id=17851</a><br><br>You probably have to restart your computer after installing them.<br><br>Please visit this link in case of problems : <a href=\"http://forums.faforever.com/forums/viewforum.php?f=3\">http://forums.faforever.com/forums/viewforum.php?f=3</a>", QtGui.QMessageBox.Close)
return False
else:
self.send(dict(command="hello", version=0, login=self.login, password=self.password, unique_id=self.uniqueId, local_ip=self.localIP))
while (not self.state) and self.progress.isVisible():
QtGui.QApplication.processEvents()
if self.progress.wasCanceled():
logger.warn("Login aborted by user.")
return False
self.progress.close()
if self.state == ClientState.OUTDATED :
logger.warn("Client is OUTDATED.")
elif self.state == ClientState.ACCEPTED:
logger.info("Login accepted.")
# update what's new page
self.whatNewsView.setUrl(QtCore.QUrl("http://www.faforever.com/?page_id=114&username={user}&pwdhash={pwdhash}".format(user=self.login, pwdhash=self.password)))
# live streams
self.LivestreamWebView.setUrl(QtCore.QUrl("http://www.faforever.com/?page_id=974"))
util.crash.CRASH_REPORT_USER = self.login
if self.useUPnP:
fa.upnp.createPortMapping(self.localIP, self.gamePort, "UDP")
#success: save login data (if requested) and carry on
self.actionSetAutoLogin.setChecked(self.autologin)
self.updateOptions()
self.progress.close()
self.connected.emit()
return True
elif self.state == ClientState.REJECTED:
logger.warning("Login rejected.")
            # seems that there is a bug in a key ..
util.settings.beginGroup("window")
util.settings.remove("geometry")
util.settings.endGroup()
self.clearAutologin()
return self.doLogin() #Just try to login again, slightly hackish but I can get away with it here, I guess.
else:
# A more profound error has occurred (cancellation or disconnection)
return False
def isFriend(self, name):
'''
Convenience function for other modules to inquire about a user's friendliness.
'''
return name in self.friends
def isFoe(self, name):
'''
Convenience function for other modules to inquire about a user's foeliness.
'''
return name in self.foes
def isPlayer(self, name):
'''
Convenience function for other modules to inquire about a user's civilian status.
'''
return name in self.players or name == self.login
#Color table used by the following method
# CAVEAT: This will break if the theme is loaded after the client package is imported
colors = json.loads(util.readfile("client/colors.json"))
randomcolors = json.loads(util.readfile("client/randomcolors.json"))
def getUserColor(self, name):
'''
Returns a user's color depending on their status with relation to the FAF client
'''
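        # Priority order: self > friend > foe > clan member > (random or
        # generic player color) > default.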
if name == self.login:
return self.getColor("self")
elif name in self.friends:
return self.getColor("friend")
elif name in self.foes:
return self.getColor("foe")
elif name in self.clanlist:
return self.getColor("clan")
else:
if self.coloredNicknames:
return self.getRandomColor(name)
if name in self.players:
return self.getColor("player")
return self.getColor("default")
def getRandomColor(self, name):
'''Generate a random color from a name'''
random.seed(name)
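        # Seeding with the nickname makes the "random" choice deterministic,
        # so a given player keeps the same color across sessions.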
return random.choice(self.randomcolors)
def getColor(self, name):
if name in self.colors:
return self.colors[name]
else:
return self.colors["default"]
@QtCore.pyqtSlot()
def startedFA(self):
'''
Slot hooked up to fa.instance when the process has launched.
It will notify other modules through the signal gameEnter().
'''
logger.info("FA has launched in an attached process.")
self.gameEnter.emit()
@QtCore.pyqtSlot(int)
def finishedFA(self, exit_code):
'''
Slot hooked up to fa.instance when the process has ended.
It will notify other modules through the signal gameExit().
'''
if not exit_code:
logger.info("FA has finished with exit code: " + str(exit_code))
else:
logger.warn("FA has finished with exit code: " + str(exit_code))
self.gameExit.emit()
@QtCore.pyqtSlot(int)
def errorFA(self, error_code):
'''
Slot hooked up to fa.instance when the process has failed to start.
'''
if error_code == 0:
logger.error("FA has failed to start")
QtGui.QMessageBox.critical(self, "Error from FA", "FA has failed to start.")
elif error_code == 1:
logger.error("FA has crashed or killed after starting")
else:
text = "FA has failed to start with error code: " + str(error_code)
logger.error(text)
QtGui.QMessageBox.critical(self, "Error from FA", text)
self.gameExit.emit()
@QtCore.pyqtSlot(int)
def mainTabChanged(self, index):
'''
The main visible tab (module) of the client's UI has changed.
In this case, other modules may want to load some data or cease
particularly CPU-intensive interactive functionality.
LATER: This can be rewritten as a simple Signal that each module can then individually connect to.
'''
new_tab = self.mainTabs.widget(index)
if new_tab is self.gamesTab:
self.showGames.emit()
if new_tab is self.chatTab:
self.showChat.emit()
if new_tab is self.replaysTab:
self.showReplays.emit()
if new_tab is self.ladderTab:
self.showLadder.emit()
if new_tab is self.tourneyTab:
self.showTourneys.emit()
if new_tab is self.coopTab:
self.showCoop.emit()
@QtCore.pyqtSlot(int)
def vaultTabChanged(self, index):
new_tab = self.topTabs.widget(index)
if new_tab is self.mapsTab:
self.showMaps.emit()
if new_tab is self.modsTab:
self.showMods.emit()
def joinGameFromURL(self, url):
'''
Tries to join the game at the given URL
'''
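        # Inferred from the query reads below: the URL is expected to carry
        # 'mod', 'map', 'uid' and (optionally) a JSON-encoded 'mods' list.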
logger.debug("joinGameFromURL: " + url.toString())
if fa.instance.available():
add_mods = []
try:
modstr = url.queryItemValue("mods")
add_mods = json.loads(modstr) # should be a list
except:
logger.info("Couldn't load urlquery value 'mods'")
if fa.check.game(self):
if fa.check.check(url.queryItemValue("mod"), url.queryItemValue("map"), sim_mods=add_mods):
self.send(dict(command="game_join", uid=int(url.queryItemValue("uid")), gameport=self.gamePort))
def writeToServer(self, action, *args, **kw):
'''
Writes data to the deprecated stream API. Do not use.
'''
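        # Legacy wire format: a 32-bit length prefix is written as a zero
        # placeholder, the payload follows, and the prefix is patched in at the
        # end once block.size() is known (see the seek(0) below).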
logger.debug("Client: " + action)
block = QtCore.QByteArray()
out = QtCore.QDataStream(block, QtCore.QIODevice.ReadWrite)
out.setVersion(QtCore.QDataStream.Qt_4_2)
out.writeUInt32(0)
out.writeQString(action)
out.writeQString(self.login or "")
out.writeQString(self.session or "")
        for arg in args:
if type(arg) is IntType:
out.writeInt(arg)
elif isinstance(arg, basestring):
out.writeQString(arg)
elif type(arg) is FloatType:
out.writeFloat(arg)
elif type(arg) is ListType:
out.writeQVariantList(arg)
elif type(arg) is DictType:
out.writeQString(json.dumps(arg))
            elif type(arg) is QtCore.QFile:
arg.open(QtCore.QIODevice.ReadOnly)
fileDatas = QtCore.QByteArray(arg.readAll())
out.writeInt(fileDatas.size())
out.writeRawData(fileDatas)
# This may take a while. We display the progress bar so the user get a feedback
self.sendFile = True
self.progress.setLabelText("Sending file to server")
self.progress.setCancelButton(None)
self.progress.setWindowFlags(QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowTitleHint)
self.progress.setAutoClose(True)
self.progress.setMinimum(0)
self.progress.setMaximum(100)
self.progress.setModal(1)
self.progress.setWindowTitle("Uploading in progress")
self.progress.show()
arg.close()
else:
logger.warn("Uninterpreted Data Type: " + str(type(arg)) + " sent as str: " + str(arg))
out.writeQString(str(arg))
out.device().seek(0)
out.writeUInt32(block.size() - 4)
self.bytesToSend = block.size() - 4
self.socket.write(block)
def serverTimeout(self):
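        # Keepalive sketch: the first timeout sends a PING and gives the server
        # one more interval to answer; a second consecutive timeout aborts the
        # socket.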
if self.timeout == 0:
logger.info("Connection timeout - Checking if server is alive.")
self.writeToServer("PING")
self.timeout = self.timeout + 1
else:
self.socket.abort()
@QtCore.pyqtSlot()
def readFromServer(self):
ins = QtCore.QDataStream(self.socket)
ins.setVersion(QtCore.QDataStream.Qt_4_2)
        while not ins.atEnd():
if self.blockSize == 0:
if self.socket.bytesAvailable() < 4:
return
self.blockSize = ins.readUInt32()
if self.socket.bytesAvailable() < self.blockSize:
return
action = ins.readQString()
logger.info("Server: '%s'" % action)
if action == "PING":
self.writeToServer("PONG")
self.blockSize = 0
return
try:
self.dispatch(json.loads(action))
except:
logger.error("Error dispatching JSON: " + action, exc_info=sys.exc_info())
self.blockSize = 0
@QtCore.pyqtSlot()
def disconnectedFromServer(self):
logger.warn("Disconnected from lobby server.")
if self.state == ClientState.ACCEPTED:
QtGui.QMessageBox.warning(QtGui.QApplication.activeWindow(), "Disconnected from FAF", "The lobby lost the connection to the FAF server.<br/><b>You might still be able to chat.<br/>To play, try reconnecting a little later!</b>", QtGui.QMessageBox.Close)
#Clear the online users lists
oldplayers = self.players.keys()
self.players = {}
self.urls = {}
self.usersUpdated.emit(oldplayers)
self.disconnected.emit()
self.mainTabs.setCurrentIndex(0)
for i in range(2, self.mainTabs.count()):
self.mainTabs.setTabEnabled(i, False)
self.mainTabs.setTabText(i, "offline")
self.state = ClientState.DROPPED
@QtCore.pyqtSlot(QtNetwork.QAbstractSocket.SocketError)
def socketError(self, error):
logger.error("TCP Socket Error: " + self.socket.errorString())
if self.state > ClientState.NONE: # Positive client states deserve user notification.
QtGui.QMessageBox.critical(None, "TCP Error", "A TCP Connection Error has occurred:<br/><br/><b>" + self.socket.errorString() + "</b>", QtGui.QMessageBox.Close)
self.progress.cancel()
@QtCore.pyqtSlot()
def forwardLocalBroadcast(self, source, message):
self.localBroadcast.emit(source, message)
def manage_power(self):
''' update the interface accordingly to the power of the user'''
        if self.power >= 1:
            if self.modMenu is None:
self.modMenu = self.menu.addMenu("Administration")
actionAvatar = QtGui.QAction("Avatar manager", self.modMenu)
actionAvatar.triggered.connect(self.avatarManager)
self.modMenu.addAction(actionAvatar)
def requestAvatars(self, personal):
        if personal:
            self.send(dict(command="avatar", action="list_avatar"))
        else:
self.send(dict(command="admin", action="requestavatars"))
def joinChannel(self, username, channel):
'''Join users to a channel'''
self.send(dict(command="admin", action="join_channel", user_ids=[self.players[username].id], channel=channel))
def closeFA(self, username):
        '''Close FA remotely'''
self.send(dict(command="admin", action="closeFA", user_id=self.players[username].id))
def closeLobby(self, username):
        '''Close lobby remotely'''
self.send(dict(command="admin", action="closelobby", user_id=self.players[username].id))
def addFriend(self, friend_name):
'''Adding a new friend by user'''
self.friends.add(friend_name)
self.send(dict(command="social_add", friend=self.players[friend_name].id))
self.usersUpdated.emit([friend_name])
def addFoe(self, foe_name):
'''Adding a new foe by user'''
self.foes.add(foe_name)
self.send(dict(command="social_add", foe=self.players[foe_name].id))
self.usersUpdated.emit([foe_name])
def remFriend(self, friend_name):
'''Removal of a friend by user'''
self.friends.remove(friend_name)
self.send(dict(command="social_remove", friend=self.players[friend_name].id))
self.usersUpdated.emit([friend_name])
def remFoe(self, foe_name):
'''Removal of a foe by user'''
self.foes.remove(foe_name)
self.send(dict(command="social_remove", foe=self.players[foe_name].id))
self.usersUpdated.emit([foe_name])
#
# JSON Protocol v2 Implementation below here
#
def send(self, message):
data = json.dumps(message)
logger.info("Outgoing JSON Message: " + data)
self.writeToServer(data)
def dispatch(self, message):
'''
A fairly pythonic way to process received strings as JSON messages.
'''
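        # e.g. a message {"command": "session", ...} is routed to
        # self.handle_session(message) via getattr.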
if "command" in message:
cmd = "handle_" + message['command']
if hasattr(self, cmd):
getattr(self, cmd)(message)
else:
logger.error("Unknown JSON command: %s" % message['command'])
raise ValueError
else:
logger.debug("No command in message.")
def handle_stats(self, message):
self.statsInfo.emit(message)
def handle_session(self, message):
self.session = str(message["session"])
def handle_update(self, message):
        # Mysterious voodoo nonsense: clearing the saved geometry works around
        # a Qt restore problem.
util.settings.beginGroup("window")
util.settings.remove("geometry")
util.settings.endGroup()
logger.warn("Server says that Updating is needed.")
self.progress.close()
self.state = ClientState.OUTDATED
fetchClientUpdate(message["update"])
def handle_welcome(self, message):
self.id = message["id"]
self.login = message["login"]
logger.debug("Login success")
self.state = ClientState.ACCEPTED
def handle_registration_response(self, message):
if message["result"] == "SUCCESS":
self.state = ClientState.CREATED
return
self.state = ClientState.REJECTED
self.handle_notice({"style": "notice", "text": message["error"]})
def handle_game_launch(self, message):
logger.info("Handling game_launch via JSON " + str(message))
silent = False
if 'args' in message:
arguments = message['args']
else:
arguments = []
# Do some special things depending of the reason of the game launch.
rank = False
# HACK: Ideally, this comes from the server, too. LATER: search_ranked message
if message["featured_mod"] == "ladder1v1":
arguments.append('/' + self.games.race)
#Player 1v1 rating
arguments.append('/mean')
arguments.append(str(self.players[self.login]["ladder_rating_mean"]))
arguments.append('/deviation')
arguments.append(str(self.players[self.login]["ladder_rating_deviation"]))
# Launch the auto lobby
self.relayServer.init_mode = 1
        else:
#Player global rating
arguments.append('/mean')
arguments.append(str(self.players[self.login]["rating_mean"]))
arguments.append('/deviation')
arguments.append(str(self.players[self.login]["rating_deviation"]))
if self.me.country is not None:
arguments.append('/country ')
arguments.append(self.me.country)
# Launch the normal lobby
self.relayServer.init_mode = 0
if self.me.clan is not None:
arguments.append('/clan')
arguments.append(self.me.clan)
# Ensure we have the map
if "mapname" in message:
fa.check.map(message['mapname'], force=True, silent=silent)
if "sim_mods" in message:
fa.mods.checkMods(message['sim_mods'])
# Writing a file for options
if "options" in message:
filename = os.path.join(util.CACHE_DIR, "options.lua")
options = QtCore.QFile(filename)
options.open(QtCore.QIODevice.WriteOnly | QtCore.QIODevice.Text)
numOpt = 0
options.write("Options = { ")
lenopt = len(message['options'])
            for option in message['options']:
                if option:
                    options.write("'1'")
                else:
                    options.write("'0'")
                numOpt += 1
                if lenopt != numOpt:
                    options.write(", ")
options.write(" }")
options.close()
#Experimental UPnP Mapper - mappings are removed on app exit
if self.useUPnP:
fa.upnp.createPortMapping(self.localIP, self.gamePort, "UDP")
        info = dict(uid=message['uid'], recorder=self.login, featured_mod=message["featured_mod"], game_time=time.time())
        fa.run(info, self.relayServer.serverPort(), arguments)
def handle_coop_info(self, message):
self.coopInfo.emit(message)
def handle_tournament_types_info(self, message):
self.tourneyTypesInfo.emit(message)
def handle_tournament_info(self, message):
self.tourneyInfo.emit(message)
def handle_tutorials_info(self, message):
self.tutorialsInfo.emit(message)
def handle_mod_info(self, message):
self.modInfo.emit(message)
def handle_game_info(self, message):
self.gameInfo.emit(message)
def handle_modvault_list_info(self, message):
modList = message["modList"]
for mod in modList:
self.handle_modvault_info(mod)
def handle_modvault_info(self, message):
self.modVaultInfo.emit(message)
def handle_replay_vault(self, message):
self.replayVault.emit(message)
def handle_coop_leaderboard(self, message):
self.coopLeaderBoard.emit(message)
def handle_matchmaker_info(self, message):
if "action" in message:
self.matchmakerInfo.emit(message)
elif "potential" in message:
            if message["potential"]:
self.warningShow()
else:
self.warningHide()
def handle_avatar(self, message):
        if "avatarlist" in message:
            self.avatarList.emit(message["avatarlist"])
    def handle_admin(self, message):
        if "avatarlist" in message:
            self.avatarList.emit(message["avatarlist"])
        elif "player_avatar_list" in message:
self.playerAvatarList.emit(message)
def handle_social(self, message):
if "friends" in message:
self.friends = set(message["friends"])
self.usersUpdated.emit(self.players.keys())
if "foes" in message:
self.foes = set(message["foes"])
self.usersUpdated.emit(self.players.keys())
if "channels" in message:
# Add a delay to the notification system (insane cargo cult)
self.notificationSystem.disabledStartup = False
self.channelsUpdated.emit(message["channels"])
if "autojoin" in message:
self.autoJoin.emit(message["autojoin"])
if "power" in message:
self.power = message["power"]
self.manage_power()
def handle_player_info(self, message):
players = message["players"]
        # Firstly, find yourself. Things get easier once "me" is assigned.
for player in players:
if player["login"] == self.login:
self.me = Player(player)
for player in players:
name = player["login"]
new_player = Player(player)
self.players[name] = new_player
self.usersUpdated.emit([name])
if new_player.clan == self.me.clan:
self.clanlist.add(name)
def avatarManager(self):
self.requestAvatars(0)
self.avatarSelection.show()
def handle_notice(self, message):
if "text" in message:
            if message["style"] == "error":
                if self.state != ClientState.NONE:
                    QtGui.QMessageBox.critical(self, "Error from Server", message["text"])
                else:
QtGui.QMessageBox.critical(self, "Login Failed", message["text"])
self.state = ClientState.REJECTED
elif message["style"] == "warning":
QtGui.QMessageBox.warning(self, "Warning from Server", message["text"])
elif message["style"] == "scores":
self.tray.showMessage("Scores", message["text"], QtGui.QSystemTrayIcon.Information, 3500)
self.localBroadcast.emit("Scores", message["text"])
else:
QtGui.QMessageBox.information(self, "Notice from Server", message["text"])
if message["style"] == "kill":
logger.info("Server has killed your Forged Alliance Process.")
fa.instance.kill()
if message["style"] == "kick":
logger.info("Server has kicked you from the Lobby.")
self.cleanup()
| madformuse/client | src/client/_clientwindow.py | Python | gpl-3.0 | 60,723 | [
"VisIt"
] | 8af7791e873571e5ee76df9c542d7c5d72c2415ea0d1d27ca39b6db79c970832 |
import pytest
from functools import partial
import numpy as np
import psi4
from psi4.driver import qcdb
pytestmark = pytest.mark.quick
_arrs = {
'a1234_14': np.arange(4),
'blip14': np.arange(4) + [0., 0.02, 0.005, 0.02],
'a1234_22': np.arange(4).reshape((2, 2)),
'blip22': (np.arange(4) + [0., 0.02, 0.005, 0.02]).reshape((2, 2)),
'iblip14': np.arange(4) + [0, 1, 0, 1],
'iblip22': (np.arange(4) + [0, 1, 0, 1]).reshape((2, 2)),
}
_dcts = {
'ell': {'a': _arrs['a1234_14'], 'b': {'ba': _arrs['a1234_14'], 'bb': _arrs['a1234_22']}},
'elll': {'a': _arrs['a1234_14'], 'b': {'ba': _arrs['a1234_14'], 'bb': _arrs['a1234_22'], 'bc': 4}},
'ellnone': {'a': _arrs['a1234_14'], 'b': {'ba': _arrs['a1234_14'], 'bb': _arrs['a1234_22'], 'bc': None}},
'ellshort': {'a': np.arange(3), 'b': {'ba': _arrs['a1234_14'], 'bb': _arrs['a1234_22']}},
'blipell': {'a': _arrs['blip14'], 'b': {'ba': _arrs['a1234_14'], 'bb': _arrs['blip22']}},
} # yapf: disable
_mats = {
'dimvec': psi4.core.Vector.from_array([np.arange(4), np.arange(3), np.arange(5)]),
'dimvecnear': psi4.core.Vector.from_array([np.arange(4), np.array([0.0001, 1.0001, 2.0001]), np.arange(5)]),
'dimvecdim': psi4.core.Vector.from_array([np.arange(4), np.arange(3)]),
'dimmat': psi4.core.Matrix.from_array([np.arange(4).reshape(2, 2), np.zeros((0, 3)), np.arange(16).reshape(4, 4)]),
'dimmatnamed': psi4.core.Matrix.from_array([np.arange(4).reshape(2, 2), np.zeros((0, 3)), np.arange(16).reshape(4, 4)], "Name"),
'dimmatshape': psi4.core.Matrix.from_array([np.arange(4).reshape(2, 2), np.ones((0, 3)), np.arange(16).reshape(2, 8)]),
'dimmatdim': psi4.core.Matrix.from_array([np.arange(4).reshape(2, 2), np.ones((0, 3))]),
'dimmatnear': psi4.core.Matrix.from_array([np.array([[0.0001, 1.0001], [2.0001, 3.0001]]), np.zeros((0, 3)), np.arange(16).reshape(4, 4)]),
} # yapf: disable
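# Reading the fixtures above: the 'blip' arrays appear perturbed just enough to
# fail tight tolerances while passing loose ones, the '*near' Psi4 objects
# differ by ~1e-4 per element, and the '*dim' variants drop or reshape an irrep
# block so that dimension checks fail.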
@pytest.mark.parametrize(
"fn, args, kwargs",
[
# scalar int
(psi4.compare_integers, [1, 1, 'labeled'], {}),
(psi4.compare_integers, [1, 1], {'verbose': 2}),
(psi4.compare_strings, ['a', 'a', 'labeled'], {}),
(psi4.compare_strings, ['a', 'a'], {'verbose': 2}),
(psi4.compare, [1, 1, 'labeled'], {}),
(psi4.compare, [1, 1], {'quiet': True}),
# array int
(psi4.compare, [_arrs['a1234_14'], _arrs['a1234_14'], 'labeled'], {}),
(psi4.compare, [_arrs['a1234_14'], _arrs['a1234_14']], {'quiet': True}),
# scalar float
(psi4.compare_values, [4.0, 4.001, 2, 'psi4 api'], {}),
(psi4.compare_values, [4.0, 4.0000001, 'qcel api'], {}),
(psi4.compare_values, [4.0, 4.001, 2], {}),
(psi4.compare_values, [4.0, 4.001], {'atol': 1.e-2}),
(psi4.compare_values, [4.0, 4.0000001], {}),
# array float
(psi4.compare_values, [_arrs['a1234_22'], _arrs['a1234_22'], 'labeled'], {}),
(psi4.compare_values, [_arrs['a1234_22'], _arrs['a1234_22']], {'quiet': True}),
(psi4.compare_values, [_arrs['a1234_22'], _arrs['blip22'], 0.1, 'labeled'], {}),
(psi4.compare_values, [_arrs['a1234_22'], _arrs['blip22'], 0.1], {'quiet': True}),
(psi4.compare_values, [_arrs['a1234_22'], _arrs['blip22'], 'labeled'], {'atol': 0.1}),
(psi4.compare_values, [_arrs['a1234_22'], _arrs['blip22']], {'atol': 0.1, 'quiet': True}),
(psi4.compare_arrays, [[-1.2, 1.2], [-1.2, 1.20000000002]], {}),
(psi4.compare_arrays, [[-1.2, 1.2], [-1.2, 1.20000000002], 6], {}),
# Psi4 arrays
(psi4.compare_vectors, [_mats['dimvec'], _mats['dimvec'], 'labeled'], {}),
(psi4.compare_vectors, [_mats['dimvec'], _mats['dimvecnear'], 2], {}),
(psi4.compare_vectors, [_mats['dimvec'], _mats['dimvecnear']], {'atol': 0.001}),
(psi4.compare_matrices, [_mats['dimmat'], _mats['dimmat'], 'labeled'], {}),
(psi4.compare_matrices, [_mats['dimmat'], _mats['dimmatnear'], 2], {}),
(psi4.compare_matrices, [_mats['dimmat'], _mats['dimmatnear']], {'atol': 0.001}),
(psi4.compare_matrices, [_mats['dimmat'], _mats['dimmatnamed']], {'check_name': False}),
# dicts
(psi4.compare_recursive, [_dcts['ell'], _dcts['ell'], 'labeled'], {}),
(psi4.compare_recursive, [_dcts['ell'], _dcts['ell']], {}),
]) # yapf: disable
def test_psi4_compare_true(fn, args, kwargs):
assert fn(*args, **kwargs)
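# The qcdb parametrization below mirrors the psi4 list above, keeping the two
# comparison APIs in lockstep.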
@pytest.mark.parametrize(
"fn, args, kwargs",
[
# scalar int
(qcdb.compare_integers, [1, 1, 'labeled'], {}),
(qcdb.compare_integers, [1, 1], {'verbose': 2}),
(qcdb.compare_strings, ['a', 'a', 'labeled'], {}),
(qcdb.compare_strings, ['a', 'a'], {'verbose': 2}),
(qcdb.compare, [1, 1, 'labeled'], {}),
(qcdb.compare, [1, 1], {'quiet': True}),
# array int
(qcdb.compare, [_arrs['a1234_14'], _arrs['a1234_14'], 'labeled'], {}),
(qcdb.compare, [_arrs['a1234_14'], _arrs['a1234_14']], {'quiet': True}),
# scalar float
(qcdb.compare_values, [4.0, 4.001, 2, 'qcdb api'], {}),
(qcdb.compare_values, [4.0, 4.0000001, 'qcel api'], {}),
(qcdb.compare_values, [4.0, 4.001, 2], {}),
(qcdb.compare_values, [4.0, 4.001], {'atol': 1.e-2}),
(qcdb.compare_values, [4.0, 4.0000001], {}),
# array float
(qcdb.compare_values, [_arrs['a1234_22'], _arrs['a1234_22'], 'labeled'], {}),
(qcdb.compare_values, [_arrs['a1234_22'], _arrs['a1234_22']], {'quiet': True}),
(qcdb.compare_values, [_arrs['a1234_22'], _arrs['blip22'], 0.1, 'labeled'], {}),
(qcdb.compare_values, [_arrs['a1234_22'], _arrs['blip22'], 0.1], {'quiet': True}),
(qcdb.compare_values, [_arrs['a1234_22'], _arrs['blip22'], 'labeled'], {'atol': 0.1}),
(qcdb.compare_values, [_arrs['a1234_22'], _arrs['blip22']], {'atol': 0.1, 'quiet': True}),
(qcdb.compare_arrays, [[-1.2, 1.2], [-1.2, 1.20000000002]], {}),
(qcdb.compare_arrays, [[-1.2, 1.2], [-1.2, 1.20000000002], 6], {}),
# dicts
(qcdb.compare_recursive, [_dcts['ell'], _dcts['ell'], 'labeled'], {}),
(qcdb.compare_recursive, [_dcts['ell'], _dcts['ell']], {}),
]) # yapf: disable
def test_qcdb_compare_true(fn, args, kwargs):
assert fn(*args, **kwargs)
@pytest.mark.parametrize(
"fn, args, kwargs",
[
# scalar int
(psi4.compare_integers, [1, 2, 'labeled'], {}),
(psi4.compare_integers, [1, 2], {'verbose': 2}),
(psi4.compare_strings, ['a', 'b', 'labeled'], {}),
(psi4.compare_strings, ['a', 'b'], {'verbose': 2}),
(psi4.compare, [1, 2, 'labeled'], {}),
(psi4.compare, [1, 2], {'quiet': True}),
# array int
(psi4.compare, [_arrs['a1234_14'], _arrs['iblip14'], 'labeled'], {}),
(psi4.compare, [_arrs['a1234_14'], _arrs['iblip14']], {'quiet': True}),
# scalar float
(psi4.compare_values, [4.0, 4.1, 2, 'psi4 api'], {}),
(psi4.compare_values, [4.0, 4.0001, 'qcel api'], {}),
(psi4.compare_values, [4.0, 4.1, 2], {}),
(psi4.compare_values, [4.0, 4.1], {'atol': 1.e-2}),
(psi4.compare_values, [4.0, 4.0001], {}),
(psi4.compare_values, [4.0, 4.001, 4], {'atol': 1.e-1}), # arg trumps kwarg
# array float
(psi4.compare_values, [_arrs['a1234_22'], _arrs['blip22'], 'labeled'], {}),
(psi4.compare_values, [_arrs['a1234_22'], _arrs['blip22']], {'quiet': True}),
(psi4.compare_values, [_arrs['a1234_22'], _arrs['blip22'], 7, 'labeled'], {}),
(psi4.compare_values, [_arrs['a1234_22'], _arrs['blip22'], 7], {'quiet': True}),
(psi4.compare_values, [_arrs['a1234_22'], _arrs['blip22'], 'labeled'], {'atol': 1e-7}),
(psi4.compare_values, [_arrs['a1234_22'], _arrs['blip22']], {'atol': 1e-7, 'quiet': True}),
(psi4.compare_arrays, [[-1.2, 1.2], [-1.2, 1.2002]], {}),
(psi4.compare_arrays, [[-1.2, 1.2], [-1.2, 1.20000000002], 12], {}),
(psi4.compare_arrays, [[-1.2, 1.2], [-1.2, 1.2002, 2.4]], {}),
# Psi4 arrays
(psi4.compare_vectors, [_mats['dimvec'], _mats['dimvecdim']], {}),
(psi4.compare_vectors, [_mats['dimvec'], _mats['dimvecnear'], 6], {}),
(psi4.compare_vectors, [_mats['dimvec'], _mats['dimvecnear']], {}),
(psi4.compare_matrices, [_mats['dimmat'], _mats['dimmatshape'], 4, 'labeled'], {}),
(psi4.compare_matrices, [_mats['dimmat'], _mats['dimmatdim']], {}),
(psi4.compare_matrices, [_mats['dimmat'], _mats['dimmatnear'], 6], {}),
(psi4.compare_matrices, [_mats['dimmat'], _mats['dimmatnear']], {}),
(psi4.compare_matrices, [_mats['dimmat'], _mats['dimmatnamed']], {'check_name': True}),
# dicts
(psi4.compare_recursive, [_dcts['elll'], _dcts['ell']], {}),
]) # yapf: disable
def test_psi4_compare_raise(fn, args, kwargs):
with pytest.raises(psi4.TestComparisonError):
fn(*args, **kwargs)
@pytest.mark.parametrize(
"fn, args, kwargs",
[
# scalar int
(qcdb.compare_integers, [1, 2, 'labeled'], {}),
(qcdb.compare_integers, [1, 2], {'verbose': 2}),
(qcdb.compare_strings, ['a', 'b', 'labeled'], {}),
(qcdb.compare_strings, ['a', 'b'], {'verbose': 2}),
(qcdb.compare, [1, 2, 'labeled'], {}),
(qcdb.compare, [1, 2], {'quiet': True}),
# array int
(qcdb.compare, [_arrs['a1234_14'], _arrs['iblip14'], 'labeled'], {}),
(qcdb.compare, [_arrs['a1234_14'], _arrs['iblip14']], {'quiet': True}),
# scalar float
(qcdb.compare_values, [4.0, 4.1, 2, 'qcdb api'], {}),
(qcdb.compare_values, [4.0, 4.0001, 'qcel api'], {}),
(qcdb.compare_values, [4.0, 4.1, 2], {}),
(qcdb.compare_values, [4.0, 4.1], {'atol': 1.e-2}),
(qcdb.compare_values, [4.0, 4.0001], {}),
(qcdb.compare_values, [4.0, 4.001, 4], {'atol': 1.e-1}), # arg trumps kwarg
# array float
(qcdb.compare_values, [_arrs['a1234_22'], _arrs['blip22'], 'labeled'], {}),
(qcdb.compare_values, [_arrs['a1234_22'], _arrs['blip22']], {'quiet': True}),
(qcdb.compare_values, [_arrs['a1234_22'], _arrs['blip22'], 7, 'labeled'], {}),
(qcdb.compare_values, [_arrs['a1234_22'], _arrs['blip22'], 7], {'quiet': True}),
(qcdb.compare_values, [_arrs['a1234_22'], _arrs['blip22'], 'labeled'], {'atol': 1e-7}),
(qcdb.compare_values, [_arrs['a1234_22'], _arrs['blip22']], {'atol': 1e-7, 'quiet': True}),
(qcdb.compare_arrays, [[-1.2, 1.2], [-1.2, 1.2002]], {}),
(qcdb.compare_arrays, [[-1.2, 1.2], [-1.2, 1.20000000002], 12], {}),
(qcdb.compare_arrays, [[-1.2, 1.2], [-1.2, 1.2002, 2.4]], {}),
# dicts
(qcdb.compare_recursive, [_dcts['elll'], _dcts['ell']], {}),
]) # yapf: disable
def test_qcdb_compare_raise(fn, args, kwargs):
with pytest.raises(qcdb.TestComparisonError):
fn(*args, **kwargs)
@pytest.mark.parametrize(
"fn,args,kwargs",
[
(psi4.compare_recursive, [_dcts['ell'], _dcts['ell'], 4], {}),
(qcdb.compare_recursive, [_dcts['ell'], _dcts['ell'], 4], {}),
(qcdb.compare_matrices, [None, None], {}),
(qcdb.compare_dicts, [None, None], {}),
]) # yapf: disable
def test_compare_upgrade(fn, args, kwargs):
with pytest.raises(qcdb.UpgradeHelper):
fn(*args, **kwargs)
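# Alternate return handler: prints a PASSED/FAILED line but hands the boolean
# back to the caller instead of raising, wired in below via the return_handler
# hook of qcdb.testing._psi4_compare_integers.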
def _true_false_handler(passfail, label, message, return_message=False, quiet=False):
print(f""" {label:.<66}{'PASSED' if passfail else 'FAILED'}""")
return passfail
_tf_compare_integers = partial(qcdb.testing._psi4_compare_integers, return_handler=_true_false_handler)
def test_alt_handler_compare_true():
assert _tf_compare_integers(1, 1) is True
def test_alt_handler_compare_false():
assert _tf_compare_integers(1, 2) is False
| psi4/psi4 | tests/pytests/test_testing.py | Python | lgpl-3.0 | 11,875 | [
"Psi4"
] | ab08d793fa49e43611df8b2ff5c777c42e2d17b29898bae707b9ec3b8811cfe5 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to store, generate, and manipulate material interfaces.
"""
import warnings
from pymatgen.analysis.interfaces import CoherentInterfaceBuilder # noqa
from pymatgen.core.interface import Interface # noqa
__author__ = "Eric Sivonxay, Shyam Dwaraknath, and Kyle Bystrom"
__copyright__ = "Copyright 2019, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Kyle Bystrom"
__email__ = "kylebystrom@gmail.com"
__date__ = "5/29/2019"
__status__ = "Prototype"
warnings.warn(
"The substrate_analyzer module is being moved to the interfaces submodule in analysis."
" These imports will break in Pymatgen 2023",
category=FutureWarning,
stacklevel=2,
)
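# Preferred import paths going forward (already re-exported above):
#   from pymatgen.analysis.interfaces import CoherentInterfaceBuilder
#   from pymatgen.core.interface import Interface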
| materialsproject/pymatgen | pymatgen/analysis/interface.py | Python | mit | 797 | [
"pymatgen"
] | ec1d85acffee96ba4dcf6341d5d8eecfa26cb149e002563aae0fdcbc28c982f0 |
# -*- coding: utf-8 -*-
#
# Moonstone is platform for processing of medical images (DICOM).
# Copyright (C) 2009-2011 by Neppo Tecnologia da Informação LTDA
# and Aevum Softwares LTDA
#
# This file is part of Moonstone.
#
# Moonstone is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import vtk
import math
#
# up
# (point2) *
# |
# |
# left | right
# |
# |
# (origin) *-------------*(point1)
# down
class Slice():
def __init__(self, origin, p1, p2, width, height):
logging.debug("In Slice::__init__()")
self._p1 = p1
self._p2 = p2
self._width = width
self._origin = origin
self._height = height
self._plane = vtk.vtkPlaneSource()
self._plane.SetOrigin(self._origin)
self._plane.SetPoint1(self._p1)
self._plane.SetPoint2(self._p2)
self._plane.Update()
self.normalizePlaneSize()
self.createBoundsPlanes()
self._resliceTransform = None
def normalizePlaneSize(self):
# print "----------------------------------------------------------------"
# print "BEFORE:", self._plane
distance = math.sqrt(vtk.vtkMath.Distance2BetweenPoints(self._plane.GetOrigin(), self._plane.GetPoint1()))
scale = self._width/distance
self._plane.SetPoint1([(p1 - o) * scale + o for p1, o in zip(self._plane.GetPoint1(), self._plane.GetOrigin())])
distance = math.sqrt(vtk.vtkMath.Distance2BetweenPoints(self._plane.GetOrigin(), self._plane.GetPoint2()))
scale = self._height/distance
self._plane.SetPoint2([(p2 - o) * scale + o for p2, o in zip(self._plane.GetPoint2(), self._plane.GetOrigin())])
self._plane.SetOrigin(self._plane.GetOrigin())
self._plane.Update()
# print "AFTER:", self._plane
def createBoundsPlanes(self):
self._leftPlane = vtk.vtkPlaneSource()
self._rightPlane = vtk.vtkPlaneSource()
self._upPlane = vtk.vtkPlaneSource()
self._downPlane = vtk.vtkPlaneSource()
self.updateBoundsPlanes()
def updateBoundsPlanes(self):
point3 = [p1+p2 for p1, p2 in zip(self._plane.GetOrigin(), self._plane.GetNormal())]
self._leftPlane.SetOrigin(self._plane.GetOrigin())
self._leftPlane.SetPoint1(self._plane.GetPoint2())
self._leftPlane.SetPoint2(point3)
self._leftPlane.Push(self._width)
self._rightPlane.SetOrigin(self._leftPlane.GetOrigin())
self._rightPlane.SetPoint1(self._leftPlane.GetPoint1())
self._rightPlane.SetPoint2(self._leftPlane.GetPoint2())
self._leftPlane.Push(-self._width)
self._downPlane.SetOrigin(self._plane.GetOrigin())
self._downPlane.SetPoint1(point3)
self._downPlane.SetPoint2(self._plane.GetPoint1())
self._downPlane.Push(self._height)
self._upPlane.SetOrigin(self._downPlane.GetOrigin())
self._upPlane.SetPoint1(self._downPlane.GetPoint1())
self._upPlane.SetPoint2(self._downPlane.GetPoint2())
self._downPlane.Push(-self._height)
def isPointInProjection(self, point):
sideDistance = vtk.vtkPlane.DistanceToPlane(point, self._leftPlane.GetNormal(), self._leftPlane.GetOrigin()) +\
vtk.vtkPlane.DistanceToPlane(point, self._rightPlane.GetNormal(), self._rightPlane.GetOrigin())
if sideDistance - self._width > 0.01:
return False
return True
#comparing only side planes
# upDistance = vtk.vtkPlane.DistanceToPlane(point, self._upPlane.GetNormal(), self._upPlane.GetOrigin()) +\
# vtk.vtkPlane.DistanceToPlane(point, self._downPlane.GetNormal(), self._downPlane.GetOrigin())
#
# return upDistance <= self._height
def getPointDistance(self, point):
return vtk.vtkPlane.DistanceToPlane(point, self._plane.GetNormal(), self._plane.GetOrigin())
def getCenterDistance(self, point):
return math.sqrt(vtk.vtkMath.Distance2BetweenPoints(point, self._plane.GetCenter()))
def getBaseCenterDistance(self, point):
return math.sqrt(vtk.vtkMath.Distance2BetweenPoints(point, self.getBaseCenter()))
def getOriginDistance(self, point):
return math.sqrt(vtk.vtkMath.Distance2BetweenPoints(point, self._plane.GetOrigin()))
def getBaseCenter(self):
result = [(p1+p2)/2.0 for p1, p2 in zip(self._plane.GetOrigin(), self._plane.GetPoint1())]
return result
@property
def plane(self):
return self._plane
@property
def width(self):
return self._width
def setWidth(self, width):
self._width = width
self.normalizePlaneSize()
@property
def height(self):
return self._height
def setHeight(self, height):
self._height = height
self.normalizePlaneSize()
@property
def origin(self):
return self._origin
def setOrigin(self, origin):
self._origin = origin
self._plane.SetCenter([c + (no - oo) for c, oo, no in zip(self._plane.GetCenter(), self._plane.GetOrigin(), self._origin)])
self._plane.Update()
self.updateBoundsPlanes()
@property
def normal(self):
return self._normal
def setNormal(self, normal):
self._normal = normal
self._plane.SetNormal(self._normal)
self._plane.Update()
self.updateBoundsPlanes()
@property
def center(self):
return self._plane.GetCenter()
def setCenter(self, center):
self._plane.SetCenter(center)
self._plane.Update()
self.updateBoundsPlanes()
def push(self, value):
self._plane.Push(value)
self._plane.Update()
@property
def resliceTransform(self):
return self._resliceTransform
def setResliceTransform(self, resliceTransform):
self._resliceTransform = resliceTransform
def getBounds(self):
origin = self._plane.GetOrigin()
point1 = self._plane.GetPoint1()
point2 = self._plane.GetPoint2()
minx = min(origin[0], point1[0], point2[0]) - 0.000001
maxx = max(origin[0], point1[0], point2[0]) + 0.000001
miny = min(origin[1], point1[1], point2[1]) - 0.000001
maxy = max(origin[1], point1[1], point2[1]) + 0.000001
minz = min(origin[2], point1[2], point2[2]) - 0.000001
maxz = max(origin[2], point1[2], point2[2]) + 0.000001
return [minx, maxx, miny, maxy, minz, maxz]
def save(self):
result = {
"origin" : self._plane.GetOrigin(),
"point1" : self._plane.GetPoint1(),
"point2" : self._plane.GetPoint2(),
"width" : self._width,
"height" : self._height
}
return result
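# Usage sketch (coordinates and sizes below are hypothetical):
#   s = Slice(origin=[0.0, 0.0, 0.0], p1=[10.0, 0.0, 0.0], p2=[0.0, 5.0, 0.0],
#             width=10.0, height=5.0)
#   inside = s.isPointInProjection([5.0, 2.0, 0.0])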
| aevum/moonstone | src/moonstone/bloodstone/scenes/data/slice.py | Python | lgpl-3.0 | 7,771 | [
"VTK"
] | 83bccba576a1a271b58fd94dc1e4ddd9238bad5e091d63028b640f2fc7baab69 |
from vtk.web import testing
from vtk.web.testing import BrowserBasedWebTest
dependencies_met = True
try:
# import modules for automating web testing using a real browser
import selenium
except:
dependencies_met = False
# =============================================================================
# Define a subclass of one of the testing base classes. There are no
# restrictions on the class name, as nothing depends on it.
# =============================================================================
class JavascriptTestsRunner(BrowserBasedWebTest) :
"""
This is a browser-based web test which does not (yet) require image compares,
so it extends BrowserBasedWebTest. It interacts with the TestApp application
by just pushing the test button and then checking if the Javascript tests
have finished in a loop. When the test application indicates that it has
finished, this test requests the results so that the test can be passed or
failed. In the case of failure, the entire contents of the test application
log are printed out for CTest.
"""
def __init__(self, host='localhost', port=8080, **kwargs) :
# Only the author of this test script knows what application is
# being tested and how to get to it.
self.urlPath = '/apps/TestApp'
self.host = host
self.port = port
appUrl = 'http://' + self.host + ':' + str(self.port) + self.urlPath
# Continue with initialization of base classes
BrowserBasedWebTest.__init__(self, url=appUrl, size=(800, 600), **kwargs)
def checkdependencies(self):
if dependencies_met == False:
raise testing.DependencyError("Python module 'selenium' is missing")
def setup(self) :
testing.wait_with_timeout(delay=8)
startButton = self.window.find_element_by_css_selector(".run-tests")
startButton.click()
def postprocess(self) :
# Loop until the javascript-side tests are finished
while True :
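            # Note: this polls as fast as the browser responds; pacing could be
            # added with testing.wait_with_timeout (used in setup above).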
# Perform the check to see if tests are finished yet
currentResults = self.window.execute_script("return vtkWeb.testing.getCurrentTestResults();")
if currentResults['finished'] is True :
# Done with tests, check results
testsSucceeded = currentResults['failures'] == 0
if testsSucceeded :
testing.test_pass(self.testname)
else :
testing.test_fail(self.testname)
                    messageLog = self.window.execute_script("return vtkWeb.testing.getTestLog();")
                    print("Following is the message log from the tests:")
                    print(messageLog)
                    print()
                break
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/Web/Applications/TestApp/test/test_pv_web_testapp_all.py | Python | gpl-3.0 | 2,795 | [
"VTK"
] | 0c2582a16f5b1d15b06f1b9b815d6435868e9de777cc94ca0c8dd780adb32ae7 |
#!/usr/bin/env python
#
# $File: PyParentsChooser.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
from random import randint
def randomChooser(pop, subPop):
males = []
females = []
# identify males and females in each social rank
for rank in range(3):
males.append([x for x in pop.individuals(subPop) \
if x.sex() == sim.MALE and x.rank == rank])
females.append([x for x in pop.individuals(subPop) \
if x.sex() == sim.FEMALE and x.rank == rank])
#
while True:
# choose a rank randomly
rank = int(pop.individual(randint(0, pop.subPopSize(subPop) - 1), subPop).rank)
yield males[rank][randint(0, len(males[rank]) - 1)], \
females[rank][randint(0, len(females[rank]) - 1)]
def setRank(rank):
    'The rank of the offspring shifts by -1, 0, or +1 from the father rank, wrapping modulo 3'
# only use rank of the father
return (rank[0] + randint(-1, 1)) % 3
pop = sim.Population(size=[1000, 2000], loci=1, infoFields='rank')
pop.evolve(
initOps=[
sim.InitSex(),
sim.InitInfo(lambda : randint(0, 2), infoFields='rank')
],
matingScheme=sim.HomoMating(
sim.PyParentsChooser(randomChooser),
sim.OffspringGenerator(ops=[
sim.MendelianGenoTransmitter(),
sim.PyTagger(setRank),
])
),
gen = 5
)
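# Each generation: randomChooser yields parent pairs drawn within one randomly
# chosen social rank, and PyTagger(setRank) passes a possibly shifted rank on
# to the offspring.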
| BoPeng/simuPOP | docs/PyParentsChooser.py | Python | gpl-2.0 | 2,369 | [
"VisIt"
] | 25512b53dcaedffec58269b94aab258b4c4f33fa30b05900e5e1398cc0961854 |
#
# model_parser.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import copy
from antlr4 import *
from pynestml.frontend.nestml_error_listener import NestMLErrorListener
from pynestml.generated.PyNestMLLexer import PyNestMLLexer
from pynestml.generated.PyNestMLParser import PyNestMLParser
from pynestml.meta_model.ast_arithmetic_operator import ASTArithmeticOperator
from pynestml.meta_model.ast_assignment import ASTAssignment
from pynestml.meta_model.ast_block import ASTBlock
from pynestml.meta_model.ast_block_with_variables import ASTBlockWithVariables
from pynestml.meta_model.ast_body import ASTBody
from pynestml.meta_model.ast_comparison_operator import ASTComparisonOperator
from pynestml.meta_model.ast_compound_stmt import ASTCompoundStmt
from pynestml.meta_model.ast_constraint import ASTConstraint
from pynestml.meta_model.ast_constraints_block import ASTConstraintsBlock
from pynestml.meta_model.ast_data_type import ASTDataType
from pynestml.meta_model.ast_declaration import ASTDeclaration
from pynestml.meta_model.ast_elif_clause import ASTElifClause
from pynestml.meta_model.ast_else_clause import ASTElseClause
from pynestml.meta_model.ast_equations_block import ASTEquationsBlock
from pynestml.meta_model.ast_expression import ASTExpression
from pynestml.meta_model.ast_for_stmt import ASTForStmt
from pynestml.meta_model.ast_function import ASTFunction
from pynestml.meta_model.ast_function_call import ASTFunctionCall
from pynestml.meta_model.ast_if_clause import ASTIfClause
from pynestml.meta_model.ast_if_stmt import ASTIfStmt
from pynestml.meta_model.ast_input_block import ASTInputBlock
from pynestml.meta_model.ast_input_line import ASTInputLine
from pynestml.meta_model.ast_input_type import ASTInputType
from pynestml.meta_model.ast_logical_operator import ASTLogicalOperator
from pynestml.meta_model.ast_nestml_compilation_unit import ASTNestMLCompilationUnit
from pynestml.meta_model.ast_neuron import ASTNeuron
from pynestml.meta_model.ast_ode_equation import ASTOdeEquation
from pynestml.meta_model.ast_ode_function import ASTOdeFunction
from pynestml.meta_model.ast_ode_shape import ASTOdeShape
from pynestml.meta_model.ast_output_block import ASTOutputBlock
from pynestml.meta_model.ast_parameter import ASTParameter
from pynestml.meta_model.ast_return_stmt import ASTReturnStmt
from pynestml.meta_model.ast_simple_expression import ASTSimpleExpression
from pynestml.meta_model.ast_small_stmt import ASTSmallStmt
from pynestml.meta_model.ast_source_location import ASTSourceLocation
from pynestml.meta_model.ast_stmt import ASTStmt
from pynestml.meta_model.ast_unary_operator import ASTUnaryOperator
from pynestml.meta_model.ast_unit_type import ASTUnitType
from pynestml.meta_model.ast_update_block import ASTUpdateBlock
from pynestml.meta_model.ast_variable import ASTVariable
from pynestml.meta_model.ast_while_stmt import ASTWhileStmt
from pynestml.symbol_table.symbol_table import SymbolTable
from pynestml.utils.ast_utils import ASTUtils
from pynestml.utils.logger import Logger, LoggingLevel
from pynestml.utils.messages import Messages
from pynestml.visitors.ast_builder_visitor import ASTBuilderVisitor
from pynestml.visitors.ast_higher_order_visitor import ASTHigherOrderVisitor
from pynestml.visitors.ast_symbol_table_visitor import ASTSymbolTableVisitor
class ModelParser(object):
"""
    This class contains several methods used to parse handed-over models and return them as one or more AST trees.
"""
@classmethod
def parse_model(cls, model = None, from_string = False):
"""
Parses a handed over model and returns the meta_model representation of it.
:param model: the path to the file which shall be parsed.
:type model: str
:param from_string: indicates whether the model shall be parsed from string directly
:type from_string: bool
:return: a new ASTNESTMLCompilationUnit object.
:rtype: ASTNestMLCompilationUnit
"""
if from_string:
input_file = InputStream(model)
else:
try:
input_file = FileStream(model)
except IOError:
print('(PyNestML.Parser) File ' + str(model) + ' not found. Processing is stopped!')
return
code, message = Messages.get_start_processing_file(model if isinstance(model, FileStream)
else 'model from string')
Logger.log_message(neuron=None, code=code, message=message, error_position=None, log_level=LoggingLevel.INFO)
# create a lexer and hand over the input
lexer = PyNestMLLexer(input_file)
set_up_lexer_error_reporting(lexer)
# create a token stream
stream = CommonTokenStream(lexer)
stream.fill()
# parse the file
parser = PyNestMLParser(stream)
set_up_parser_error_reporting(parser)
compilation_unit = parser.nestMLCompilationUnit()
# create a new visitor and return the new AST
ast_builder_visitor = ASTBuilderVisitor(stream.tokens)
ast = ast_builder_visitor.visit(compilation_unit)
# create and update the corresponding symbol tables
SymbolTable.initialize_symbol_table(ast.get_source_position())
log_to_restore = copy.deepcopy(Logger.get_log())
counter = Logger.curr_message
        # replace all derived variables with computer-processable names, e.g. g_in''' -> g_in__ddd
restore_differential_order = []
for ode in ASTUtils.get_all(ast, ASTOdeEquation):
lhs_variable = ode.get_lhs()
if lhs_variable.get_differential_order() > 0:
lhs_variable.differential_order = lhs_variable.get_differential_order() - 1
restore_differential_order.append(lhs_variable)
for shape in ASTUtils.get_all(ast, ASTOdeShape):
lhs_variable = shape.get_variable()
if lhs_variable.get_differential_order() > 0:
lhs_variable.differential_order = lhs_variable.get_differential_order() - 1
restore_differential_order.append(lhs_variable)
        # then replace the remaining variables
for variable in ASTUtils.get_all(ast, ASTVariable):
if variable.get_differential_order() > 0:
variable.set_name(variable.get_name() + "__" + "d" * variable.get_differential_order())
variable.differential_order = 0
        # the equations now have no ' on the lhs; restore one differential order to compensate
for ode_variable in restore_differential_order:
ode_variable.differential_order = 1
Logger.set_log(log_to_restore, counter)
for neuron in ast.get_neuron_list():
neuron.accept(ASTSymbolTableVisitor())
SymbolTable.add_neuron_scope(neuron.get_name(), neuron.get_scope())
return ast
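    # Usage sketch (the model path below is hypothetical):
    #   ast = ModelParser.parse_model('models/iaf_neuron.nestml')
    #   expr = ModelParser.parse_expression('V_m + 10mV')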
@classmethod
def parse_expression(cls, string):
# type: (str) -> ASTExpression
(builder, parser) = tokenize(string)
ret = builder.visit(parser.expression())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_declaration(cls, string):
# type: (str) -> ASTDeclaration
(builder, parser) = tokenize(string)
ret = builder.visit(parser.declaration())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_stmt(cls, string):
# type: (str) -> ASTStmt
(builder, parser) = tokenize(string)
ret = builder.visit(parser.stmt())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_assignment(cls, string):
# type: (str) -> ASTAssignment
(builder, parser) = tokenize(string)
ret = builder.visit(parser.assignment())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_bit_operator(cls, string):
# type: (str) -> ASTArithmeticOperator
builder, parser = tokenize(string)
ret = builder.visit(parser.bitOperator())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_block(cls, string):
# type: (str) -> ASTBlock
(builder, parser) = tokenize(string)
ret = builder.visit(parser.block())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_block_with_variables(cls, string):
# type: (str) -> ASTBlockWithVariables
(builder, parser) = tokenize(string)
ret = builder.visit(parser.blockWithVariables())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_body(cls, string):
# type: (str) -> ASTBody
(builder, parser) = tokenize(string)
ret = builder.visit(parser.body())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_comparison_operator(cls, string):
# type: (str) -> ASTComparisonOperator
(builder, parser) = tokenize(string)
ret = builder.visit(parser.comparisonOperator())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_compound_stmt(cls, string):
# type: (str) -> ASTCompoundStmt
(builder, parser) = tokenize(string)
ret = builder.visit(parser.compoundStmt())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_data_type(cls, string):
# type: (str) -> ASTDataType
(builder, parser) = tokenize(string)
ret = builder.visit(parser.dataType())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_elif_clause(cls, string):
# type: (str) -> ASTElifClause
(builder, parser) = tokenize(string)
ret = builder.visit(parser.elifClause())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_else_clause(cls, string):
# type: (str) -> ASTElseClause
(builder, parser) = tokenize(string)
ret = builder.visit(parser.elseClause())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_equations_block(cls, string):
# type: (str) -> ASTEquationsBlock
(builder, parser) = tokenize(string)
ret = builder.visit(parser.equationsBlock())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_for_stmt(cls, string):
# type: (str) -> ASTForStmt
(builder, parser) = tokenize(string)
ret = builder.visit(parser.forStmt())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_function(cls, string):
# type: (str) -> ASTFunction
(builder, parser) = tokenize(string)
ret = builder.visit(parser.function())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_function_call(cls, string):
# type: (str) -> ASTFunctionCall
(builder, parser) = tokenize(string)
ret = builder.visit(parser.functionCall())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_if_clause(cls, string):
# type: (str) -> ASTIfClause
(builder, parser) = tokenize(string)
ret = builder.visit(parser.ifClause())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_if_stmt(cls, string):
# type: (str) -> ASTIfStmt
(builder, parser) = tokenize(string)
ret = builder.visit(parser.ifStmt())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_input_block(cls, string):
# type: (str) -> ASTInputBlock
(builder, parser) = tokenize(string)
ret = builder.visit(parser.inputBlock())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_input_line(cls, string):
# type: (str) -> ASTInputLine
(builder, parser) = tokenize(string)
ret = builder.visit(parser.inputLine())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_input_type(cls, string):
# type: (str) -> ASTInputType
(builder, parser) = tokenize(string)
ret = builder.visit(parser.inputType())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_logic_operator(cls, string):
# type: (str) -> ASTLogicalOperator
(builder, parser) = tokenize(string)
ret = builder.visit(parser.logicalOperator())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_nestml_compilation_unit(cls, string):
# type: (str) -> ASTNestMLCompilationUnit
(builder, parser) = tokenize(string)
ret = builder.visit(parser.nestMLCompilationUnit())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_neuron(cls, string):
# type: (str) -> ASTNeuron
(builder, parser) = tokenize(string)
ret = builder.visit(parser.neuron())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_ode_equation(cls, string):
# type: (str) -> ASTOdeEquation
(builder, parser) = tokenize(string)
ret = builder.visit(parser.odeEquation())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_ode_function(cls, string):
# type: (str) -> ASTOdeFunction
(builder, parser) = tokenize(string)
ret = builder.visit(parser.odeFunction())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_ode_shape(cls, string):
# type: (str) -> ASTOdeShape
(builder, parser) = tokenize(string)
ret = builder.visit(parser.odeShape())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_output_block(cls, string):
# type: (str) -> ASTOutputBlock
(builder, parser) = tokenize(string)
ret = builder.visit(parser.outputBlock())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_parameter(cls, string):
# type: (str) -> ASTParameter
(builder, parser) = tokenize(string)
ret = builder.visit(parser.parameter())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_return_stmt(cls, string):
# type: (str) -> ASTReturnStmt
(builder, parser) = tokenize(string)
ret = builder.visit(parser.returnStmt())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_simple_expression(cls, string):
# type: (str) -> ASTSimpleExpression
(builder, parser) = tokenize(string)
ret = builder.visit(parser.simpleExpression())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_small_stmt(cls, string):
# type: (str) -> ASTSmallStmt
(builder, parser) = tokenize(string)
ret = builder.visit(parser.smallStmt())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_unary_operator(cls, string):
# type: (str) -> ASTUnaryOperator
(builder, parser) = tokenize(string)
ret = builder.visit(parser.unaryOperator())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_unit_type(cls, string):
# type: (str) -> ASTUnitType
(builder, parser) = tokenize(string)
ret = builder.visit(parser.unitType())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_update_block(cls, string):
# type: (str) -> ASTUpdateBlock
(builder, parser) = tokenize(string)
ret = builder.visit(parser.updateBlock())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_variable(cls, string):
# type: (str) -> ASTVariable
(builder, parser) = tokenize(string)
ret = builder.visit(parser.variable())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_while_stmt(cls, string):
# type: (str) -> ASTWhileStmt
(builder, parser) = tokenize(string)
ret = builder.visit(parser.whileStmt())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_constraint(cls, string):
# type: (str) -> ASTConstraint
(builder, parser) = tokenize(string)
ret = builder.visit(parser.constraint())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
@classmethod
def parse_constraint_block(cls, string):
# type: (str) -> ASTConstraintsBlock
(builder, parser) = tokenize(string)
ret = builder.visit(parser.constraintsBlock())
ret.accept(ASTHigherOrderVisitor(log_set_added_source_position))
return ret
def tokenize(string):
# type: (str) -> (ASTBuilderVisitor,PyNestMLParser)
lexer = PyNestMLLexer(InputStream(string))
set_up_lexer_error_reporting(lexer)
# create a token stream
stream = CommonTokenStream(lexer)
stream.fill()
parser = PyNestMLParser(stream)
set_up_parser_error_reporting(parser)
builder = ASTBuilderVisitor(stream.tokens)
return builder, parser
def log_set_added_source_position(node):
node.set_source_position(ASTSourceLocation.get_added_source_position())
def set_up_lexer_error_reporting(lexer):
lexer.removeErrorListeners()
lexer._listeners = [NestMLErrorListener()]
def set_up_parser_error_reporting(parser):
parser._listeners = [NestMLErrorListener()]
| kperun/nestml | pynestml/utils/model_parser.py | Python | gpl-2.0 | 19,642 | [
"NEURON",
"VisIt"
] | ed569215e9e1bafc412abdd4b6bf79edecc9f55ff4789f296af0f26925cab0ea |
# !/usr/local/bin/python3.4.2
# ----Copyright (c) 2017 Carnegie Hall | The MIT License (MIT)----
# ----For the full license terms, please visit https://github.com/CarnegieHall/linked-data/blob/master/LICENSE----
## Argument[0] is script to run
## Argument[1] is path to json query results from data.carnegiehall.org
## Argument[2] is path to json query results from dbpedia
import json
import os
import re
import sys
from rdflib import Graph, URIRef
from rdflib.namespace import SKOS
from rdflib.plugins.serializers.nt import NTSerializer
filePath_1 = sys.argv[1]
filePath_2 = sys.argv[2]
ch_toWikiDict = {}
gWikidata = Graph()
with open(filePath_1, 'rU') as f1, open(filePath_2, 'rU') as f2:
chData = json.load(f1)
dbpData = json.load(f2)
for key in chData:
ch_dbp = chData[str(key)]["dbp"]
ch_wikidata = chData[str(key)]["wikidata"]
if not ch_wikidata:
for item in dbpData["results"]["bindings"]:
dbp = item["s"]["value"]
wikidata = item["o"]["value"]
if dbp == ch_dbp:
chData[key]["wikidata"] = wikidata
gWikidata.add( (URIRef(key), SKOS.exactMatch, URIRef(wikidata)) )
ch_toWikiDict_path = os.path.join(
os.path.dirname(__file__), os.pardir, 'JSON_dicts', 'ch_toWikiDict.json')
wikidata_graph_path = os.path.join(
os.path.dirname(__file__), os.pardir, 'Graphs', 'wikidataGraph_dboMusicalArtist.nt')
with open(ch_toWikiDict_path, 'w') as f1:
json.dump(ch_toWikiDict, f1)
gWikidata.bind("skos", SKOS)
gWikidata = gWikidata.serialize(destination=wikidata_graph_path, format='nt')
print("Finished getting Wikidata URIs")
| CarnegieHall/linked-data | scripts/dbp_findWikidataURI.py | Python | mit | 1,682 | [
"VisIt"
] | 61ce1380429da530ba653c34e65141be9707b65a0481ba0155b5c0d5857ac746 |
#-------------------------------------------------------------------------------------------------------------------#
#
# IB2d is an Immersed Boundary Code (IB) for solving fully coupled
# fluid-structure interaction models. This version of the code is based off of
# Peskin's Immersed Boundary Method Paper in Acta Numerica, 2002.
#
# Author: Nicholas A. Battista, Christopher Strickland
# Email: nick.battista@unc.edu
# Date Created: May 27th, 2015
# Institution: UNC-CH
#
# This code is capable of creating Lagrangian Structures using:
# 1. Springs
# 2. Beams (*torsional springs)
# 3. Target Points
# 4. Muscle-Model (combined Force-Length-Velocity model, "HIll+(Length-Tension)")
#
# One is able to update those Lagrangian Structure parameters, e.g., spring constants, resting lengths, etc
#
# There are a number of built in Examples, mostly used for teaching purposes.
#
# If you would like us to add a specific muscle model, please let Nick (nick.battista@unc.edu) know.
#
#--------------------------------------------------------------------------------------------------------------------#
from pathlib import Path
import numpy as np
import vtk
from vtk.util import numpy_support
from vtk.numpy_interface import dataset_adapter as dsa
def read_Eulerian_Data_From_vtk(path, simNums, strChoice, xy=False):
'''This is to read Eulerian IB2d data, either scalar or vector.'''
filename = Path(path) / (strChoice + '.' + str(simNums) + '.vtk')
data = read_vtk_Structured_Points(str(filename))
if xy:
# reconstruct mesh
origin = data[-2]
spacing = data[-1]
x = np.arange(data[0].shape[0])*spacing[0]+origin[0]
y = np.arange(data[0].shape[1])*spacing[1]+origin[1]
# infer if it was a vector or not and return accordingly
if len(data) == 3:
# scalar
if xy:
return data[0], x, y
else:
return data[0]
else:
# vector
if xy:
return data[0], data[1], x, y
else:
return data[0], data[1]
def read_vtk_Structured_Points(filename):
'''This will read in either Scalar or Vector data!'''
# Check for valid filename (vtk crashes badly if filename is invalid)
if not Path(filename).exists():
raise OSError(2, "No such file", filename)
# Load data
reader = vtk.vtkStructuredPointsReader()
reader.SetFileName(filename)
reader.Update()
vtk_data = reader.GetOutput()
# Get mesh data
mesh_shape = vtk_data.GetDimensions()
origin = vtk_data.GetOrigin()
spacing = vtk_data.GetSpacing()
# Read in data
scalar_data = vtk_data.GetPointData().GetScalars()
if scalar_data is not None:
np_data = numpy_support.vtk_to_numpy(scalar_data)
e_data = np.reshape(np_data, mesh_shape[::-1]).squeeze()
# Indexed [z,y,x] since x changes, then y, then z in the flattened array
return e_data, origin, spacing
else:
vector_data = vtk_data.GetPointData().GetVectors()
np_data = numpy_support.vtk_to_numpy(vector_data)
e_data_X = np.reshape(np_data[:,0], mesh_shape[::-1]).squeeze()
e_data_Y = np.reshape(np_data[:,1], mesh_shape[::-1]).squeeze()
e_data_Z = np.reshape(np_data[:,2], mesh_shape[::-1]).squeeze()
        # Each of these is indexed via [z,y,x], since x changes, then y, then z
# in the flattened array.
return e_data_X, e_data_Y, e_data_Z, origin, spacing
def read_Force_Scalar_Data_From_vtk(path, simNums, strChoice):
'''This will read force data on the Lagrangian mesh'''
filename = Path(path) / (strChoice + '.' + str(simNums) + '.vtk')
return read_vtk_Unstructured_Grid_Points(str(filename))
def read_vtk_Unstructured_Grid_Points(filename):
'''This is to read Lagrangian mesh data.'''
# Check for valid filename (vtk crashes badly if filename is invalid)
if not Path(filename).exists():
raise OSError(2, "No such file", filename)
reader = vtk.vtkUnstructuredGridReader()
reader.SetFileName(filename)
reader.Update()
vtk_data = reader.GetOutput()
py_data = dsa.WrapDataObject(vtk_data)
points = numpy_support.vtk_to_numpy(py_data.Points) # each row is a 2D point
return points
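# Usage sketch (path, dump number and field name below are hypothetical):
#   u_x, u_y, x, y = read_Eulerian_Data_From_vtk('viz_IB2d', 5, 'u', xy=True)
#   lag_points = read_Force_Scalar_Data_From_vtk('viz_IB2d', 5, 'fMag')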
| nickabattista/IB2d | data_analysis/analysis_in_python/DA_Blackbox/read_vtk_data.py | Python | gpl-3.0 | 4,272 | [
"VTK"
] | 3cbb7782a443c42770fd4e763fb41b83ef9289c24a9a74aacace76676f4a6a7c |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
La Niña
Command Line Weather using the http://wunderground.com API
Author: Brian Carter
http://github.com/robotmachine/La_Nina
Originally forked from niño || https://github.com/drbunsen/nino
"""
import os, sys, urllib, urllib.request, http.client, configparser, textwrap, argparse, json
settings = os.path.expanduser("~/.nina")
if os.path.exists(settings):
dotfile = True
elif not os.path.exists(settings):
dotfile = False
config = configparser.ConfigParser()
""" Main """
def main():
parser = argparse.ArgumentParser(description='Niña: Gets Wunderground weather reports. Example: nina 29072', prog='nina')
parser.add_argument('-z','--zip',
action='store', dest='ZIP', default=None,
help='Zip code for weather. If nothing is provided, use favourite zip from config file.')
parser.add_argument('-k','--key',
action='store', dest='APIKEY', default=None,
help='API key from http://www.wunderground.com/weather/api')
parser.add_argument('--edit-config', dest="EDIT",
action='store_true', default=False,
help='Create or edit config file.')
args = parser.parse_args()
EDIT = args.EDIT
APIKEY = args.APIKEY
ZIP = args.ZIP
if EDIT is True:
        set_config(dotfile)
if dotfile is True and APIKEY is None:
config.read(settings)
APIKEY = config['NINA']['APIKEY']
if dotfile is True and ZIP is None:
config.read(settings)
ZIP = config['NINA']['ZIP']
if ZIP is not None and APIKEY is not None:
"""print("Would have called a simple forecast with API key {} and ZIP {}".format(APIKEY, ZIP))"""
simple_forecast(APIKEY, ZIP)
if ZIP is None and APIKEY is None:
set_config(dotfile)
""" Create config file """
def set_config(dotfile):
if dotfile is False:
try:
print(textwrap.dedent("""
A Wunderground API Key is required.
Create one for free here:
http://www.wunderground.com/weather/api
"""))
APIKEY = input("Wunderground API Key: ")
print(textwrap.dedent("""
Set your local ZIP code to use as the default
"""))
ZIP = input("ZIP: ")
if (ZIP == ""):
ZIP = False
config ['NINA'] = {'APIKEY': APIKEY,
'ZIP': ZIP}
with open(settings, 'w') as configfile:
config.write(configfile)
print("Settings saved!")
dotfile = True
main()
except KeyboardInterrupt:
print("\nUser exit.")
quit()
except SyntaxError:
print("\nSyntax Error.")
set_config(dotfile)
elif dotfile is True:
config.read(settings)
APIKEY = config['NINA']['APIKEY']
ZIP = config['NINA']['ZIP']
print(textwrap.dedent("""
Current stored API key is {}
Would you like to change it?
""".format(APIKEY)))
BOOL1 = input("Y/N: ")
if BOOL1 in ['Y', 'y', 'Yes', 'YES', 'yes']:
APIKEY = input("New API key: ")
            config['NINA']['APIKEY'] = APIKEY
with open(settings, 'w') as configfile:
config.write(configfile)
else:
print(textwrap.dedent("Keeping {} in config file.".format(APIKEY)))
print(textwrap.dedent("""
Current stored ZIP is {}
Would you like to change it?
""".format(ZIP)))
BOOL2 = input("Y/N: ")
if BOOL2 in ['Y', 'y', 'Yes', 'YES', 'yes']:
ZIP = input("New ZIP: ")
            config['NINA']['ZIP'] = ZIP
with open(settings, 'w') as configfile:
config.write(configfile)
else:
print(textwrap.dedent("Keeping {} in config file.".format(ZIP)))
quit()
""" Assemble weather data. """
def weather(data, day_idx):
dat = data['forecast']['txt_forecast']['forecastday'][day_idx]
day = dat['title']
forecast = dat['fcttext']
temps = data['forecast']['simpleforecast']['forecastday'][day_idx]
high = temps['high']['fahrenheit']
low = temps['low']['fahrenheit']
return day, forecast, high, low
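# weather() returns (day title, forecast text, high °F, low °F) for the given
# forecast-day index; cli_format() below renders that tuple for the terminal.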
""" Format weather data. """
def cli_format(d):
forecast = '\n'.join(textwrap.wrap(d[1], 60))
temp = '{loc}\n{delim}\n\nHigh {high}{deg}, Low {low}{deg}\n\n{forecast}\n'
out = temp.format(loc=d[0],
forecast=forecast,
high=d[2],
low=d[3],
delim=60*'=',
deg='°F')
return out
""" Simple forecast (today, tonight, and tomorrow) """
def simple_forecast(APIKEY, ZIP):
wunder_data = {'api_url': 'http://api.wunderground.com/api/',
'api_key': APIKEY,
'query': '/conditions/forecast/q/',
'zip': ZIP}
url = '{api_url}{api_key}{query}{zip}.json'.format(**wunder_data)
response = urllib.request.urlopen(url)
content = response.read()
data = json.loads(content.decode("utf8"))
""" Test if the provided ZIP code is valid. """
try:
location = data['current_observation']['display_location']['full']
except:
location = False
try:
error = data['response']['error']['description']
except:
error = False
if location is False and error is not False:
print(error)
quit()
elif location is not False and error is False:
print('\n{0}\n'.format(location))
else:
print("Location returned: {}".format(location))
print("Error returned: {}".format(error))
quit()
times = (0, 1, 2)
for time in times:
weather_data = weather(data, time)
cli_output = cli_format(weather_data)
print(cli_output)
main()
| robotmachine/La_Nina | la_nina.py | Python | mit | 5,136 | [
"Brian"
] | e463d6ee1293619c0a5732c448358f1372debee28e085928e629e30a7341896f |
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from xml.etree.ElementTree import Element
from kivy.uix.widget import Widget
from kivy.metrics import dp
from ORCA.utils.TypeConvert import ToInt
from ORCA.utils.TypeConvert import ToFloat
from ORCA.utils.XML import GetXMLTextAttributeVar
from ORCA.widgets.base.Base import cWidgetBase
from ORCA.widgets.core.Border import cBorder
import ORCA.Globals as Globals
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ORCA.ScreenPage import cScreenPage
else:
from typing import TypeVar
cScreenPage = TypeVar("cScreenPage")
__all__ = ['cWidgetBorder']
class cWidgetBorder(cWidgetBase):
"""
WikiDoc:Doc
WikiDoc:Context:Widgets
WikiDoc:Page:Widgets-BORDER
WikiDoc:TOCTitle:Border
= BORDER =
    The border widget creates a colored border. You can specify the thickness and the color of the border.
There are only a few attributes for the border widget.
The following attributes are additional attributes to common widget attributes
<div style="overflow:auto; ">
{| class="wikitable"
! align="left" | Attribute
! align="left" | Description
|-
|type
|fixed: needs to be "BORDER". Capital letters!
|-
|backgroundcolor
|The background color of the border in hexadecimal RGBA format. It has to start with a pound sign (eg: #ff00ffff). Please use only low capital chars.
|-
|linewidth
    |The width of the line; should be a percentage with the same syntax as for the width attribute
|}</div>
    Below you see an example for a border
<div style="overflow-x: auto;"><syntaxhighlight lang="xml">
<element name="Border 1" type="BORDER" posx="%70" posy="%10" width="%8" height="of:width:self:*1.2" backgroundcolor='#00FF0040' />
</syntaxhighlight></div>
WikiDoc:End
"""
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.uLineWidth: str = u''
self.iLineWidth: int = 1
def InitWidgetFromXml(self,*,oXMLNode:Element,oParentScreenPage:cScreenPage, uAnchor:str) -> bool:
""" Reads further Widget attributes from a xml node """
bRet:bool=self.ParseXMLBaseNode(oXMLNode, oParentScreenPage, uAnchor)
if bRet:
self.uLineWidth = GetXMLTextAttributeVar(oXMLNode=oXMLNode,uTag=u'linewidth', bMandatory=False, uDefault="1.0")
if self.uBackGroundColor=="#00000000":
self.aBackGroundColor = Globals.oTheScreen.oSkin.dSkinAttributes.get('color border')
fPercentage:float=-1.0
if not self.uLineWidth==u'':
if self.uLineWidth.startswith('of:'):
self.iLineWidth=self._ParseDimPosValue(self.uLineWidth)
elif self.uLineWidth[0]==u'%':
fPercentage=ToFloat(self.uLineWidth[1:])
elif self.uLineWidth[0]==u'd':
self.iLineWidth=dp(ToInt(self.uLineWidth[1:]))+self.iAnchorPosX
else:
self.iLineWidth=ToInt(self.uLineWidth)
if not fPercentage == -1.0:
self.iLineWidth = ToInt(self.iAnchorPosX + ((fPercentage / 100) * self.iAnchorWidth) - (self.iWidth * (fPercentage / 100)))
self.iLineWidth = max(1,self.iLineWidth)
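            # Example: linewidth='%2' resolves to roughly 2% of the anchor
            # width, offset by the anchor x-position and reduced by 2% of the
            # widget's own width (formula above).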
return bRet
def Create(self,oParent:Widget) -> bool:
""" creates the Widget """
self.AddArg('background_color',self.aBackGroundColor)
self.AddArg('linewidth',str(self.iLineWidth))
if self.CreateBase(Parent=oParent, Class=cBorder):
self.oParent.add_widget(self.oObject)
return True
return False
| thica/ORCA-Remote | src/ORCA/widgets/Border.py | Python | gpl-3.0 | 4,647 | [
"ORCA"
] | 843857fa4ad138b81f587860356a7645834f8ecbcfb3e1979f93850e474a0df1 |
import pymol
from pymol import cmd
import sys,os
from MoleculeUtils import selectPolarProtons
from MoleculeUtils import selectApolarProtons
from MoleculeUtils import colorCPK
from PackingMeasureUtils import loadPackingPDB
def loadSurfaceInterfacePDB( file,name=None,native=None,wt=None):
print " Loading interface PDB %s"%(file)
if name is None:
name = name = os.path.basename(file)
if name.endswith('.pdb'):
name = name[:-4]
# Call Will's packing PDB loading function
# Note: proteins will be cartoons, cavities will be spheres
# Rosetta radii will be enabled
name = loadPackingPDB(file,name,native)
cavselname = name+"cavities"
protselname = name+"protein"
cmd.hide("lines",protselname)
backbone_colorlist = [ 'forest','gold', 'violet', 'cyan', \
'salmon', 'lime', 'slate', 'magenta', 'orange', 'marine', \
'olive', 'forest', 'firebrick', 'chocolate' ]
curr_bb_color = 0
carbon_colorlist = ['titanium', 'wheat', 'grey', 'pink' ]
curr_carbon_color = 0
# Derive selections for the interface, color by chain
cmd.select("interface", "none")
cmd.select("heavy_interface", "none")
selectPolarProtons("polar_protons")
selectApolarProtons("apolar_protons")
alphabet = list(('abcdefghijklmnopqrstuvwxyz').upper())
for letter in alphabet:
chainname = "chain"+letter
cmd.select( chainname, "chain %s and not hetatm and not symbol w"%(letter) )
# Check whether any protein atoms exist with this chain ID
# JK Later, put in a special "non-interface" case for L/H antibody chains
if cmd.count_atoms("chain%s"%(letter))>0:
interfacename = "interface"+letter
cmd.select("not_this_chain", "not hetatm and not symbol w and not %s"%(chainname) )
cmd.select(interfacename, "byres %s and (not_this_chain around 4.0)"%(chainname) )
cmd.select("heavy_%s"%(interfacename), "%s and not apolar_protons"%(interfacename))
cmd.select("interface", "interface or %s"%(interfacename) )
cmd.select("heavy_interface", "heavy_interface or heavy_%s"%(interfacename) )
cmd.delete("not_this_chain")
cmd.color(backbone_colorlist[curr_bb_color], chainname)
#changing....
colorCPK(interfacename,backbone_colorlist[curr_bb_color])
curr_bb_color = curr_bb_color+1
if(curr_bb_color == len(backbone_colorlist)):
curr_bb_color = 0
#colorCPK(interfacename,carbon_colorlist[curr_carbon_color])
curr_carbon_color = curr_carbon_color+1
if(curr_carbon_color == len(carbon_colorlist)):
curr_carbon_color = 0
cmd.color("white", "%s and polar_protons"%(interfacename))
else:
cmd.delete(chainname)
cmd.delete("apolar_protons")
cmd.delete("polar_protons")
# Show the interface in sticks, colored cpk
#cmd.hide( "cartoon", "interface" )
cmd.show( "sticks", "heavy_interface" )
cmd.zoom("interface")
cmd.create("design", "chain B")
cmd.create("target", "chain A")
cmd.show( "surface", "target" )
cmd.show( "surface", "design" )
cmd.set( "transparency", 0 )
#if loaded together with wt
cmd.load( wt, "wt" )
cmd.create( "wt_A", "wt and chain A" )
cmd.create( "wt_B", "wt and chain B" )
cmd.select("none")
cmd.create( "des_A", "not wt and chain A" )
cmd.show( "surface", "des_A" )
cmd.create( "des_B", "not wt and chain B")
cmd.show( "surface", "des_B" )
cmd.align( "wt_A", "des_A" )
cmd.align( "wt_B", "des_B" )
# cmd.show( "lines", "wt_A" )
# cmd.show( "lines", "wt_B" )
cmd.show( "cartoon", "(not interface) or byres(neighbor(interface)) or byres(neighbor(byres(neighbor(interface))))" )
# Show interface waters as small purple spheres
cmd.select( "interface_water", "(symbol w or resn HOH) and (interface around 8.0)")
if cmd.count_atoms("interface_water")>0:
# Put the waters in a separate object, so that we can scale their radii
newwatername = name+"waters"
cmd.create(newwatername, "interface_water")
cmd.remove("interface_water")
cmd.color("purple", newwatername)
cmd.show( "spheres", newwatername )
cmd.set( "sphere_scale", 0.1, newwatername )
else:
cmd.delete("interface_water")
# Show interface ligands as pink sticks
cmd.select( "interface_hetero", "(not symbol w and not resn HOH) and (hetatm and not symbol w and not resn WSS) and (interface around 4.5)")
if cmd.count_atoms("interface_hetero")>0:
cmd.color("pink", "interface_hetero")
cmd.show( "sticks", "interface_hetero" )
else:
cmd.delete("interface_hetero")
cmd.select("none")
return name
cmd.extend("loadSurfaceInterfacePDB",loadSurfaceInterfacePDB)
def surfaceInterfacePDB():
cmd.hide("lines")
backbone_colorlist = [ 'forest','gold', 'violet', 'cyan', \
'salmon', 'lime', 'slate', 'magenta', 'orange', 'marine', \
'olive', 'forest', 'firebrick', 'chocolate' ]
curr_bb_color = 0
carbon_colorlist = ['titanium', 'wheat', 'grey', 'pink' ]
curr_carbon_color = 0
# Derive selections for the interface, color by chain
cmd.select("interface", "none")
cmd.select("heavy_interface", "none")
selectPolarProtons("polar_protons")
selectApolarProtons("apolar_protons")
alphabet = list(('abcdefghijklmnopqrstuvwxyz').upper())
for letter in alphabet:
chainname = "chain"+letter
cmd.select( chainname, "chain %s and not hetatm and not symbol w"%(letter) )
# Check whether any protein atoms exist with this chain ID
# JK Later, put in a special "non-interface" case for L/H antibody chains
if cmd.count_atoms("chain%s"%(letter))>0:
interfacename = "interface"+letter
cmd.select("not_this_chain", "not hetatm and not symbol w and not %s"%(chainname) )
cmd.select(interfacename, "byres %s and (not_this_chain around 4.0)"%(chainname) )
cmd.select("heavy_%s"%(interfacename), "%s and not apolar_protons"%(interfacename))
cmd.select("interface", "interface or %s"%(interfacename) )
cmd.select("heavy_interface", "heavy_interface or heavy_%s"%(interfacename) )
cmd.delete("not_this_chain")
cmd.color(backbone_colorlist[curr_bb_color], chainname)
#changing....
colorCPK(interfacename,backbone_colorlist[curr_bb_color])
curr_bb_color = curr_bb_color+1
if(curr_bb_color == len(backbone_colorlist)):
curr_bb_color = 0
#colorCPK(interfacename,carbon_colorlist[curr_carbon_color])
curr_carbon_color = curr_carbon_color+1
if(curr_carbon_color == len(carbon_colorlist)):
curr_carbon_color = 0
cmd.color("white", "%s and polar_protons"%(interfacename))
else:
cmd.delete(chainname)
cmd.delete("apolar_protons")
cmd.delete("polar_protons")
# Show the interface in sticks, colored cpk
#cmd.hide( "cartoon", "interface" )
cmd.show( "sticks", "heavy_interface" )
cmd.create("design", "chain B")
cmd.create("target", "chain A")
cmd.show( "surface", "target" )
cmd.show( "surface", "design" )
cmd.set( "transparency", 0 )
cmd.show( "cartoon", "(not interface) or byres(neighbor(interface)) or byres(neighbor(byres(neighbor(interface))))" )
# Show interface waters as small purple spheres
cmd.select( "interface_water", "(symbol w or resn HOH) and (interface around 8.0)")
if cmd.count_atoms("interface_water")>0:
        # Put the waters in a separate object, so that we can scale their radii
        # (no 'name' variable exists in this function, so use a fixed object name)
        newwatername = "interface_waters"
cmd.create(newwatername, "interface_water")
cmd.remove("interface_water")
cmd.color("purple", newwatername)
cmd.show( "spheres", newwatername )
cmd.set( "sphere_scale", 0.1, newwatername )
else:
cmd.delete("interface_water")
# Show interface ligands as pink sticks
cmd.select( "interface_hetero", "(not symbol w and not resn HOH) and (hetatm and not symbol w and not resn WSS) and (interface around 4.5)")
if cmd.count_atoms("interface_hetero")>0:
cmd.color("pink", "interface_hetero")
cmd.show( "sticks", "interface_hetero" )
else:
cmd.delete("interface_hetero")
# Show polar contacts
cmd.distance("hbonds","interfaceA","interfaceB",3.2,mode=2)
cmd.hide("labels")
# Show vacuum electrostatics
cmd.util.protein_vacuum_esp("design", mode=2, quiet=0)
cmd.util.protein_vacuum_esp("target", mode=2, quiet=0)
cmd.disable("design_e_chg")
cmd.disable("design_e_map")
cmd.disable("design_e_pot")
cmd.disable("target_e_chg")
cmd.disable("target_e_map")
cmd.disable("target_e_pot")
cmd.disable("design")
cmd.disable("target")
cmd.zoom("interface")
cmd.remove("sele")
cmd.select("none")
cmd.extend("surfaceInterfacePDB",surfaceInterfacePDB)
def loadInterfacePDB(file,name=None,native=None,wt=None):
print " Loading interface PDB %s"%(file)
if name is None:
        name = os.path.basename(file)
if name.endswith('.pdb'):
name = name[:-4]
# Call Will's packing PDB loading function
# Note: proteins will be cartoons, cavities will be spheres
# Rosetta radii will be enabled
name = loadPackingPDB(file,name,native)
cavselname = name+"cavities"
protselname = name+"protein"
cmd.hide("lines",protselname)
backbone_colorlist = ['plutonium','wheat','green', 'yellow', 'violet', 'cyan', \
'salmon', 'lime', 'slate', 'magenta', 'orange', 'marine', \
'olive', 'forest', 'firebrick', 'chocolate' ]
curr_bb_color = 0
carbon_colorlist = ['teal', 'wheat', 'grey', 'pink' ]
curr_carbon_color = 0
# Derive selections for the interface, color by chain
cmd.select("interface", "none")
cmd.select("heavy_interface", "none")
selectPolarProtons("polar_protons")
selectApolarProtons("apolar_protons")
alphabet = list(('abcdefghijklmnopqrstuvwxyz').upper())
for letter in alphabet:
chainname = "chain"+letter
cmd.select( chainname, "chain %s and not hetatm and not symbol w"%(letter) )
# Check whether any protein atoms exist with this chain ID
# JK Later, put in a special "non-interface" case for L/H antibody chains
if cmd.count_atoms("chain%s"%(letter))>0:
interfacename = "interface"+letter
cmd.select("not_this_chain", "not hetatm and not symbol w and not %s"%(chainname) )
cmd.select(interfacename, "byres %s and (not_this_chain around 4.0)"%(chainname) )
cmd.select("heavy_%s"%(interfacename), "%s and not apolar_protons"%(interfacename))
cmd.select("interface", "interface or %s"%(interfacename) )
cmd.select("heavy_interface", "heavy_interface or heavy_%s"%(interfacename) )
cmd.delete("not_this_chain")
cmd.color(backbone_colorlist[curr_bb_color], chainname)
colorCPK(interfacename,backbone_colorlist[curr_bb_color])
curr_bb_color = curr_bb_color+1
if(curr_bb_color == len(backbone_colorlist)):
curr_bb_color = 0
#colorCPK(interfacename,carbon_colorlist[curr_carbon_color])
curr_carbon_color = curr_carbon_color+1
if(curr_carbon_color == len(carbon_colorlist)):
curr_carbon_color = 0
cmd.color("white", "%s and polar_protons"%(interfacename))
else:
cmd.delete(chainname)
cmd.delete("apolar_protons")
cmd.delete("polar_protons")
# Show the interface in sticks, colored cpk
#cmd.hide( "cartoon", "interface" )
cmd.show( "sticks", "heavy_interface" )
cmd.zoom("interface")
cmd.show( "cartoon", "(not interface) or byres(neighbor(interface)) or byres(neighbor(byres(neighbor(interface))))" )
# Show interface waters as small purple spheres
cmd.select( "interface_water", "(symbol w or resn HOH) and (interface around 8.0)")
if cmd.count_atoms("interface_water")>0:
# Put the waters in a separate object, so that we can scale their radii
newwatername = name+"waters"
cmd.create(newwatername, "interface_water")
cmd.remove("interface_water")
cmd.color("purple", newwatername)
cmd.show( "spheres", newwatername )
cmd.set( "sphere_scale", 0.1, newwatername )
else:
cmd.delete("interface_water")
# Show interface ligands as pink sticks
cmd.select( "interface_hetero", "(not symbol w and not resn HOH) and (hetatm and not symbol w and not resn WSS) and (interface around 4.5)")
if cmd.count_atoms("interface_hetero")>0:
cmd.color("pink", "interface_hetero")
cmd.show( "sticks", "interface_hetero" )
else:
cmd.delete("interface_hetero")
cmd.select("none")
return name
cmd.extend("loadInterfacePDB",loadInterfacePDB)
def interfacePDB():
backbone_colorlist = ['plutonium','wheat','green', 'yellow', 'violet', 'cyan', \
'salmon', 'lime', 'slate', 'magenta', 'orange', 'marine', \
'olive', 'forest', 'firebrick', 'chocolate' ]
curr_bb_color = 0
carbon_colorlist = ['teal', 'wheat', 'grey', 'pink' ]
curr_carbon_color = 0
# Derive selections for the interface, color by chain
cmd.select("interface", "none")
cmd.select("heavy_interface", "none")
selectPolarProtons("polar_protons")
selectApolarProtons("apolar_protons")
alphabet = list(('abcdefghijklmnopqrstuvwxyz').upper())
for letter in alphabet:
chainname = "chain"+letter
cmd.select( chainname, "chain %s and not hetatm and not symbol w"%(letter) )
# Check whether any protein atoms exist with this chain ID
# JK Later, put in a special "non-interface" case for L/H antibody chains
if cmd.count_atoms("chain%s"%(letter))>0:
interfacename = "interface"+letter
cmd.select("not_this_chain", "not hetatm and not symbol w and not %s"%(chainname) )
cmd.select(interfacename, "byres %s and (not_this_chain around 4.0)"%(chainname) )
cmd.select("heavy_%s"%(interfacename), "%s and not apolar_protons"%(interfacename))
cmd.select("interface", "interface or %s"%(interfacename) )
cmd.select("heavy_interface", "heavy_interface or heavy_%s"%(interfacename) )
cmd.delete("not_this_chain")
cmd.color(backbone_colorlist[curr_bb_color], chainname)
colorCPK(interfacename,backbone_colorlist[curr_bb_color])
curr_bb_color = curr_bb_color+1
if(curr_bb_color == len(backbone_colorlist)):
curr_bb_color = 0
#colorCPK(interfacename,carbon_colorlist[curr_carbon_color])
curr_carbon_color = curr_carbon_color+1
if(curr_carbon_color == len(carbon_colorlist)):
curr_carbon_color = 0
cmd.color("white", "%s and polar_protons"%(interfacename))
else:
cmd.delete(chainname)
cmd.delete("apolar_protons")
cmd.delete("polar_protons")
# Show the interface in sticks, colored cpk
#cmd.hide( "cartoon", "interface" )
cmd.show( "sticks", "heavy_interface and not hydro" )
cmd.zoom("interface")
cmd.show( "cartoon", "(not interface) or byres(neighbor(interface)) or byres(neighbor(byres(neighbor(interface))))" )
# Show interface waters as small purple spheres
cmd.select( "interface_water", "(symbol w or resn HOH) and (interface around 8.0)")
if cmd.count_atoms("interface_water")>0:
# Put the waters in a separate object, so that we can scale their radii
        newwatername = "interface_waters"  # no PDB name in scope in this function; use a fixed object name
cmd.create(newwatername, "interface_water")
cmd.remove("interface_water")
cmd.color("purple", newwatername)
cmd.show( "spheres", newwatername )
cmd.set( "sphere_scale", 0.1, newwatername )
else:
cmd.delete("interface_water")
# Show interface ligands as pink sticks
cmd.select( "interface_hetero", "(not symbol w and not resn HOH) and (hetatm and not symbol w and not resn WSS) and (interface around 4.5)")
if cmd.count_atoms("interface_hetero")>0:
cmd.color("pink", "interface_hetero")
cmd.show( "sticks", "interface_hetero" )
else:
cmd.delete("interface_hetero")
# Show polar contacts
#cmd.distance("hbonds","interfaceA","interfaceB",3.2,mode=1)
cmd.distance("hbonds","interfaceA","interfaceB",3.5,mode=2)
#cmd.distance("hbonds","interfaceA","interfaceB",3.2,mode=3)
cmd.hide("labels")
cmd.hide("lines")
cmd.select("none")
cmd.extend("interfacePDB",interfacePDB)
def stix():
backbone_colorlist = ['plutonium','wheat','green', 'yellow', 'violet', 'cyan', \
'salmon', 'lime', 'slate', 'magenta', 'orange', 'marine', \
'olive', 'forest', 'firebrick', 'chocolate' ]
curr_bb_color = 0
carbon_colorlist = ['teal', 'wheat', 'grey', 'pink' ]
curr_carbon_color = 0
# Derive selections for the interface, color by chain
cmd.select("interface", "none")
cmd.select("heavy_interface", "none")
selectPolarProtons("polar_protons")
selectApolarProtons("apolar_protons")
alphabet = list(('abcdefghijklmnopqrstuvwxyz').upper())
for letter in alphabet:
chainname = "chain"+letter
cmd.select( chainname, "chain %s and not hetatm and not symbol w"%(letter) )
# Check whether any protein atoms exist with this chain ID
# JK Later, put in a special "non-interface" case for L/H antibody chains
if cmd.count_atoms("chain%s"%(letter))>0:
interfacename = "interface"+letter
cmd.select("not_this_chain", "not hetatm and not symbol w and not %s"%(chainname) )
cmd.select(interfacename, "byres %s and (not_this_chain around 4.0)"%(chainname) )
cmd.select("heavy_%s"%(interfacename), "%s and not apolar_protons"%(interfacename))
cmd.select("interface", "interface or %s"%(interfacename) )
cmd.select("heavy_interface", "heavy_interface or heavy_%s"%(interfacename) )
cmd.delete("not_this_chain")
cmd.color(backbone_colorlist[curr_bb_color], chainname)
colorCPK(chainname,backbone_colorlist[curr_bb_color])
curr_bb_color = curr_bb_color+1
if(curr_bb_color == len(backbone_colorlist)):
curr_bb_color = 0
#colorCPK(interfacename,carbon_colorlist[curr_carbon_color])
curr_carbon_color = curr_carbon_color+1
if(curr_carbon_color == len(carbon_colorlist)):
curr_carbon_color = 0
cmd.color("white", "%s and polar_protons"%(interfacename))
else:
cmd.delete(chainname)
cmd.delete("apolar_protons")
cmd.delete("polar_protons")
# Show the interface in sticks, colored cpk
#cmd.hide( "cartoon", "interface" )
cmd.show( "sticks", "not hydro" )
cmd.zoom("interface")
cmd.show( "cartoon", "(not interface) or byres(neighbor(interface)) or byres(neighbor(byres(neighbor(interface))))" )
# Show interface waters as small purple spheres
cmd.select( "interface_water", "(symbol w or resn HOH) and (interface around 8.0)")
if cmd.count_atoms("interface_water")>0:
# Put the waters in a separate object, so that we can scale their radii
        newwatername = "interface_waters"  # no PDB name in scope in this function; use a fixed object name
cmd.create(newwatername, "interface_water")
cmd.remove("interface_water")
cmd.color("purple", newwatername)
cmd.show( "spheres", newwatername )
cmd.set( "sphere_scale", 0.1, newwatername )
else:
cmd.delete("interface_water")
# Show interface ligands as pink sticks
cmd.select( "interface_hetero", "(not symbol w and not resn HOH) and (hetatm and not symbol w and not resn WSS) and (interface around 4.5)")
if cmd.count_atoms("interface_hetero")>0:
cmd.color("pink", "interface_hetero")
cmd.show( "sticks", "interface_hetero" )
else:
cmd.delete("interface_hetero")
# Show polar contacts
#cmd.distance("hbonds","interfaceA","interfaceB",3.2,mode=1)
cmd.distance("hbonds","interfaceA","interfaceB",4.5,mode=2)
#cmd.distance("hbonds","interfaceA","interfaceB",3.2,mode=3)
cmd.hide("labels")
cmd.hide("lines")
cmd.select("none")
cmd.extend("stix",stix)
| weitzner/Dotfiles | pymol_scripts/InterfaceUtils-network.py | Python | mit | 20,814 | [
"PyMOL"
] | c7dcf7ec5ecdc50be2b562ad15d1b72d4cf7f791319dc0023e8b4a5c4b5273c2 |
"""psi4 module
The Sphinx documentation system scans all Python modules for docstrings
and class structure (good) but in doing so imports modules it finds in
import statements and follows any exposed code like the procedures
dictionary (bad for Psi4 because the real psi4 module cannot be
imported that way). So, this fake psi4 module
exists to appease Sphinx when it looks for psi4.py to import. Any psi4
commands that aren't protected by functions need to have skeleton versions
here.
"""
class Molecule:
pass
def sldkfjksl():
return 4.4
def plugin_load(sofile):
return 4
class SuperFunctional():
def __init__(self):
pass
@staticmethod
def blank():
return SuperFunctional()
@staticmethod
def build(sstr, iint, iint2):
return SuperFunctional()
def add_c_functional(self, Functional):
pass
def add_x_functional(self, Functional):
pass
def allocate(self):
pass
def ansatz(self):
pass
def c_alpha(self):
pass
def c_functional(self, sstr):
pass
def c_omega(self):
pass
def citation(self):
pass
def deriv(self):
pass
def description(self):
pass
def dispersion(self):
pass
def is_c_hybrid(self):
pass
def is_c_lrc(self):
pass
def is_gga(self):
pass
def is_meta(self):
pass
def is_x_hybrid(self):
pass
def is_x_lrc(self):
pass
def max_points(self):
pass
def name(self):
return 'SuperFunctionalName'
def print_detail(self, iint):
pass
def print_out(self):
pass
def set_c_alpha(self, ffloat):
pass
def set_c_ss_alpha(self, ffloat):
pass
def set_c_os_alpha(self, ffloat):
pass
def set_c_omega(self, ffloat):
pass
def set_citation(self, sstr):
pass
def set_deriv(self, iint):
pass
def set_description(self, sstr):
pass
def set_dispersion(self, Dispersion):
pass
def set_max_points(self, iint):
pass
def set_name(self, sstr):
pass
def set_x_alpha(self, ffloat):
pass
def set_x_omega(self, ffloat):
pass
#def test_functional(self, ):
# pass
def value(self, sstr):
pass
def x_alpha(self):
pass
def x_functional(self, sstr):
pass
def x_omega(self):
pass
class Functional():
def __init__(self):
pass
@staticmethod
def build_base(sstr):
return Functional()
def alpha(self):
pass
def citation(self):
pass
def description(self):
pass
def is_gga(self):
pass
def is_lrc(self):
pass
def is_meta(self):
pass
def lsda_cutoff(self):
pass
def meta_cutoff(self):
pass
def name(self):
pass
def omega(self):
pass
    def print_detail(self, iint):
pass
def print_out(self):
pass
def set_alpha(self, ffloat):
pass
def set_citation(self, sstr):
pass
def set_description(self, sstr):
pass
def set_gga(self, bbool):
pass
def set_lsda_cutoff(self, ffloat):
pass
def set_meta(self, bbool):
pass
def set_meta_cutoff(self, ffloat):
pass
def set_name(self, sstr):
pass
def set_omega(self, ffloat):
pass
def set_parameter(self, sstr, ffloat):
pass
class Dispersion():
def __init__(self):
pass
@staticmethod
def build(sstr, ffloat, ffloat1, ffloat2, ffloat3):
return Dispersion()
class Process():
def __init__(self):
pass
Process.environment = {}
Process.environment['PSI_SCRATCH'] = 'nonsense'
| spring01/libPSI | doc/sphinxman/source/psi4.py | Python | gpl-2.0 | 3,840 | [
"Psi4"
] | 0682a7b8764137d97f28f7083afb7fb360562d9eb2242c303b30d0af46380a33 |
"""Reads quantum espresso files. Tested for output on PWSCF v.5.0.2, only
for typical output of input files made with ASE -- that is, ibrav=0."""
import numpy as np
from ase.atoms import Atoms, Atom
from ase import units
from ase.calculators.singlepoint import SinglePointCalculator
def read_espresso_out(fileobj, index):
"""Reads quantum espresso output text files."""
if isinstance(fileobj, str):
fileobj = open(fileobj, 'rU')
lines = fileobj.readlines()
images = []
# Get unit cell info.
bl_line = [line for line in lines if 'bravais-lattice index' in line]
if len(bl_line) != 1:
raise NotImplementedError('Unsupported: unit cell changing.')
bl_line = bl_line[0].strip()
brav_latt_index = bl_line.split('=')[1].strip()
if brav_latt_index != '0':
raise NotImplementedError('Supported only for Bravais-lattice '
'index of 0 (free).')
lp_line = [line for line in lines if 'lattice parameter (alat)' in
line]
if len(lp_line) != 1:
raise NotImplementedError('Unsupported: unit cell changing.')
lp_line = lp_line[0].strip().split('=')[1].strip().split()[0]
lattice_parameter = float(lp_line) * units.Bohr
ca_line_no = [number for (number, line) in enumerate(lines) if
'crystal axes: (cart. coord. in units of alat)' in line]
if len(ca_line_no) != 1:
raise NotImplementedError('Unsupported: unit cell changing.')
ca_line_no = int(ca_line_no[0])
cell = np.zeros((3, 3))
for number, line in enumerate(lines[ca_line_no + 1: ca_line_no + 4]):
line = line.split('=')[1].strip()[1:-1]
values = [eval(value) for value in line.split()]
cell[number, 0] = values[0]
cell[number, 1] = values[1]
cell[number, 2] = values[2]
cell *= lattice_parameter
# Find atomic positions and add to images.
for number, line in enumerate(lines):
key = 'Begin final coordinates' # these just reprint last posn.
if key in line:
break
key = 'Cartesian axes'
if key in line:
atoms = make_atoms(number, lines, key, cell)
images.append(atoms)
key = 'ATOMIC_POSITIONS (crystal)'
if key in line:
atoms = make_atoms(number, lines, key, cell)
images.append(atoms)
return images[index]
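# Usage sketch (assumes this module is importable as ase.io.espresso and that
# 'pw.out' is a hypothetical PWSCF output file; index=-1 takes the last image):
#   from ase.io.espresso import read_espresso_out
#   atoms = read_espresso_out('pw.out', index=-1)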
def make_atoms(index, lines, key, cell):
"""Scan through lines to get the atomic positions."""
atoms = Atoms()
if key == 'Cartesian axes':
for line in lines[index + 3:]:
entries = line.split()
if len(entries) == 0:
break
symbol = entries[1][:-1]
x = float(entries[6])
y = float(entries[7])
z = float(entries[8])
atoms.append(Atom(symbol, (x, y, z)))
atoms.set_cell(cell)
elif key == 'ATOMIC_POSITIONS (crystal)':
for line in lines[index + 1:]:
entries = line.split()
if len(entries) == 0 or (entries[0] == 'End'):
break
symbol = entries[0][:-1]
x = float(entries[1])
y = float(entries[2])
z = float(entries[3])
atoms.append(Atom(symbol, (x, y, z)))
atoms.set_cell(cell, scale_atoms=True)
# Energy is located after positions.
energylines = [number for number, line in enumerate(lines) if
('!' in line and 'total energy' in line)]
energyline = min([n for n in energylines if n > index])
energy = float(lines[energyline].split()[-2]) * units.Ry
# Forces are located after positions.
forces = np.zeros((len(atoms), 3))
forcelines = [number for number, line in enumerate(lines) if
'Forces acting on atoms (Ry/au):' in line]
forceline = min([n for n in forcelines if n > index])
for line in lines[forceline + 4:]:
words = line.split()
if len(words) == 0:
break
fx = float(words[-3])
fy = float(words[-2])
fz = float(words[-1])
atom_number = int(words[1]) - 1
forces[atom_number] = (fx, fy, fz)
forces *= units.Ry / units.Bohr
calc = SinglePointCalculator(atoms, energy=energy, forces=forces)
atoms.set_calculator(calc)
return atoms
def read_espresso_in(fileobj):
"""Reads espresso input files."""
if isinstance(fileobj, str):
fileobj = open(fileobj, 'rU')
data, extralines = read_fortran_namelist(fileobj)
positions, method = get_atomic_positions(extralines,
n_atoms=data['system']['nat'])
cell = get_cell_parameters(extralines)
if data['system']['ibrav'] == 0:
atoms = build_atoms(positions, method, cell,
data['system']['celldm(1)'])
else:
raise NotImplementedError('ibrav=%i not implemented.' %
data['system']['ibrav'])
return atoms
def build_atoms(positions, method, cell, alat):
"""Creates the atoms for a quantum espresso in file."""
if method != 'crystal':
raise NotImplementedError('Only supported for crystal method of '
'ATOMIC_POSITIONS, not %s.' % method)
atoms = Atoms()
for el, (x, y, z) in positions:
atoms.append(Atom(el, (x, y, z)))
cell *= f2f(alat) * units.Bohr
atoms.set_cell(cell, scale_atoms=True)
return atoms
def get_atomic_positions(lines, n_atoms):
"""Returns the atomic positions of the atoms as an (ordered) list from
the lines of text of the espresso input file."""
atomic_positions = []
line = [n for (n, l) in enumerate(lines) if 'ATOMIC_POSITIONS' in l]
if len(line) == 0:
return None
if len(line) > 1:
raise RuntimeError('More than one ATOMIC_POSITIONS section?')
line_no = line[0]
for line in lines[line_no + 1:line_no + n_atoms + 1]:
el, x, y, z = line.split()
atomic_positions.append([el[:-1], (f2f(x), f2f(y), f2f(z))])
line = lines[line_no]
if '{' in line:
method = line[line.find('{') + 1:line.find('}')]
elif '(' in line:
method = line[line.find('(') + 1:line.find(')')]
else:
method = None
return atomic_positions, method
def get_cell_parameters(lines):
"""Returns the cell parameters as a matrix."""
cell_parameters = np.zeros((3, 3))
line = [n for (n, l) in enumerate(lines) if 'CELL_PARAMETERS' in l]
if len(line) == 0:
return None
if len(line) > 1:
raise RuntimeError('More than one CELL_PARAMETERS section?')
line_no = line[0]
for vector, line in enumerate(lines[line_no + 1:line_no + 4]):
x, y, z = line.split()
cell_parameters[vector] = (f2f(x), f2f(y), f2f(z))
return cell_parameters
def read_fortran_namelist(fileobj):
"""Takes a fortran-namelist formatted file and returns appropriate
dictionaries, followed by lines of text that do not fit this pattern.
"""
data = {}
extralines = []
indict = False
fileobj.seek(0)
for line in fileobj.readlines():
if indict and line.strip().startswith('/'):
indict = False
elif line.strip().startswith('&'):
indict = True
dictname = line.strip()[1:].lower()
data[dictname] = {}
elif (not indict) and (len(line.strip()) > 0):
extralines.append(line)
elif indict:
key, value = line.strip().split('=')
if value.endswith(','):
value = value[:-1]
value = value.strip()
try:
value = eval(value)
except SyntaxError:
value = {'.true.': True, '.false.': False}.get(value, value)
data[dictname][key.strip()] = value
return data, extralines
def f2f(value):
"""Converts a fortran-formatted double precision number (e.g., 2.323d2)
to a python float. value should be a string."""
value = value.replace('d', 'e')
value = value.replace('D', 'e')
return float(value)
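# Worked example of the docstring's own sample value:
#   f2f('2.323d2') -> 232.3, since 'd'/'D' exponents are rewritten to 'e'
#   before the float() call.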
| grhawk/ASE | tools/ase/io/espresso.py | Python | gpl-2.0 | 8,127 | [
"ASE",
"CRYSTAL",
"ESPResSo",
"Quantum ESPRESSO"
] | 7e47ed32628574d16a64dae6f81f0d9f471393836ecf75cb49f4b1516cbb1525 |
= Docker How To =
docker run -t -i centos /bin/bash
Hi, Brian,
This seems to be an issue related to Docker running out of disk space. A few
general solutions which seem to work for others include simply waiting and
retrying to pull the image, removing the contents of your /var/lib/docker
directory and restarting your Docker instance (be sure to save anything
crucial first!), or setting DOCKER_STORAGE_OPTIONS in
/etc/sysconfig/docker-storage to an empty value.
If these don't work for your instance, you may want to take a look at the
contents of /var/lib/docker/devicemapper/devicemapper/metadata; see if there
are any apparent errors, and if so we can address them more directly.
As for the NameError you're seeing near the bottom, that's an issue with the
install file that has since been resolved - replacing your version of
step1_ecs_singlenode_install.py with the most recent version should prevent
the error in the future.
docker ps
docker exec -it zabbix bash
| loolwv7/scripts | Linux_Tricks/Docker_how_to.py | Python | gpl-3.0 | 1,023 | [
"Brian"
] | 68143809e0a4f8d22d1e0c2bc6267a99741d786903dbb10602547ffeaaca4ee6 |
"""
Ax_Metrics - EROut plugin 'geckoboard_text'
Writes Geckoboard JSON output for various charts for use with
http://www.geckoboard.com.
Contents:
- EROut_geckoboard_text - pages of text, optionally flagged
See:
- https://developer.geckoboard.com/#geck-o-meter
------------------------------------------------------------------------------
Author: Dan Kamins <dos at axonchisel dot net>
Copyright (c) 2014 Dan Kamins, AxonChisel.net
"""
# ----------------------------------------------------------------------------
from .base import EROut_geckoboard
import logging
log = logging.getLogger(__name__)
# ----------------------------------------------------------------------------
# Type numbers used by Geckoboard Text widget:
_GB_TEXT_TYPE_NORMAL = 0 # no flag
_GB_TEXT_TYPE_INFO = 2 # small gray "i" flag
_GB_TEXT_TYPE_ALERT = 1 # serious orange "!" flag
_GB_TEXT_TYPE_BY_COLOR = { # map color to GB text type
'GREEN': _GB_TEXT_TYPE_NORMAL,
'AMBER': _GB_TEXT_TYPE_INFO,
'RED' : _GB_TEXT_TYPE_ALERT,
}
_QFORMAT_RAG_KEY_BY_COLOR = { # map color to QFormat format key name
'GREEN': 'green',
'AMBER': 'amber',
'RED': 'red',
}
# ----------------------------------------------------------------------------
class EROut_geckoboard_text(EROut_geckoboard):
"""
EROut (Extensible Report Outputter) Plugin for Geckoboard Text.
Adds JSON-serializable output to extinfo['jout'] dict.
    Typical usage is with a collapsed query, the default 'LAST' reduce function,
    and ghosts disabled. This prevents needless queries from running.
Non-collapsed queries with other reduce functions may be used too.
Each data series of each query processed by this EROut will result in
an additional text page which Geckoboard cycles through automatically.
If the QMetric 'rag' parameter is specified, the text may be flagged
as either important (amber) or very important (red). Otherwise text
is displayed without any flag.
QMetric NEGATIVE 'impact' is addressed properly to support negative
data (e.g. bugs, expenses, etc.).
The QFormat format strings (red, amber, green) support these params:
- qmlabel - label from QMetric
- value - actual value
- amber - amber value cutoff
- red - red value cutoff
Example formats:
red: "DANGER: SENSOR {qmlabel} - {value} OVER LIMIT!"
amber: "Notice: Sensor {qmlabel} - {value} near limit ({red})"
green: "Sensor {qmlabel} OK"
QFormat support (under 'geckoboard_meter' or '_default'):
reduce : (optional) Function from metricdef.FUNCS to reduce
series with. Default 'LAST'.
red : (optional) Format str for red mode.
Only required if 'rag' specified in QMetric.
amber : (optional) Format str for amber mode.
Only required if 'rag' specified in QMetric.
green : Format str for green mode.
More info:
- https://developer.geckoboard.com/#text
Example JSON:
{
"item": [
{
"text": "Unfortunately, as you probably already know, people",
"type": 0
},
{
"text": "As you might know, I am a full time Internet",
"type": 1
}
]
}
"""
#
# Abstract Method Implementations
#
# abstract
def plugin_output(self, mdseries, query=None):
"""
EROut plugins must implement this abstract method.
Invoked to output MultiDataSeries as specified.
Returns nothing. Output target should be configured separately.
"""
log.debug("Outputting %s for query %s", mdseries, query)
self._qfdomain = 'geckoboard_text'
# Iterate MDS, writing each series:
for dseries in mdseries.iter_series():
self._write_series(dseries)
#
# Internal Methods
#
def _write_series(self, dseries):
"""
Write the current DataSeries to output as an item.
        (Geckoboard supports up to 10 items (pages of text) in the JSON,
        so up to 10 DataSeries can be used, even when they are spread
        across multiple queries)
"""
# Prep:
self._dseries = dseries
self._write_series_prep()
# Calculate details:
self._write_series_identify_color()
self._write_series_set_type()
self._write_series_format_text()
# Add overall item to jout:
self.jout['item'].append(self._jitem)
def _write_series_prep(self):
"""Prepare internal data for new DataSeries."""
# Reduce series to single value by reduce func.
# Usually func 'LAST' with collapsed series (Servant option),
# but other operations can be useful too, e.g. AVG, etc.
reduce_func = self._qformat_get('reduce', 'LAST')
self._value = self._dseries.reduce(reduce_func)
# Prep JSON-serializable template to fill in:
self._jitem = {
"text": "",
"type": _GB_TEXT_TYPE_NORMAL,
}
def _write_series_identify_color(self):
"""Set self._color to GREEN,AMBER,RED based on value."""
# Default to GREEN:
self._color = 'GREEN'
if not self.query:
return # no Query, so stay GREEN
# Find first QMetric in QData with metric_id matching series metric:
# (reverse engineering since QData is not passed through MQEngine)
try:
self._qmetric = next(qm for qm in
self.query.qdata.iter_qmetrics()
if qm.metric_id == self._dseries.mdef.id
)
except StopIteration:
return # no QMetric, so stay GREEN (this is not likely)
if not self._qmetric.rag:
return # no 'rag' set on QMetric, so stay GREEN
(rag_c1, rag_c2) = self._qmetric.rag
# If negative impact (e.g. expenses, bugs, ...):
if self._qmetric.impact == 'NEGATIVE':
if self._value >= rag_c1:
self._color = 'RED'
elif self._value >= rag_c2:
self._color = 'AMBER'
# Else normal positive impact (e.g. revenue, sales, ...):
else:
assert self._qmetric.impact == 'POSITIVE'
if self._value <= rag_c1:
self._color = 'RED'
elif self._value <= rag_c2:
self._color = 'AMBER'
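        # Illustrative thresholds (hypothetical numbers): with rag = (50, 10)
        # and NEGATIVE impact, a value of 60 -> RED, 20 -> AMBER, 5 -> GREEN;
        # with POSITIVE impact the comparisons run in the opposite direction.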
def _write_series_set_type(self):
"""Set jitem type based on color."""
self._jitem['type'] = _GB_TEXT_TYPE_BY_COLOR[self._color]
def _write_series_format_text(self):
"""Format jitem text based on color, value, etc.."""
# Default:
self._jitem['text'] = "{0}".format(self._value)
if not self.query or not self._qmetric:
return # no query or QMetric
# Get color format str:
fmtkey = _QFORMAT_RAG_KEY_BY_COLOR[self._color]
fmt = self._qformat_get(fmtkey, None)
if not fmt:
return # no matching color key in QFormat
# Build format params:
params = {
'qmlabel' : self._qmetric.label,
'value' : self._value,
'amber' : "?",
'red' : "?",
}
if self._qmetric.rag:
params['red'] = self._qmetric.rag[0]
params['amber'] = self._qmetric.rag[1]
# Format string:
text = fmt.format(**params)
self._jitem['text'] = text
| axonchisel/ax_metrics | py/axonchisel/metrics/io/erout/plugins/ero_geckoboard/text.py | Python | mit | 7,614 | [
"Amber"
] | 9004f06838e824e003eb020a2be3517369065ee877a75546e62d53af307fb753 |
"""
A robot exclusion rules parser for Python by Philip Semanchuk
Full documentation, examples and a comparison to Python's robotparser module
reside here:
http://NikitaTheSpider.com/python/rerp/
Comments, bug reports, etc. are most welcome via email to:
philip@semanchuk.com
Simple usage examples:
import robotexclusionrulesparser
rerp = robotexclusionrulesparser.RobotExclusionRulesParser()
try:
rerp.fetch('http://www.example.com/robots.txt')
except:
# See the documentation for expected errors
pass
if rerp.is_allowed('CrunchyFrogBot', '/foo.html'):
print "It is OK to fetch /foo.html"
OR supply the contents of robots.txt yourself:
rerp = RobotExclusionRulesParser()
s = open("robots.txt").read()
rerp.parse(s)
if rerp.is_allowed('CrunchyFrogBot', '/foo.html'):
print "It is OK to fetch /foo.html"
The function is_expired() tells you if you need to fetch a fresh copy of
this robots.txt.
if rerp.is_expired():
# Get a new copy
pass
RobotExclusionRulesParser supports __unicode__() and __str()__ so you can print
an instance to see the its rules in robots.txt format.
The comments refer to MK1994, MK1996 and GYM2008. These are:
MK1994 = the 1994 robots.txt draft spec (http://www.robotstxt.org/orig.html)
MK1996 = the 1996 robots.txt draft spec (http://www.robotstxt.org/norobots-rfc.txt)
GYM2008 = the Google-Yahoo-Microsoft extensions announced in 2008
(http://www.google.com/support/webmasters/bin/answer.py?hl=en&answer=40360)
This code is released under the following BSD license --
Copyright (c) 2010, Philip Semanchuk
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of robotexclusionrulesparser nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY ITS CONTRIBUTORS ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL Philip Semanchuk BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
PY_MAJOR_VERSION = sys.version_info[0]
if PY_MAJOR_VERSION < 3:
from urlparse import urlparse as urllib_urlparse
from urlparse import urlunparse as urllib_urlunparse
from urllib import unquote as urllib_unquote
import urllib2 as urllib_request
import urllib2 as urllib_error
else:
import urllib.request as urllib_request
import urllib.error as urllib_error
from urllib.parse import unquote as urllib_unquote
from urllib.parse import urlparse as urllib_urlparse
from urllib.parse import urlunparse as urllib_urlunparse
import re
import time
import calendar
# rfc822 is deprecated since Python 2.3, but the functions I need from it
# are in email.utils which isn't present until Python 2.5. ???
try:
import email.utils as email_utils
except ImportError:
import rfc822 as email_utils
# These are the different robots.txt syntaxes that this module understands.
# Hopefully this list will never have more than two elements.
MK1996 = 1
GYM2008 = 2
_end_of_line_regex = re.compile(r"(?:\r\n)|\r|\n")
# This regex is a little more generous than the spec because it accepts
# "User-agent" or "Useragent" (without a dash). MK1994/96 permits only the
# former. The regex also doesn't insist that "useragent" is at the exact
# beginning of the line, which makes this code immune to confusion caused
# by byte order markers.
_directive_regex = re.compile("(allow|disallow|user[-]?agent|sitemap|crawl-delay):[ \t]*(.*)", re.IGNORECASE)
# This is the number of seconds in a week that I use to determine the default
# expiration date defined in MK1996.
SEVEN_DAYS = 60 * 60 * 24 * 7
# This controls the max number of bytes read in as a robots.txt file. This
# is just a bit of defensive programming in case someone accidentally sends
# an ISO file in place of their robots.txt. (It happens...) Suggested by
# Dima Brodsky.
MAX_FILESIZE = 100 * 1024 # 100k
# Control characters are everything < 0x20 and 0x7f.
_control_characters_regex = re.compile(r"""[\000-\037\177]""")
# Charset extraction regex for pulling the encoding (charset) out of a
# content-type header.
_charset_extraction_regex = re.compile(r"""charset=['"]?(?P<encoding>[^'"]*)['"]?""")
def _raise_error(error, message):
# I have to exec() this code because the Python 2 syntax is invalid
# under Python 3 and vice-versa.
s = "raise "
s += "error, message" if (PY_MAJOR_VERSION == 2) else "error(message)"
exec(s)
def _unquote_path(path):
# MK1996 says, 'If a %xx encoded octet is encountered it is unencoded
# prior to comparison, unless it is the "/" character, which has
# special meaning in a path.'
path = re.sub("%2[fF]", "\n", path)
path = urllib_unquote(path)
return path.replace("\n", "%2F")
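# Worked example of the %2F protection above:
#   _unquote_path('/a%2Fb%20c') -> '/a%2Fb c' (the space is unquoted but the
#   encoded slash is preserved, per the MK1996 note).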
def _scrub_data(s):
# Data is either a path or user agent name; i.e. the data portion of a
# robots.txt line. Scrubbing it consists of (a) removing extraneous
# whitespace, (b) turning tabs into spaces (path and UA names should not
# contain tabs), and (c) stripping control characters which, like tabs,
# shouldn't be present. (See MK1996 section 3.3 "Formal Syntax".)
s = _control_characters_regex.sub("", s)
s = s.replace("\t", " ")
return s.strip()
def _parse_content_type_header(header):
media_type = ""
encoding = ""
# A typical content-type looks like this:
# text/plain; charset=UTF-8
# The portion after "text/plain" is optional and often not present.
# ref: http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7
if header:
header = header.strip().lower()
else:
header = ""
chunks = [s.strip() for s in header.split(";")]
media_type = chunks[0]
if len(chunks) > 1:
for parameter in chunks[1:]:
m = _charset_extraction_regex.search(parameter)
if m and m.group("encoding"):
encoding = m.group("encoding")
return media_type.strip(), encoding.strip()
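# Worked example of the parse above, using the header format cited in its
# comments:
#   _parse_content_type_header('text/plain; charset=UTF-8')
#   -> ('text/plain', 'utf-8')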
class _Ruleset(object):
""" _Ruleset represents a set of allow/disallow rules (and possibly a
crawl delay) that apply to a set of user agents.
Users of this module don't need this class. It's available at the module
level only because RobotExclusionRulesParser() instances can't be
    pickled if _Ruleset isn't visible at the module level.
"""
ALLOW = 1
DISALLOW = 2
def __init__(self):
self.robot_names = [ ]
self.rules = [ ]
self.crawl_delay = None
def __str__(self):
s = self.__unicode__()
if PY_MAJOR_VERSION == 2:
s = s.encode("utf-8")
return s
def __unicode__(self):
d = { self.ALLOW : "Allow", self.DISALLOW : "Disallow" }
s = ''.join( ["User-agent: %s\n" % name for name in self.robot_names] )
if self.crawl_delay:
s += "Crawl-delay: %s\n" % self.crawl_delay
s += ''.join( ["%s: %s\n" % (d[rule_type], path) for rule_type, path in self.rules] )
return s
def add_robot_name(self, bot):
self.robot_names.append(bot)
def add_allow_rule(self, path):
self.rules.append((self.ALLOW, _unquote_path(path)))
def add_disallow_rule(self, path):
self.rules.append((self.DISALLOW, _unquote_path(path)))
def is_not_empty(self):
return bool(len(self.rules)) and bool(len(self.robot_names))
def is_default(self):
return bool('*' in self.robot_names)
def does_user_agent_match(self, user_agent):
match = False
for robot_name in self.robot_names:
# MK1994 says, "A case insensitive substring match of the name
# without version information is recommended." MK1996 3.2.1
# states it even more strongly: "The robot must obey the first
# record in /robots.txt that contains a User-Agent line whose
# value contains the name token of the robot as a substring.
# The name comparisons are case-insensitive."
match = match or (robot_name == '*') or \
(robot_name.lower() in user_agent.lower())
return match
def is_url_allowed(self, url, syntax=GYM2008):
allowed = True
# Schemes and host names are not part of the robots.txt protocol,
# so I ignore them. It is the caller's responsibility to make
# sure they match.
_, _, path, parameters, query, fragment = urllib_urlparse(url)
url = urllib_urlunparse(("", "", path, parameters, query, fragment))
url = _unquote_path(url)
done = False
i = 0
while not done:
rule_type, path = self.rules[i]
if (syntax == GYM2008) and ("*" in path or path.endswith("$")):
# GYM2008-specific syntax applies here
# http://www.google.com/support/webmasters/bin/answer.py?hl=en&answer=40360
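                # Illustrative (hypothetical) rules: 'Disallow: /private*'
                # blocks '/private/foo', and 'Disallow: /*.gif$' blocks
                # '/img/a.gif' because the '$' anchors the match at the end.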
if path.endswith("$"):
appendix = "$"
path = path[:-1]
else:
appendix = ""
parts = path.split("*")
pattern = "%s%s" % \
(".*".join([re.escape(p) for p in parts]), appendix)
if re.match(pattern, url):
# Ding!
done = True
allowed = (rule_type == self.ALLOW)
else:
# Wildcards are either not present or are taken literally.
if url.startswith(path):
# Ding!
done = True
allowed = (rule_type == self.ALLOW)
# A blank path means "nothing", so that effectively
# negates the value above.
# e.g. "Disallow: " means allow everything
if not path:
allowed = not allowed
i += 1
if i == len(self.rules):
done = True
return allowed
class RobotExclusionRulesParser(object):
"""A parser for robots.txt files."""
def __init__(self):
self._source_url = ""
self.user_agent = None
self.use_local_time = True
self.expiration_date = self._now() + SEVEN_DAYS
self._response_code = 0
self._sitemaps = [ ]
self.__rulesets = [ ]
@property
def source_url(self):
"""The URL from which this robots.txt was fetched. Read only."""
return self._source_url
@property
def response_code(self):
"""The remote server's response code. Read only."""
return self._response_code
@property
def sitemap(self):
"""Deprecated; use 'sitemaps' instead. Returns the sitemap URL present
in the robots.txt, if any. Defaults to None. Read only. """
_raise_error(DeprecationWarning, "The sitemap property is deprecated. Use 'sitemaps' instead.")
@property
def sitemaps(self):
"""The sitemap URLs present in the robots.txt, if any. Defaults
to an empty list. Read only."""
return self._sitemaps
@property
def is_expired(self):
"""True if the difference between now and the last call
to fetch() exceeds the robots.txt expiration.
"""
return self.expiration_date <= self._now()
def _now(self):
if self.use_local_time:
return time.time()
else:
# What the heck is timegm() doing in the calendar module?!?
return calendar.timegm(time.gmtime())
def is_allowed(self, user_agent, url, syntax=GYM2008):
"""True if the user agent is permitted to visit the URL. The syntax
parameter can be GYM2008 (the default) or MK1996 for strict adherence
to the traditional standard.
"""
if PY_MAJOR_VERSION < 3:
# The robot rules are stored internally as Unicode. The two lines
# below ensure that the parameters passed to this function are
# also Unicode. If those lines were not present and the caller
# passed a non-Unicode user agent or URL string to this function,
# Python would silently convert it to Unicode before comparing it
# to the robot rules. Such conversions use the default encoding
# (usually US-ASCII) and if the string couldn't be converted using
# that encoding, Python would raise a UnicodeError later on in the
# guts of this code which would be confusing.
# Converting the strings to Unicode here doesn't make the problem
# go away but it does make the conversion explicit so that
# failures are easier to understand.
if not isinstance(user_agent, unicode):
user_agent = user_agent.decode()
if not isinstance(url, unicode):
url = url.decode()
if syntax not in (MK1996, GYM2008):
_raise_error(ValueError, "Syntax must be MK1996 or GYM2008")
for ruleset in self.__rulesets:
if ruleset.does_user_agent_match(user_agent):
return ruleset.is_url_allowed(url, syntax)
return True
def get_crawl_delay(self, user_agent):
"""Returns a float representing the crawl delay specified for this
user agent, or None if the crawl delay was unspecified or not a float.
"""
# See is_allowed() comment about the explicit unicode conversion.
if (PY_MAJOR_VERSION < 3) and (not isinstance(user_agent, unicode)):
user_agent = user_agent.decode()
for ruleset in self.__rulesets:
if ruleset.does_user_agent_match(user_agent):
return ruleset.crawl_delay
return None
def fetch(self, url, timeout=None):
"""Attempts to fetch the URL requested which should refer to a
robots.txt file, e.g. http://example.com/robots.txt.
"""
# ISO-8859-1 is the default encoding for text files per the specs for
# HTTP 1.0 (RFC 1945 sec 3.6.1) and HTTP 1.1 (RFC 2616 sec 3.7.1).
# ref: http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1
encoding = "iso-8859-1"
content = ""
expires_header = None
content_type_header = None
self._response_code = 0
self._source_url = url
if self.user_agent:
req = urllib_request.Request(url, None,
{ 'User-Agent' : self.user_agent })
else:
req = urllib_request.Request(url)
try:
if timeout:
f = urllib_request.urlopen(req, timeout=timeout)
else:
f = urllib_request.urlopen(req)
content = f.read(MAX_FILESIZE)
# As of Python 2.5, f.info() looks like it returns the HTTPMessage
# object created during the connection.
expires_header = f.info().get("expires")
content_type_header = f.info().get("Content-Type")
# As of Python 2.4, this file-like object reports the response
# code, too.
if hasattr(f, "code"):
self._response_code = f.code
else:
self._response_code = 200
f.close()
except urllib_error.URLError:
# This is a slightly convoluted way to get the error instance,
# but it works under Python 2 & 3.
error_instance = sys.exc_info()
if len(error_instance) > 1:
error_instance = error_instance[1]
if hasattr(error_instance, "code"):
self._response_code = error_instance.code
# MK1996 section 3.4 says, "...robots should take note of Expires
# header set by the origin server. If no cache-control directives
# are present robots should default to an expiry of 7 days".
# This code is lazy and looks at the Expires header but not
# Cache-Control directives.
self.expiration_date = None
if self._response_code >= 200 and self._response_code < 300:
# All's well.
if expires_header:
self.expiration_date = email_utils.parsedate_tz(expires_header)
if self.expiration_date:
# About time zones -- the call to parsedate_tz() returns a
# 10-tuple with the time zone offset in the 10th element.
# There are 3 valid formats for HTTP dates, and one of
# them doesn't contain time zone information. (UTC is
# implied since all HTTP header dates are UTC.) When given
# a date that lacks time zone information, parsedate_tz()
# returns None in the 10th element. mktime_tz() interprets
# None in the 10th (time zone) element to mean that the
# date is *local* time, not UTC.
# Therefore, if the HTTP timestamp lacks time zone info
# and I run that timestamp through parsedate_tz() and pass
# it directly to mktime_tz(), I'll get back a local
# timestamp which isn't what I want. To fix this, I simply
# convert a time zone of None to zero. It's much more
# difficult to explain than to fix. =)
# ref: http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1
if self.expiration_date[9] == None:
self.expiration_date = self.expiration_date[:9] + (0,)
self.expiration_date = email_utils.mktime_tz(self.expiration_date)
if self.use_local_time:
# I have to do a little more converting to get this
# UTC timestamp into localtime.
self.expiration_date = time.mktime(time.gmtime(self.expiration_date))
#else:
# The expires header was garbage.
if not self.expiration_date: self.expiration_date = self._now() + SEVEN_DAYS
if (self._response_code >= 200) and (self._response_code < 300):
# All's well.
media_type, encoding = _parse_content_type_header(content_type_header)
# RFC 2616 sec 3.7.1 --
# When no explicit charset parameter is provided by the sender,
# media subtypes of the "text" type are defined to have a default
# charset value of "ISO-8859-1" when received via HTTP.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1
if not encoding:
encoding = "iso-8859-1"
elif self._response_code in (401, 403):
# 401 or 403 ==> Go away or I will taunt you a second time!
# (according to MK1996)
content = "User-agent: *\nDisallow: /\n"
elif self._response_code == 404:
# No robots.txt ==> everyone's welcome
content = ""
else:
# Uh-oh. I punt this up to the caller.
_raise_error(urllib_error.URLError, self._response_code)
if ((PY_MAJOR_VERSION == 2) and isinstance(content, str)) or \
((PY_MAJOR_VERSION > 2) and (not isinstance(content, str))):
# This ain't Unicode yet! It needs to be.
# Unicode decoding errors are another point of failure that I punt
# up to the caller.
try:
content = content.decode(encoding)
except UnicodeError:
_raise_error(UnicodeError,
"Robots.txt contents are not in the encoding expected (%s)." % encoding)
except (LookupError, ValueError):
# LookupError ==> Python doesn't have a decoder for that encoding.
# One can also get a ValueError here if the encoding starts with
# a dot (ASCII 0x2e). See Python bug 1446043 for details. This
# bug was supposedly fixed in Python 2.5.
_raise_error(UnicodeError,
"I don't understand the encoding \"%s\"." % encoding)
# Now that I've fetched the content and turned it into Unicode, I
# can parse it.
self.parse(content)
def parse(self, s):
"""Parses the passed string as a set of robots.txt rules."""
self._sitemaps = [ ]
self.__rulesets = [ ]
if (PY_MAJOR_VERSION > 2) and (isinstance(s, bytes) or isinstance(s, bytearray)) or \
(PY_MAJOR_VERSION == 2) and (not isinstance(s, unicode)):
s = s.decode("iso-8859-1")
# Normalize newlines.
s = _end_of_line_regex.sub("\n", s)
lines = s.split("\n")
previous_line_was_a_user_agent = False
current_ruleset = None
for line in lines:
line = line.strip()
if line and line[0] == '#':
# "Lines containing only a comment are discarded completely,
# and therefore do not indicate a record boundary." (MK1994)
pass
else:
# Remove comments
i = line.find("#")
if i != -1: line = line[:i]
line = line.strip()
if not line:
# An empty line indicates the end of a ruleset.
if current_ruleset and current_ruleset.is_not_empty():
self.__rulesets.append(current_ruleset)
current_ruleset = None
previous_line_was_a_user_agent = False
else:
# Each non-empty line falls into one of six categories:
# 1) User-agent: blah blah blah
# 2) Disallow: blah blah blah
# 3) Allow: blah blah blah
# 4) Crawl-delay: blah blah blah
# 5) Sitemap: blah blah blah
# 6) Everything else
# 1 - 5 are interesting and I find them with the regex
# below. Category 6 I discard as directed by the MK1994
# ("Unrecognised headers are ignored.")
# Note that 4 & 5 are specific to GYM2008 syntax, but
# respecting them here is not a problem. They're just
                    # additional information that the caller is free to ignore.
matches = _directive_regex.findall(line)
# Categories 1 - 5 produce two matches, #6 produces none.
if matches:
field, data = matches[0]
field = field.lower()
data = _scrub_data(data)
# Matching "useragent" is a deviation from the
# MK1994/96 which permits only "user-agent".
if field in ("useragent", "user-agent"):
if previous_line_was_a_user_agent:
# Add this UA to the current ruleset
if current_ruleset and data:
current_ruleset.add_robot_name(data)
else:
# Save the current ruleset and start a new one.
if current_ruleset and current_ruleset.is_not_empty():
self.__rulesets.append(current_ruleset)
#else:
# (is_not_empty() == False) ==> malformed
# robots.txt listed a UA line but provided
# no name or didn't provide any rules
# for a named UA.
current_ruleset = _Ruleset()
if data:
current_ruleset.add_robot_name(data)
previous_line_was_a_user_agent = True
elif field == "allow":
previous_line_was_a_user_agent = False
if current_ruleset:
current_ruleset.add_allow_rule(data)
elif field == "sitemap":
previous_line_was_a_user_agent = False
self._sitemaps.append(data)
elif field == "crawl-delay":
# Only Yahoo documents the syntax for Crawl-delay.
# ref: http://help.yahoo.com/l/us/yahoo/search/webcrawler/slurp-03.html
previous_line_was_a_user_agent = False
if current_ruleset:
try:
current_ruleset.crawl_delay = float(data)
except ValueError:
# Invalid crawl-delay -- ignore.
pass
else:
# This is a disallow line
previous_line_was_a_user_agent = False
if current_ruleset:
current_ruleset.add_disallow_rule(data)
if current_ruleset and current_ruleset.is_not_empty():
self.__rulesets.append(current_ruleset)
# Now that I have all the rulesets, I want to order them in a way
# that makes comparisons easier later. Specifically, any ruleset that
# contains the default user agent '*' should go at the end of the list
# so that I only apply the default as a last resort. According to
# MK1994/96, there should only be one ruleset that specifies * as the
# user-agent, but you know how these things go.
not_defaults = [r for r in self.__rulesets if not r.is_default()]
defaults = [r for r in self.__rulesets if r.is_default()]
self.__rulesets = not_defaults + defaults
def __str__(self):
s = self.__unicode__()
if PY_MAJOR_VERSION == 2:
s = s.encode("utf-8")
return s
def __unicode__(self):
if self._sitemaps:
s = "Sitemaps: %s\n\n" % self._sitemaps
else:
s = ""
if PY_MAJOR_VERSION < 3:
s = unicode(s)
# I also need to string-ify each ruleset. The function for doing so
# varies under Python 2/3.
stringify = (unicode if (PY_MAJOR_VERSION == 2) else str)
return s + '\n'.join( [stringify(ruleset) for ruleset in self.__rulesets] )
class RobotFileParserLookalike(RobotExclusionRulesParser):
"""A drop-in replacement for the Python standard library's RobotFileParser
that retains all of the features of RobotExclusionRulesParser.
"""
def __init__(self, url = ""):
RobotExclusionRulesParser.__init__(self)
self._user_provided_url = ""
self.last_checked = None
self.set_url(url)
def set_url(self, url):
# I don't want to stuff this into self._source_url because
# _source_url is set only as a side effect of calling fetch().
self._user_provided_url = url
def read(self):
RobotExclusionRulesParser.fetch(self, self._user_provided_url)
def parse(self, lines):
RobotExclusionRulesParser.parse(self, ''.join(lines))
def can_fetch(self, user_agent, url, syntax=GYM2008):
return RobotExclusionRulesParser.is_allowed(self, user_agent, url, syntax)
def mtime(self):
return self.last_checked
def modified(self):
self.last_checked = time.time()
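# Drop-in usage sketch mirroring the stdlib robotparser API (URL and robot
# name reuse the illustrative values from the module docstring):
#   rp = RobotFileParserLookalike('http://www.example.com/robots.txt')
#   rp.read()
#   ok = rp.can_fetch('CrunchyFrogBot', '/foo.html')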
| chfoo/wpull | wpull/thirdparty/robotexclusionrulesparser.py | Python | gpl-3.0 | 29,545 | [
"VisIt"
] | 4002b26c3647a13b40aa13bbf3ccee270b31fc86f8de17731aa389214d44249e |
""" RabbitMQSync service to synchronize the internal RabbitMQ database.
according to CS content. The whole work is done by the RabbitMQSynchronizer
that is activated when the CS was changed.
"""
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC import S_OK
from DIRAC import gConfig
from DIRAC.FrameworkSystem.Utilities import RabbitMQSynchronizer
class RabbitMQSyncHandler(RequestHandler):
"""Service to synchronize the content of internal RabbitMQ database
with the CS content. The work is done by the RabbitMQSynchronizer
that acts when the CS is changed.
"""
@classmethod
def initializeHandler(cls, _serviceInfo):
"""Handler initialization"""
syncObject = RabbitMQSynchronizer.RabbitMQSynchronizer()
gConfig.addListenerToNewVersionEvent(syncObject.sync)
return S_OK()
| DIRACGrid/DIRAC | src/DIRAC/FrameworkSystem/Service/RabbitMQSyncHandler.py | Python | gpl-3.0 | 859 | [
"DIRAC"
] | 12575fdca68f825e2184f1e5ddcfb43eadfa1bf455c88c342e3cfd1071d927a7 |
import os
from astropy.table import Table
import matplotlib.pyplot as plt
# setup information sources
stack = Table.read(os.path.join(os.environ['ANALYSISDIR'],'stack_test','stack_IR6p1_mom1.fits'))
plotDir = os.path.join(os.environ['ANALYSISDIR'],'plots','radial_plots')
degas = Table.read(os.path.join(os.environ['SCRIPTDIR'],'degas_base.fits'))
if not os.path.exists(plotDir):
os.mkdir(plotDir)
# plot setup
style = {'CO': {'marker':'o','color':'orange','name':'CO'},
'HCN': {'marker':'o','color':'green','name':'HCN'},
'HCOp': {'marker':'o','color':'blue','name':'HCO+'},
'13CO': {'marker':'o','color':'red', 'name':'13CO'},
'C18O': {'marker':'o','color': 'magenta','name':'C18O'},
'comass_mean': {'marker':'o','color':'orange','name':r'$\Sigma_{CO}$ (M$_\odot/pc^2$)'},
'mstar_mean': {'marker':'o','color':'green','name':r'$\Sigma_*$ (M$_\odot/pc^2$)'},
'sfr_mean':{'marker':'o','color':'blue','name':r'$\Sigma_{SFR}$ (M$_\odot/yr/pc^2$)'},
'ratio_HCN_CO': {'marker':'o','color':'green','name':'HCN/$^{12}$CO'},
'ratio_HCOp_CO': {'marker':'o','color':'blue','name':'HCO+/$^{12}$CO'},
'ratio_13CO_CO': {'marker':'o','color':'red','name':'$^{13}$CO/$^{12}$CO'},
'ratio_C18O_CO': {'marker':'o','color':'orange','name':'$C^{18}$O/$^{12}$CO'},
'ratio_HCOp_HCN': {'marker':'o','color':'green','name':'HCO+/HCN'}}
# only look at dr1 galaxies
dr1 = degas['DR1'] == 1
# turn on different plotting
doline = True
doother = True
docoratio = True
doratio = True
# for each dr1 galaxy, show radial trends for each line.
for galaxy in degas[dr1]:
plt.close()
idx = ( (stack['galaxy'] == galaxy['NAME']) \
& (stack['bin_type'] == 'radius'))
    # get radius in kpc -- the stacked 'bin_mean' radii are in arcsec
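    # (scale check with an illustrative distance: at DIST_MPC = 10, one arcsec
    #  subtends 10e6 pc / 206265 ~ 48.5 pc, and the final /1e3 converts pc
    #  to kpc)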
radius = (stack[idx]['bin_mean'] * galaxy['DIST_MPC'] * 1e6 / 206265.0) / 1e3
if doline:
for line in ['CO','HCN','HCOp','13CO','C18O']:
if (galaxy['NAME'] == 'NGC6946') & ((line == '13CO') | (line == 'C18O')):
continue
# determine whether upper or lower limits
uplims = stack[idx]['int_intensity_sum_uplim_'+line]
int_intensity = stack[idx]['int_intensity_sum_'+line]
yerr = stack[idx]['int_intensity_sum_err_'+line]
yerr[uplims] = int_intensity[uplims] * 0.3
plt.errorbar(radius, int_intensity,
yerr = yerr,
uplims = uplims,
marker = style[line]['marker'],
color = style[line]['color'],
linestyle = '--',
label = style[line]['name'])
plt.yscale('log')
plt.legend()
plt.title(galaxy['NAME'])
plt.xlabel(r"Galactocentric Radius (kpc)")
plt.ylabel(r"Stacked Integrated Intensity (K km s$^{-1}$)")
plt.show()
plt.savefig(os.path.join(plotDir,galaxy['NAME']+'_radial_lines.pdf'))
plt.close()
if doother:
for otherdata in ['comass_mean', 'mstar_mean', 'sfr_mean']:
if otherdata == 'sfr_mean':
factor = 1e10
else:
factor = 1.0
plt.plot(radius, stack[idx][otherdata]*factor,
marker = style[otherdata]['marker'],
color = style[otherdata]['color'],
linestyle = '--',
label = style[otherdata]['name'])
plt.yscale('log')
plt.legend()
plt.title(galaxy['NAME'])
plt.xlabel(r"Galactocentric Radius (kpc)")
plt.ylabel(r"Surface Density")
plt.show()
plt.savefig(os.path.join(plotDir,galaxy['NAME']+'_radial_other.pdf'))
plt.close()
if docoratio:
for coratio in ['ratio_HCN_CO','ratio_HCOp_CO','ratio_13CO_CO','ratio_C18O_CO']:
uplims = stack[idx][coratio+'_uplim']
ratio = stack[idx][coratio]
yerr = stack[idx][coratio+'_err']
yerr[uplims] = ratio[uplims] * 0.3
plt.errorbar(radius, ratio,
yerr = yerr,
uplims = uplims,
marker = style[coratio]['marker'],
color = style[coratio]['color'],
linestyle = '--',
label = style[coratio]['name'])
plt.yscale('log')
plt.legend()
plt.title(galaxy['NAME'])
plt.xlabel(r"Galactocentric Radius (kpc)")
plt.ylabel(r"Ratio")
plt.show()
plt.savefig(os.path.join(plotDir,galaxy['NAME']+'_radial_coratio.pdf'))
plt.close()
if doratio:
for myratio in ['ratio_HCOp_HCN']:
uplims = stack[idx][myratio+'_uplim']
lolims = stack[idx][myratio+'_lolim']
ratio = stack[idx][myratio]
yerr = stack[idx][myratio+'_err']
yerr[uplims] = ratio[uplims] *0.3
yerr[lolims] = ratio[lolims] * 0.3
plt.errorbar(radius, ratio,
yerr = yerr,
uplims = uplims,
lolims = lolims,
marker = style[myratio]['marker'],
color = style[myratio]['color'],
linestyle = '--',
label = style[myratio]['name'])
plt.legend()
plt.title(galaxy['NAME'])
plt.xlabel(r"Galactocentric Radius (kpc)")
plt.ylabel(r"Ratio")
plt.show()
plt.savefig(os.path.join(plotDir,galaxy['NAME']+'_radial_ratio.pdf'))
plt.close()
| low-sky/degas | scripts/plot_radial_trends.py | Python | gpl-3.0 | 5,821 | ["Galaxy"] | 2736969bd47781141501dc4f350095cc766c75d4a183633f9288c5d8a33c7528 |
#!/usr/bin/env python
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation
Required: Python 2.1 or later
Recommended: Python 2.3 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "4.2-pre-" + "$Revision: 308 $"[11:14] + "-svn"
__license__ = """Copyright (c) 2002-2008, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>",
"Sam Ruby <http://intertwingly.net/>",
"Ade Oshineye <http://blog.oshineye.com/>"]
_debug = 0
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
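# e.g. a hypothetical embedding application might set:
# USER_AGENT = "MyNewsReader/1.0 +http://example.com/myreader/"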
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2
try:
from cStringIO import StringIO as _StringIO
except:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except:
gzip = None
try:
import zlib
except:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
from xml.sax.saxutils import escape as _xmlescape
_XML_AVAILABLE = 1
except:
_XML_AVAILABLE = 0
def _xmlescape(data,entities={}):
data = data.replace('&', '&amp;')
data = data.replace('>', '&gt;')
data = data.replace('<', '&lt;')
for char, entity in entities.items():
data = data.replace(char, entity)
return data
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except:
base64 = binascii = None
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
if _debug:
import chardet.constants
chardet.constants._debug = 1
except:
chardet = None
# reversible htmlentitydefs mappings for Python 2.2
try:
from htmlentitydefs import name2codepoint, codepoint2name
except:
import htmlentitydefs
name2codepoint={}
codepoint2name={}
for (name,codepoint) in htmlentitydefs.entitydefs.iteritems():
if codepoint.startswith('&#'): codepoint=unichr(int(codepoint[2:-1]))
name2codepoint[name]=ord(codepoint)
codepoint2name[ord(codepoint)]=name
# BeautifulSoup parser used for parsing microformats from embedded HTML content
# http://www.crummy.com/software/BeautifulSoup/
# feedparser is tested with BeautifulSoup 3.0.x, but it might work with the
# older 2.x series. If it doesn't, and you can figure out why, I'll accept a
# patch and modify the compatibility statement accordingly.
try:
import BeautifulSoup
except:
BeautifulSoup = None
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(\d+|x[0-9a-fA-F]+);')
if sgmllib.endbracket.search(' <').start(0):
class EndBracketMatch:
endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
def search(self,string,index=0):
self.match = self.endbracket.match(string,index)
if self.match: return self
def start(self,n):
return self.match.end(n)
sgmllib.endbracket = EndBracketMatch()
SUPPORTED_VERSIONS = {'': 'unknown',
'rss090': 'RSS 0.90',
'rss091n': 'RSS 0.91 (Netscape)',
'rss091u': 'RSS 0.91 (Userland)',
'rss092': 'RSS 0.92',
'rss093': 'RSS 0.93',
'rss094': 'RSS 0.94',
'rss20': 'RSS 2.0',
'rss10': 'RSS 1.0',
'rss': 'RSS (unknown version)',
'atom01': 'Atom 0.1',
'atom02': 'Atom 0.2',
'atom03': 'Atom 0.3',
'atom10': 'Atom 1.0',
'atom': 'Atom (unknown version)',
'cdf': 'CDF',
'hotrss': 'Hot RSS'
}
try:
UserDict = dict
except NameError:
# Python 2.1 does not have dict
from UserDict import UserDict
def dict(aList):
rc = {}
for k, v in aList:
rc[k] = v
return rc
class FeedParserDict(UserDict):
keymap = {'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['subtitle', 'summary'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail'}
def __getitem__(self, key):
if key == 'category':
return UserDict.__getitem__(self, 'tags')[0]['term']
if key == 'enclosures':
norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
return [norel(link) for link in UserDict.__getitem__(self, 'links') if link['rel']=='enclosure']
if key == 'license':
for link in UserDict.__getitem__(self, 'links'):
if link['rel']=='license' and link.has_key('href'):
return link['href']
if key == 'categories':
return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
realkey = self.keymap.get(key, key)
if type(realkey) == types.ListType:
for k in realkey:
if UserDict.has_key(self, k):
return UserDict.__getitem__(self, k)
if UserDict.has_key(self, key):
return UserDict.__getitem__(self, key)
return UserDict.__getitem__(self, realkey)
def __setitem__(self, key, value):
for k in self.keymap.keys():
if key == k:
key = self.keymap[k]
if type(key) == types.ListType:
key = key[0]
return UserDict.__setitem__(self, key, value)
def get(self, key, default=None):
if self.has_key(key):
return self[key]
else:
return default
def setdefault(self, key, value):
if not self.has_key(key):
self[key] = value
return self[key]
def has_key(self, key):
try:
return hasattr(self, key) or UserDict.has_key(self, key)
except AttributeError:
return False
def __getattr__(self, key):
try:
return self.__dict__[key]
except KeyError:
pass
try:
assert not key.startswith('_')
return self.__getitem__(key)
except:
raise AttributeError, "object has no attribute '%s'" % key
def __setattr__(self, key, value):
if key.startswith('_') or key == 'data':
self.__dict__[key] = value
else:
return self.__setitem__(key, value)
def __contains__(self, key):
return self.has_key(key)
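# a sketch of the keymap aliasing above (illustrative only):
#   d = FeedParserDict()
#   d['description'] = 'hi'  # __setitem__ stores this under 'subtitle'
#   d['tagline']             # -> 'hi' ('tagline' also aliases 'subtitle')
#   d.subtitle               # -> 'hi' (__getattr__ falls through to __getitem__)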
def zopeCompatibilityHack():
global FeedParserDict
del FeedParserDict
def FeedParserDict(aDict=None):
rc = {}
if aDict:
rc.update(aDict)
return rc
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
global _ebcdic_to_ascii_map
if not _ebcdic_to_ascii_map:
emap = (
0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
)
import string
_ebcdic_to_ascii_map = string.maketrans( \
''.join(map(chr, range(256))), ''.join(map(chr, emap)))
return s.translate(_ebcdic_to_ascii_map)
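# e.g. the cp037 bytes '\xc8\x85\x93\x93\x96' translate to 'Hello' under the
# map above (an illustrative spot check, not from the original source).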
_cp1252 = {
unichr(128): unichr(8364), # euro sign
unichr(130): unichr(8218), # single low-9 quotation mark
unichr(131): unichr( 402), # latin small letter f with hook
unichr(132): unichr(8222), # double low-9 quotation mark
unichr(133): unichr(8230), # horizontal ellipsis
unichr(134): unichr(8224), # dagger
unichr(135): unichr(8225), # double dagger
unichr(136): unichr( 710), # modifier letter circumflex accent
unichr(137): unichr(8240), # per mille sign
unichr(138): unichr( 352), # latin capital letter s with caron
unichr(139): unichr(8249), # single left-pointing angle quotation mark
unichr(140): unichr( 338), # latin capital ligature oe
unichr(142): unichr( 381), # latin capital letter z with caron
unichr(145): unichr(8216), # left single quotation mark
unichr(146): unichr(8217), # right single quotation mark
unichr(147): unichr(8220), # left double quotation mark
unichr(148): unichr(8221), # right double quotation mark
unichr(149): unichr(8226), # bullet
unichr(150): unichr(8211), # en dash
unichr(151): unichr(8212), # em dash
unichr(152): unichr( 732), # small tilde
unichr(153): unichr(8482), # trade mark sign
unichr(154): unichr( 353), # latin small letter s with caron
unichr(155): unichr(8250), # single right-pointing angle quotation mark
unichr(156): unichr( 339), # latin small ligature oe
unichr(158): unichr( 382), # latin small letter z with caron
unichr(159): unichr( 376)} # latin capital letter y with diaeresis
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
try:
return urlparse.urljoin(base, uri)
except:
uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)])
return urlparse.urljoin(base, uri)
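# e.g. _urljoin('http://example.com/feeds/', 'atom.xml') ->
# 'http://example.com/feeds/atom.xml'; _urifixer first collapses stray
# slashes after the scheme, e.g. 'http:///a/b' -> 'http://a/b'.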
class _FeedParserMixin:
namespaces = {'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
#Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
'http://search.yahoo.com/mrss/': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/1999/xlink': 'xlink',
'http://www.w3.org/XML/1998/namespace': 'xml'
}
_matchnamespaces = {}
can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo']
can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
html_types = ['text/html', 'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
if _debug: sys.stderr.write('initializing FeedParser\n')
if not self._matchnamespaces:
for k, v in self.namespaces.items():
self._matchnamespaces[k.lower()] = v
self.feeddata = FeedParserDict() # feed-level data
self.encoding = encoding # character encoding
self.entries = [] # list of entry-level data
self.version = '' # feed type/version, see SUPPORTED_VERSIONS
self.namespacesInUse = {} # dictionary of namespaces defined by the feed
# the following are used internally to track state;
# this is really out of control and should be refactored
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.inpublisher = 0
self.insource = 0
self.sourcedata = FeedParserDict()
self.contentparams = FeedParserDict()
self._summaryKey = None
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.baseuri = baseuri or ''
self.lang = baselang or None
self.svgOK = 0
self.hasTitle = 0
if baselang:
self.feeddata['language'] = baselang.replace('_','-')
def unknown_starttag(self, tag, attrs):
if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
# normalize attrs
attrs = [(k.lower(), v) for k, v in attrs]
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
if type(baseuri) != type(u''):
try:
baseuri = unicode(baseuri, self.encoding)
except:
baseuri = unicode(baseuri, 'iso-8859-1')
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang.replace('_','-')
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
if tag in ['xhtml:div', 'div']: return # typepad does this 10/2007
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
if tag.find(':') <> -1:
prefix, tag = tag.split(':', 1)
namespace = self.namespacesInUse.get(prefix, '')
if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrs.append(('xmlns',namespace))
if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
attrs.append(('xmlns',namespace))
if tag == 'svg': self.svgOK += 1
return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in illformed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
# Since there's no handler or something has gone wrong we explicitly add the element and its attributes
unknown_tag = prefix + suffix
if len(attrsD) == 0:
# No attributes so merge it into the enclosing dictionary
return self.push(unknown_tag, 1)
else:
# Has attributes so create it in its own dictionary
context = self._getContext()
context[unknown_tag] = attrsD
def unknown_endtag(self, tag):
if _debug: sys.stderr.write('end %s\n' % tag)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
if suffix == 'svg' and self.svgOK: self.svgOK -= 1
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
if self.svgOK: raise AttributeError()
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
# element declared itself as escaped markup, but it isn't really
if tag in ['xhtml:div', 'div']: return # typepad does this 10/2007
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
tag = tag.split(':')[-1]
self.handle_data('</%s>' % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
def handle_charref(self, ref):
# called for each character reference, e.g. for ' ', ref will be '160'
if not self.elementstack: return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_entityref(self, ref):
# called for each entity reference, e.g. for '©', ref will be 'copy'
if not self.elementstack: return
if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
elif ref in self.entities.keys():
text = self.entities[ref]
if text.startswith('&#') and text.endswith(';'):
return self.handle_entityref(text)
else:
try: name2codepoint[ref]
except KeyError: text = '&%s;' % ref
else: text = unichr(name2codepoint[ref]).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack: return
if escape and self.contentparams.get('type') == 'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
def handle_comment(self, text):
# called for each comment, e.g. <!-- insert message here -->
pass
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
pass
def handle_decl(self, text):
pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if _debug: sys.stderr.write('entering parse_declaration\n')
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
# CDATA block began but didn't finish
k = len(self.rawdata)
return k
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
if k >= 0:
return k+1
else:
# We have an incomplete CDATA block.
return k
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
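# e.g. mapContentType('html') -> 'text/html' and mapContentType('XHTML') ->
# 'application/xhtml+xml'; full MIME types pass through lowercased.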
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
self.version = 'rss090'
if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
self.version = 'rss10'
if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
self.version = 'atom10'
if loweruri.find('backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
uri = 'http://backend.userland.com/rss'
loweruri = uri
if self._matchnamespaces.has_key(loweruri):
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or '', uri)
def decodeEntities(self, element, data):
return data
def strattrs(self, attrs):
return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'&quot;'})) for t in attrs])
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
if not self.elementstack: return
if self.elementstack[-1][0] != element: return
element, expectingText, pieces = self.elementstack.pop()
if self.version == 'atom10' and self.contentparams.get('type','text') == 'application/xhtml+xml':
# remove enclosing child element, but only if it is a <div> and
# only if all the remaining content is nested underneath it.
# This means that the divs would be retained in the following:
# <div>foo</div><div>bar</div>
while pieces and len(pieces)>1 and not pieces[-1].strip():
del pieces[-1]
while pieces and len(pieces)>1 and not pieces[0].strip():
del pieces[0]
if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
depth = 0
for piece in pieces[:-1]:
if piece.startswith('</'):
depth -= 1
if depth == 0: break
elif piece.startswith('<') and not piece.endswith('/>'):
depth += 1
else:
pieces = pieces[1:-1]
output_raw = output = ''.join(pieces)
if stripWhitespace:
output = output.strip()
if not expectingText: return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = base64.decodestring(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
output = self.resolveURI(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
if self.lookslikehtml(output):
self.contentparams['type']='text/html'
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
is_htmlish = self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types
# resolve relative URIs within embedded markup
if is_htmlish and RESOLVE_RELATIVE_URIS:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', 'text/html'))
# parse microformats
# (must do this before sanitizing because some microformats
# rely on elements that we sanitize)
if is_htmlish and element in ['content', 'description', 'summary']:
mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
if mfresults:
for tag in mfresults.get('tags', []):
self._addTag(tag['term'], tag['scheme'], tag['label'])
for enclosure in mfresults.get('enclosures', []):
self._start_enclosure(enclosure)
for xfn in mfresults.get('xfn', []):
self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
vcard = mfresults.get('vcard')
if vcard:
self._getContext()['vcard'] = vcard
# sanitize embedded markup
if is_htmlish and SANITIZE_HTML:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', 'text/html'))
if self.encoding and type(output) != type(u''):
try:
output = unicode(output, self.encoding)
except:
pass
# address common error where people take data that is already
# utf-8, presume that it is iso-8859-1, and re-encode it.
if self.encoding=='utf-8' and type(output) == type(u''):
try:
output = unicode(output.encode('iso-8859-1'), 'utf-8')
except:
pass
# map win-1252 extensions to the proper code points
if type(output) == type(u''):
output = u''.join([c in _cp1252.keys() and _cp1252[c] or c for c in output])
# categories/tags/keywords/whatever are handled in _end_category
if element == 'category':
return output
if element == 'title' and self.hasTitle:
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
contentparams['value_raw'] = output_raw
self.entries[-1][element].append(contentparams)
elif element == 'link':
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
contentparams['value_raw'] = output_raw
self.entries[-1][element + '_detail'] = contentparams
elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
context = self._getContext()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
contentparams['value_raw'] = output_raw
context[element + '_detail'] = contentparams
return output
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
if self.lang: self.lang=self.lang.replace('_','-')
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored. This is an attempt to detect
# the most common cases. As false positives often result in silent
# data loss, this function errs on the conservative side.
def lookslikehtml(self, str):
if self.version.startswith('atom'): return
if self.contentparams.get('type','text/html') != 'text/plain': return
# must have a close tag or an entity reference to qualify
if not (re.search(r'</(\w+)>',str) or re.search("&#?\w+;",str)): return
# all tags must be in a restricted subset of valid HTML tags
if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
re.findall(r'</?(\w+)',str)): return
# all entities must have been defined as valid HTML entities
from htmlentitydefs import entitydefs
if filter(lambda e: e not in entitydefs.keys(),
re.findall(r'&(\w+);',str)): return
return 1
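# illustrative inputs/outputs for lookslikehtml (assuming a non-Atom feed and
# contentparams type 'text/plain'):
#   'a <b>bold</b> claim' -> 1    (close tag from the sanitizer's whitelist)
#   'x < y and y > z'     -> None (no close tag or entity reference)
#   '<script>x</script>'  -> None ('script' is not an acceptable element)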
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value):
context = self._getContext()
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': 'rss091u',
'0.92': 'rss092',
'0.93': 'rss093',
'0.94': 'rss094'}
#If we're here then this is an RSS feed.
#If we don't have a version or have a version that starts with something
#other than RSS then there's been a mistake. Correct it.
if not self.version or not self.version.startswith('rss'):
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = 'rss20'
else:
self.version = 'rss'
def _start_dlhottitles(self, attrsD):
self.version = 'hotrss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
_start_feedinfo = _start_channel
def _cdf_common(self, attrsD):
if attrsD.has_key('lastmod'):
self._start_modified({})
self.elementstack[-1][-1] = attrsD['lastmod']
self._end_modified()
if attrsD.has_key('href'):
self._start_link({})
self.elementstack[-1][-1] = attrsD['href']
self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': 'atom01',
'0.2': 'atom02',
'0.3': 'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = 'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
context = self._getContext()
context.setdefault('image', FeedParserDict())
self.inimage = 1
self.hasTitle = 0
self.push('image', 0)
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
self.intextinput = 1
self.hasTitle = 0
self.push('textinput', 0)
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrsD):
self.inauthor = 1
self.push('author', 1)
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
_start_itunes_author = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
_end_itunes_author = _end_author
def _start_itunes_owner(self, attrsD):
self.inpublisher = 1
self.push('publisher', 0)
def _end_itunes_owner(self):
self.pop('publisher')
self.inpublisher = 0
self._sync_author_detail('publisher')
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_dc_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
def _start_name(self, attrsD):
self.push('name', 0)
_start_itunes_name = _start_name
def _end_name(self):
value = self.pop('name')
if self.inpublisher:
self._save_author('name', value, 'publisher')
elif self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._getContext()
context['name'] = value
_end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
def _start_url(self, attrsD):
self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('href')
if self.inauthor:
self._save_author('href', value)
elif self.incontributor:
self._save_contributor('href', value)
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrsD):
self.push('email', 0)
_start_itunes_email = _start_email
def _end_email(self):
value = self.pop('email')
if self.inpublisher:
self._save_author('email', value, 'publisher')
elif self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
_end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inimage:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value, prefix='author'):
context = self._getContext()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = '%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author: return
emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, '')
author = author.replace('()', '')
author = author.replace('<>', '')
author = author.replace('&lt;&gt;', '')
author = author.strip()
if author and (author[0] == '('):
author = author[1:]
if author and (author[-1] == ')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, FeedParserDict())
if author:
context['%s_detail' % key]['name'] = author
if email:
context['%s_detail' % key]['email'] = email
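# e.g. a context value of author = 'Jane Doe (jane@example.com)' (made-up)
# is decomposed by the branch above into
#   context['author_detail'] = {'name': 'Jane Doe', 'email': 'jane@example.com'}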
def _start_subtitle(self, attrsD):
self.pushContent('subtitle', attrsD, 'text/plain', 1)
_start_tagline = _start_subtitle
_start_itunes_subtitle = _start_subtitle
def _end_subtitle(self):
self.popContent('subtitle')
_end_tagline = _end_subtitle
_end_itunes_subtitle = _end_subtitle
def _start_rights(self, attrsD):
self.pushContent('rights', attrsD, 'text/plain', 1)
_start_dc_rights = _start_rights
_start_copyright = _start_rights
def _end_rights(self):
self.popContent('rights')
_end_dc_rights = _end_rights
_end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
self.hasTitle = 0
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
_start_product = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
def _start_dc_language(self, attrsD):
self.push('language', 1)
_start_language = _start_dc_language
def _end_dc_language(self):
self.lang = self.pop('language')
_end_language = _end_dc_language
def _start_dc_publisher(self, attrsD):
self.push('publisher', 1)
_start_webmaster = _start_dc_publisher
def _end_dc_publisher(self):
self.pop('publisher')
self._sync_author_detail('publisher')
_end_webmaster = _end_dc_publisher
def _start_published(self, attrsD):
self.push('published', 1)
_start_dcterms_issued = _start_published
_start_issued = _start_published
def _end_published(self):
value = self.pop('published')
self._save('published_parsed', _parse_date(value))
_end_dcterms_issued = _end_published
_end_issued = _end_published
def _start_updated(self, attrsD):
self.push('updated', 1)
_start_modified = _start_updated
_start_dcterms_modified = _start_updated
_start_pubdate = _start_updated
_start_dc_date = _start_updated
def _end_updated(self):
value = self.pop('updated')
parsed_value = _parse_date(value)
self._save('updated_parsed', parsed_value)
_end_modified = _end_updated
_end_dcterms_modified = _end_updated
_end_pubdate = _end_updated
_end_dc_date = _end_updated
def _start_created(self, attrsD):
self.push('created', 1)
_start_dcterms_created = _start_created
def _end_created(self):
value = self.pop('created')
self._save('created_parsed', _parse_date(value))
_end_dcterms_created = _end_created
def _start_expirationdate(self, attrsD):
self.push('expired', 1)
def _end_expirationdate(self):
self._save('expired_parsed', _parse_date(self.pop('expired')))
def _start_cc_license(self, attrsD):
context = self._getContext()
value = self._getAttribute(attrsD, 'rdf:resource')
attrsD = FeedParserDict()
attrsD['rel']='license'
if value: attrsD['href']=value
context.setdefault('links', []).append(attrsD)
def _start_creativecommons_license(self, attrsD):
self.push('license', 1)
_start_creativeCommons_license = _start_creativecommons_license
def _end_creativecommons_license(self):
value = self.pop('license')
context = self._getContext()
attrsD = FeedParserDict()
attrsD['rel']='license'
if value: attrsD['href']=value
context.setdefault('links', []).append(attrsD)
del context['license']
_end_creativeCommons_license = _end_creativecommons_license
def _addXFN(self, relationships, href, name):
context = self._getContext()
xfn = context.setdefault('xfn', [])
value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name})
if value not in xfn:
xfn.append(value)
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label): return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(value)
def _start_category(self, attrsD):
if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
self._addTag(term, scheme, label)
self.push('category', 1)
_start_dc_subject = _start_category
_start_keywords = _start_category
def _end_itunes_keywords(self):
for term in self.pop('itunes_keywords').split():
self._addTag(term, 'http://www.itunes.com/', None)
def _start_itunes_category(self, attrsD):
self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
self.push('category', 1)
def _end_category(self):
value = self.pop('category')
if not value: return
context = self._getContext()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
tags[-1]['term'] = value
else:
self._addTag(value, None, None)
_end_dc_subject = _end_category
_end_keywords = _end_category
_end_itunes_category = _end_category
def _start_cloud(self, attrsD):
self._getContext()['cloud'] = FeedParserDict(attrsD)
def _start_link(self, attrsD):
attrsD.setdefault('rel', 'alternate')
if attrsD['rel'] == 'self':
attrsD.setdefault('type', 'application/atom+xml')
else:
attrsD.setdefault('type', 'text/html')
context = self._getContext()
attrsD = self._itsAnHrefDamnIt(attrsD)
if attrsD.has_key('href'):
attrsD['href'] = self.resolveURI(attrsD['href'])
if attrsD.get('rel')=='enclosure' and not context.get('id'):
context['id'] = attrsD.get('href')
expectingText = self.infeed or self.inentry or self.insource
context.setdefault('links', [])
context['links'].append(FeedParserDict(attrsD))
if attrsD.has_key('href'):
expectingText = 0
if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
context['link'] = attrsD['href']
else:
self.push('link', expectingText)
_start_producturl = _start_link
def _end_link(self):
value = self.pop('link')
context = self._getContext()
_end_producturl = _end_link
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
self._save('link', value)
def _start_title(self, attrsD):
if self.svgOK: return self.unknown_starttag('title', attrsD.items())
self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
_start_dc_title = _start_title
_start_media_title = _start_title
def _end_title(self):
if self.svgOK: return
value = self.popContent('title')
if not value: return
context = self._getContext()
self.hasTitle = 1
_end_dc_title = _end_title
def _end_media_title(self):
hasTitle = self.hasTitle
self._end_title()
self.hasTitle = hasTitle
def _start_description(self, attrsD):
context = self._getContext()
if context.has_key('summary'):
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
_start_dc_description = _start_description
def _start_abstract(self, attrsD):
self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
self._end_content()
else:
value = self.popContent('description')
self._summaryKey = None
_end_abstract = _end_description
_end_dc_description = _end_description
def _start_info(self, attrsD):
self.pushContent('info', attrsD, 'text/plain', 1)
_start_feedburner_browserfriendly = _start_info
def _end_info(self):
self.popContent('info')
_end_feedburner_browserfriendly = _end_info
def _start_generator(self, attrsD):
if attrsD:
attrsD = self._itsAnHrefDamnIt(attrsD)
if attrsD.has_key('href'):
attrsD['href'] = self.resolveURI(attrsD['href'])
self._getContext()['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
context = self._getContext()
if context.has_key('generator_detail'):
context['generator_detail']['name'] = value
def _start_admin_generatoragent(self, attrsD):
self.push('generator', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('generator')
self._getContext()['generator_detail'] = FeedParserDict({'href': value})
def _start_admin_errorreportsto(self, attrsD):
self.push('errorreportsto', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('errorreportsto')
def _start_summary(self, attrsD):
context = self._getContext()
if context.has_key('summary'):
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self._summaryKey = 'summary'
self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
_start_itunes_summary = _start_summary
def _end_summary(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.popContent(self._summaryKey or 'summary')
self._summaryKey = None
_end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
context = self._getContext()
attrsD['rel']='enclosure'
context.setdefault('links', []).append(FeedParserDict(attrsD))
href = attrsD.get('href')
if href and not context.get('id'):
context['id'] = href
def _start_source(self, attrsD):
if 'url' in attrsD:
# This means that we're processing a source element from an RSS 2.0 feed
self.sourcedata['href'] = attrsD[u'url']
self.push('source', 1)
self.insource = 1
self.hasTitle = 0
def _end_source(self):
self.insource = 0
value = self.pop('source')
if value:
self.sourcedata['title'] = value
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
def _start_content(self, attrsD):
self.pushContent('content', attrsD, 'text/plain', 1)
src = attrsD.get('src')
if src:
self.contentparams['src'] = src
self.push('content', 1)
def _start_prodlink(self, attrsD):
self.pushContent('content', attrsD, 'text/html', 1)
def _start_body(self, attrsD):
self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
self.pushContent('content', attrsD, 'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
value = self.popContent('content')
if copyToDescription:
self._save('description', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
_end_prodlink = _end_content
def _start_itunes_image(self, attrsD):
self.push('itunes_image', 0)
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
def _end_itunes_explicit(self):
value = self.pop('itunes_explicit', 0)
self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0
def _start_media_content(self, attrsD):
context = self._getContext()
context.setdefault('media_content', [])
context['media_content'].append(attrsD)
def _start_media_thumbnail(self, attrsD):
context = self._getContext()
context.setdefault('media_thumbnail', [])
self.push('url', 1) # new
context['media_thumbnail'].append(attrsD)
def _end_media_thumbnail(self):
url = self.pop('url')
context = self._getContext()
if url != None and len(url.strip()) != 0:
if not context['media_thumbnail'][-1].has_key('url'):
context['media_thumbnail'][-1]['url'] = url
def _start_media_player(self, attrsD):
self.push('media_player', 0)
self._getContext()['media_player'] = FeedParserDict(attrsD)
def _end_media_player(self):
value = self.pop('media_player')
context = self._getContext()
context['media_player']['content'] = value
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
if _debug: sys.stderr.write('trying StrictFeedParser\n')
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
self.exc = None
def startPrefixMapping(self, prefix, uri):
self.trackNamespace(prefix, uri)
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if lowernamespace.find('backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
namespace = 'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
localname = str(localname).lower()
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD = {}
if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrsD['xmlns']=namespace
if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
attrsD['xmlns']=namespace
if prefix:
localname = prefix.lower() + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
self.unknown_starttag(localname, attrsD.items())
def characters(self, text):
self.handle_data(text)
def endElementNS(self, name, qname):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = ''
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if prefix:
localname = prefix + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
localname = str(localname).lower()
self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
def fatalError(self, exc):
self.error(exc)
raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
special = re.compile('''[<>'"]''')
bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
'img', 'input', 'isindex', 'link', 'meta', 'param']
def __init__(self, encoding, type):
self.encoding = encoding
self.type = type
if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
sgmllib.SGMLParser.__init__(self)
def reset(self):
self.pieces = []
sgmllib.SGMLParser.reset(self)
def _shorttag_replace(self, match):
tag = match.group(1)
if tag in self.elements_no_end_tag:
return '<' + tag + ' />'
else:
return '<' + tag + '></' + tag + '>'
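# e.g., via the shorttag regex in feed() below:
#   '<br/>'   -> '<br />'         ('br' takes no end tag)
#   '<span/>' -> '<span></span>'  (expanded so sgmllib sees a real end tag)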
def parse_starttag(self,i):
j=sgmllib.SGMLParser.parse_starttag(self, i)
if self.type == 'application/xhtml+xml':
if j>2 and self.rawdata[j-2:j]=='/>':
self.unknown_endtag(self.lasttag)
return j
def feed(self, data):
data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
#data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
data = data.replace('&#39;', "'")
data = data.replace('&#34;', '"')
if self.encoding and type(data) == type(u''):
data = data.encode(self.encoding)
sgmllib.SGMLParser.feed(self, data)
sgmllib.SGMLParser.close(self)
def normalize_attrs(self, attrs):
if not attrs: return attrs
# utility method to be called by descendants
attrs = dict([(k.lower(), v) for k, v in attrs]).items()
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
attrs.sort()
return attrs
def unknown_starttag(self, tag, attrs):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
uattrs = []
strattrs=''
if attrs:
for key, value in attrs:
value=value.replace('&gt;','>').replace('&lt;','<').replace('&quot;','"')
value = self.bare_ampersand.sub("&amp;", value)
# thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
if type(value) != type(u''):
try:
value = unicode(value, self.encoding)
except:
value = unicode(value, 'iso-8859-1')
uattrs.append((unicode(key, self.encoding), value))
strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
if self.encoding:
try:
strattrs=strattrs.encode(self.encoding)
except:
pass
if tag in self.elements_no_end_tag:
self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
else:
self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be 'pre'
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%(tag)s>" % locals())
def handle_charref(self, ref):
# called for each character reference, e.g. for ' ', ref will be '160'
# Reconstruct the original character reference.
if ref.startswith('x'):
value = unichr(int(ref[1:],16))
else:
value = unichr(int(ref))
if value in _cp1252.keys():
self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
else:
self.pieces.append('&#%(ref)s;' % locals())
def handle_entityref(self, ref):
# called for each entity reference, e.g. for '©', ref will be 'copy'
# Reconstruct the original entity reference.
if name2codepoint.has_key(ref):
self.pieces.append('&%(ref)s;' % locals())
else:
self.pieces.append('&%(ref)s' % locals())
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_data, text=%s\n' % text)
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append('<!--%(text)s-->' % locals())
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append('<?%(text)s>' % locals())
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append('<!%(text)s>' % locals())
_new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = self._new_declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.handle_data(rawdata)
# self.updatepos(declstartpos, i)
return None, -1
def convert_charref(self, name):
return '&#%s;' % name
def convert_entityref(self, name):
return '&%s;' % name
def output(self):
'''Return processed HTML as a single string'''
return ''.join([str(p) for p in self.pieces])
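    # Minimal round-trip sketch (illustrative; exact output assumes sgmllib's
    # default tokenization): empty elements such as <br> are normalized to
    # XHTML-style <br />, and known entity references survive intact.
    #   >>> p = _BaseHTMLProcessor('utf-8', 'text/html')
    #   >>> p.feed('<p>one &amp; two<br>')
    #   >>> p.output()
    #   '<p>one &amp; two<br />'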
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
def __init__(self, baseuri, baselang, encoding, entities):
sgmllib.SGMLParser.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
_BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
self.entities=entities
def decodeEntities(self, element, data):
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
return data
def strattrs(self, attrs):
        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
class _MicroformatsParser:
STRING = 1
DATE = 2
URI = 3
NODE = 4
EMAIL = 5
known_xfn_relationships = ['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me']
known_binary_extensions = ['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv']
def __init__(self, data, baseuri, encoding):
self.document = BeautifulSoup.BeautifulSoup(data)
self.baseuri = baseuri
self.encoding = encoding
if type(data) == type(u''):
data = data.encode(encoding)
self.tags = []
self.enclosures = []
self.xfn = []
self.vcard = None
def vcardEscape(self, s):
if type(s) in (type(''), type(u'')):
s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
return s
def vcardFold(self, s):
s = re.sub(';+$', '', s)
sFolded = ''
iMax = 75
sPrefix = ''
while len(s) > iMax:
sFolded += sPrefix + s[:iMax] + '\n'
s = s[iMax:]
sPrefix = ' '
iMax = 74
sFolded += sPrefix + s
return sFolded
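    # Illustrative arithmetic for the RFC 2426 folding above: the first
    # physical line carries 75 characters, each continuation line starts with
    # a single space and carries up to 74 more, so for example
    #   vcardFold('NOTE:' + 'a' * 80) == 'NOTE:' + 'a' * 70 + '\n ' + 'a' * 10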
def normalize(self, s):
return re.sub(r'\s+', ' ', s).strip()
def unique(self, aList):
results = []
for element in aList:
if element not in results:
results.append(element)
return results
def toISO8601(self, dt):
return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt)
def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0):
all = lambda x: 1
sProperty = sProperty.lower()
bFound = 0
bNormalize = 1
propertyMatch = {'class': re.compile(r'\b%s\b' % sProperty)}
if bAllowMultiple and (iPropertyType != self.NODE):
snapResults = []
containers = elmRoot(['ul', 'ol'], propertyMatch)
for container in containers:
snapResults.extend(container('li'))
bFound = (len(snapResults) != 0)
if not bFound:
snapResults = elmRoot(all, propertyMatch)
bFound = (len(snapResults) != 0)
if (not bFound) and (sProperty == 'value'):
snapResults = elmRoot('pre')
bFound = (len(snapResults) != 0)
bNormalize = not bFound
if not bFound:
snapResults = [elmRoot]
bFound = (len(snapResults) != 0)
arFilter = []
if sProperty == 'vcard':
snapFilter = elmRoot(all, propertyMatch)
for node in snapFilter:
if node.findParent(all, propertyMatch):
arFilter.append(node)
arResults = []
for node in snapResults:
if node not in arFilter:
arResults.append(node)
bFound = (len(arResults) != 0)
if not bFound:
if bAllowMultiple: return []
elif iPropertyType == self.STRING: return ''
elif iPropertyType == self.DATE: return None
elif iPropertyType == self.URI: return ''
elif iPropertyType == self.NODE: return None
else: return None
arValues = []
for elmResult in arResults:
sValue = None
if iPropertyType == self.NODE:
if bAllowMultiple:
arValues.append(elmResult)
continue
else:
return elmResult
sNodeName = elmResult.name.lower()
if (iPropertyType == self.EMAIL) and (sNodeName == 'a'):
sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0]
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (sNodeName == 'abbr'):
sValue = elmResult.get('title')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (iPropertyType == self.URI):
if sNodeName == 'a': sValue = elmResult.get('href')
elif sNodeName == 'img': sValue = elmResult.get('src')
elif sNodeName == 'object': sValue = elmResult.get('data')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (sNodeName == 'img'):
sValue = elmResult.get('alt')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if not sValue:
sValue = elmResult.renderContents()
sValue = re.sub(r'<\S[^>]*>', '', sValue)
sValue = sValue.replace('\r\n', '\n')
sValue = sValue.replace('\r', '\n')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if not sValue: continue
if iPropertyType == self.DATE:
sValue = _parse_date_iso8601(sValue)
if bAllowMultiple:
arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue)
else:
return bAutoEscape and self.vcardEscape(sValue) or sValue
return arValues
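    # Hedged sketch of the common case (assumes the BeautifulSoup 3 module
    # this class depends on is importable):
    #   >>> p = _MicroformatsParser('<span class="fn">Jane Doe</span>', '', 'utf-8')
    #   >>> p.getPropertyValue(p.document, 'fn', p.STRING)
    #   'Jane Doe'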
def findVCards(self, elmRoot, bAgentParsing=0):
sVCards = ''
if not bAgentParsing:
arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1)
else:
arCards = [elmRoot]
for elmCard in arCards:
arLines = []
def processSingleString(sProperty):
sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1)
if sValue:
arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue))
return sValue or ''
def processSingleURI(sProperty):
sValue = self.getPropertyValue(elmCard, sProperty, self.URI)
if sValue:
sContentType = ''
sEncoding = ''
sValueKey = ''
if sValue.startswith('data:'):
sEncoding = ';ENCODING=b'
sContentType = sValue.split(';')[0].split('/').pop()
sValue = sValue.split(',', 1).pop()
else:
elmValue = self.getPropertyValue(elmCard, sProperty)
if elmValue:
if sProperty != 'url':
sValueKey = ';VALUE=uri'
sContentType = elmValue.get('type', '').strip().split('/').pop().strip()
sContentType = sContentType.upper()
if sContentType == 'OCTET-STREAM':
sContentType = ''
if sContentType:
sContentType = ';TYPE=' + sContentType.upper()
arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue))
def processTypeValue(sProperty, arDefaultType, arForceType=None):
arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1)
for elmResult in arResults:
arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1)
if arForceType:
arType = self.unique(arForceType + arType)
if not arType:
arType = arDefaultType
sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0)
if sValue:
arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue))
# AGENT
# must do this before all other properties because it is destructive
# (removes nested class="vcard" nodes so they don't interfere with
# this vcard's other properties)
arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1)
for elmAgent in arAgent:
if re.compile(r'\bvcard\b').search(elmAgent.get('class')):
sAgentValue = self.findVCards(elmAgent, 1) + '\n'
sAgentValue = sAgentValue.replace('\n', '\\n')
sAgentValue = sAgentValue.replace(';', '\\;')
if sAgentValue:
arLines.append(self.vcardFold('AGENT:' + sAgentValue))
elmAgent['class'] = ''
elmAgent.contents = []
else:
                    sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1)
if sAgentValue:
arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue))
# FN (full name)
sFN = processSingleString('fn')
# N (name)
elmName = self.getPropertyValue(elmCard, 'n')
if elmName:
sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1)
sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1)
arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1)
arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1)
arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1)
arLines.append(self.vcardFold('N:' + sFamilyName + ';' +
sGivenName + ';' +
','.join(arAdditionalNames) + ';' +
','.join(arHonorificPrefixes) + ';' +
','.join(arHonorificSuffixes)))
elif sFN:
# implied "N" optimization
# http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization
arNames = self.normalize(sFN).split()
if len(arNames) == 2:
bFamilyNameFirst = (arNames[0].endswith(',') or
len(arNames[1]) == 1 or
((len(arNames[1]) == 2) and (arNames[1].endswith('.'))))
if bFamilyNameFirst:
arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1]))
else:
arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0]))
# SORT-STRING
sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1)
if sSortString:
arLines.append(self.vcardFold('SORT-STRING:' + sSortString))
# NICKNAME
arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1)
if arNickname:
arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname)))
# PHOTO
processSingleURI('photo')
# BDAY
dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE)
if dtBday:
arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday)))
# ADR (address)
arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1)
for elmAdr in arAdr:
arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1)
if not arType:
arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1
sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1)
sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1)
sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1)
sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1)
sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1)
sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1)
sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1)
arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' +
sPostOfficeBox + ';' +
sExtendedAddress + ';' +
sStreetAddress + ';' +
sLocality + ';' +
sRegion + ';' +
sPostalCode + ';' +
sCountryName))
# LABEL
processTypeValue('label', ['intl','postal','parcel','work'])
# TEL (phone number)
processTypeValue('tel', ['voice'])
# EMAIL
processTypeValue('email', ['internet'], ['internet'])
# MAILER
processSingleString('mailer')
# TZ (timezone)
processSingleString('tz')
# GEO (geographical information)
elmGeo = self.getPropertyValue(elmCard, 'geo')
if elmGeo:
sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1)
sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1)
arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude))
# TITLE
processSingleString('title')
# ROLE
processSingleString('role')
# LOGO
processSingleURI('logo')
# ORG (organization)
elmOrg = self.getPropertyValue(elmCard, 'org')
if elmOrg:
sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1)
if not sOrganizationName:
# implied "organization-name" optimization
# http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization
sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1)
if sOrganizationName:
arLines.append(self.vcardFold('ORG:' + sOrganizationName))
else:
arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1)
arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit)))
# CATEGORY
arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1)
if arCategory:
arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory)))
# NOTE
processSingleString('note')
# REV
processSingleString('rev')
# SOUND
processSingleURI('sound')
# UID
processSingleString('uid')
# URL
processSingleURI('url')
# CLASS
processSingleString('class')
# KEY
processSingleURI('key')
if arLines:
arLines = ['BEGIN:vCard','VERSION:3.0'] + arLines + ['END:vCard']
sVCards += '\n'.join(arLines) + '\n'
return sVCards.strip()
def isProbablyDownloadable(self, elm):
attrsD = elm.attrMap
if not attrsD.has_key('href'): return 0
linktype = attrsD.get('type', '').strip()
if linktype.startswith('audio/') or \
linktype.startswith('video/') or \
(linktype.startswith('application/') and not linktype.endswith('xml')):
return 1
path = urlparse.urlparse(attrsD['href'])[2]
if path.find('.') == -1: return 0
fileext = path.split('.').pop().lower()
return fileext in self.known_binary_extensions
def findTags(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile(r'\btag\b')}):
href = elm.get('href')
if not href: continue
urlscheme, domain, path, params, query, fragment = \
urlparse.urlparse(_urljoin(self.baseuri, href))
segments = path.split('/')
tag = segments.pop()
if not tag:
tag = segments.pop()
tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', ''))
if not tagscheme.endswith('/'):
tagscheme += '/'
self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''}))
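    # Illustrative sketch of the rel="tag" convention handled above: for
    #   <a rel="tag" href="http://example.org/tags/python">python</a>
    # the last path segment becomes the term and the remainder the scheme,
    # yielding {'term': 'python', 'scheme': 'http://example.org/tags/',
    # 'label': 'python'} (as a FeedParserDict).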
def findEnclosures(self):
all = lambda x: 1
enclosure_match = re.compile(r'\benclosure\b')
for elm in self.document(all, {'href': re.compile(r'.+')}):
if not enclosure_match.search(elm.get('rel', '')) and not self.isProbablyDownloadable(elm): continue
if elm.attrMap not in self.enclosures:
self.enclosures.append(elm.attrMap)
if elm.string and not elm.get('title'):
self.enclosures[-1]['title'] = elm.string
def findXFN(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}):
rels = elm.get('rel', '').split()
xfn_rels = []
for rel in rels:
if rel in self.known_xfn_relationships:
xfn_rels.append(rel)
if xfn_rels:
self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string})
def _parseMicroformats(htmlSource, baseURI, encoding):
if not BeautifulSoup: return
if _debug: sys.stderr.write('entering _parseMicroformats\n')
p = _MicroformatsParser(htmlSource, baseURI, encoding)
p.vcard = p.findVCards(p.document)
p.findTags()
p.findEnclosures()
p.findXFN()
return {"tags": p.tags, "enclosures": p.enclosures, "xfn": p.xfn, "vcard": p.vcard}
class _RelativeURIResolver(_BaseHTMLProcessor):
relative_uris = [('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('blockquote', 'cite'),
('body', 'background'),
('del', 'cite'),
('form', 'action'),
('frame', 'longdesc'),
('frame', 'src'),
('iframe', 'longdesc'),
('iframe', 'src'),
('head', 'profile'),
('img', 'longdesc'),
('img', 'src'),
('img', 'usemap'),
('input', 'src'),
('input', 'usemap'),
('ins', 'cite'),
('link', 'href'),
('object', 'classid'),
('object', 'codebase'),
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
('script', 'src')]
def __init__(self, baseuri, encoding, type):
_BaseHTMLProcessor.__init__(self, encoding, type)
self.baseuri = baseuri
def resolveURI(self, uri):
return _urljoin(self.baseuri, uri.strip())
def unknown_starttag(self, tag, attrs):
if _debug:
sys.stderr.write('tag: [%s] with attributes: [%s]\n' % (tag, str(attrs)))
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, type):
if _debug:
sys.stderr.write('entering _resolveRelativeURIs\n')
p = _RelativeURIResolver(baseURI, encoding, type)
p.feed(htmlSource)
return p.output()
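# Minimal usage sketch (illustrative; the base URI and markup are made up):
#   >>> _resolveRelativeURIs('<a href="/about">About</a>',
#   ...                      'http://example.org/feed', 'utf-8', 'text/html')
#   '<a href="http://example.org/about">About</a>'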
class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'article',
'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button', 'canvas',
'caption', 'center', 'cite', 'code', 'col', 'colgroup', 'command',
'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn', 'dialog', 'dir',
'div', 'dl', 'dt', 'em', 'event-source', 'fieldset', 'figure', 'footer',
'font', 'form', 'header', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i',
'img', 'input', 'ins', 'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map',
'menu', 'meter', 'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup',
'option', 'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong', 'sub',
'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot', 'th', 'thead',
'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
'prompt', 'pqg', 'radiogroup', 'readonly', 'rel', 'repeat-max',
'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows',
'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src',
'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template',
'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign',
'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap',
'xml:lang']
unacceptable_elements_with_end_tag = ['script', 'applet', 'style']
acceptable_css_properties = ['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width']
# survey of common keywords found in feeds
acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow']
valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
'\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
mathml_elements = ['annotation', 'annotation-xml', 'maction', 'math',
'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none', 'semantics']
mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth',
'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows',
'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness',
'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant',
'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign',
'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href',
'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink']
# svgtiny - foreignObject + linearGradient + radialGradient + stop
svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
'svg', 'switch', 'text', 'title', 'tspan', 'use']
# svgtiny + class + opacity + offset + xmlns + xmlns:xlink
svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
'min', 'name', 'offset', 'opacity', 'orient', 'origin',
'overline-position', 'overline-thickness', 'panose-1', 'path',
'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
'stop-color', 'stop-opacity', 'strikethrough-position',
'strikethrough-thickness', 'stroke', 'stroke-dasharray',
'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
'y2', 'zoomAndPan']
svg_attr_map = None
svg_elem_map = None
acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity']
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
self.mathmlOK = 0
self.svgOK = 0
def unknown_starttag(self, tag, attrs):
acceptable_attributes = self.acceptable_attributes
keymap = {}
if not tag in self.acceptable_elements or self.svgOK:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
# not otherwise acceptable, perhaps it is MathML or SVG?
if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
self.mathmlOK += 1
if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
self.svgOK += 1
        # choose acceptable attributes based on tag class, else bail
if self.mathmlOK and tag in self.mathml_elements:
acceptable_attributes = self.mathml_attributes
elif self.svgOK and tag in self.svg_elements:
# for most vocabularies, lowercasing is a good idea. Many
# svg elements, however, are camel case
if not self.svg_attr_map:
lower=[attr.lower() for attr in self.svg_attributes]
mix=[a for a in self.svg_attributes if a not in lower]
self.svg_attributes = lower
self.svg_attr_map = dict([(a.lower(),a) for a in mix])
lower=[attr.lower() for attr in self.svg_elements]
mix=[a for a in self.svg_elements if a not in lower]
self.svg_elements = lower
self.svg_elem_map = dict([(a.lower(),a) for a in mix])
acceptable_attributes = self.svg_attributes
tag = self.svg_elem_map.get(tag,tag)
keymap = self.svg_attr_map
elif not tag in self.acceptable_elements:
return
# declare xlink namespace, if needed
if self.mathmlOK or self.svgOK:
if filter(lambda (n,v): n.startswith('xlink:'),attrs):
if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
clean_attrs = []
for key, value in self.normalize_attrs(attrs):
if key in acceptable_attributes:
key=keymap.get(key,key)
clean_attrs.append((key,value))
elif key=='style':
clean_value = self.sanitize_style(value)
if clean_value: clean_attrs.append((key,clean_value))
_BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
def unknown_endtag(self, tag):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
if self.mathmlOK and tag in self.mathml_elements:
if tag == 'math' and self.mathmlOK: self.mathmlOK -= 1
elif self.svgOK and tag in self.svg_elements:
tag = self.svg_elem_map.get(tag,tag)
if tag == 'svg' and self.svgOK: self.svgOK -= 1
else:
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
def sanitize_style(self, style):
# disallow urls
style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return ''
# This replaced a regexp that used re.match and was prone to pathological back-tracking.
if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip(): return ''
clean = []
for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
if not value: continue
if prop.lower() in self.acceptable_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
for keyword in value.split():
if not keyword in self.acceptable_css_keywords and \
not self.valid_css_values.match(keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
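    # Hedged sketch: url() values are stripped and only whitelisted properties
    # survive, so sanitize_style('color: red; behavior: url(evil.htc); '
    # 'font-weight: bold') is expected to yield 'color: red; font-weight: bold;'.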
def _sanitizeHTML(htmlSource, encoding, type):
p = _HTMLSanitizer(encoding, type)
p.feed(htmlSource)
data = p.output()
if TIDY_MARKUP:
# loop through list of preferred Tidy interfaces looking for one that's installed,
# then set up a common _tidy function to wrap the interface-specific API.
_tidy = None
for tidy_interface in PREFERRED_TIDY_INTERFACES:
try:
if tidy_interface == "uTidy":
from tidy import parseString as _utidy
def _tidy(data, **kwargs):
return str(_utidy(data, **kwargs))
break
elif tidy_interface == "mxTidy":
from mx.Tidy import Tidy as _mxtidy
def _tidy(data, **kwargs):
nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
return data
break
except:
pass
if _tidy:
utf8 = type(data) == type(u'')
if utf8:
data = data.encode('utf-8')
data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
if utf8:
data = unicode(data, 'utf-8')
if data.count('<body'):
data = data.split('<body', 1)[1]
if data.count('>'):
data = data.split('>', 1)[1]
if data.count('</body'):
data = data.split('</body', 1)[0]
data = data.strip().replace('\r\n', '\n')
return data
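# Minimal usage sketch (assumes TIDY_MARKUP is left at its default of 0, so
# no Tidy pass runs):
#   >>> _sanitizeHTML('safe <script>alert(1)</script> text', 'utf-8', 'text/html')
#   'safe  text'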
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
if ((code / 100) == 3) and (code != 304):
return self.http_error_302(req, fp, code, msg, headers)
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
infourl.status = code
return infourl
def http_error_302(self, req, fp, code, msg, headers):
if headers.dict.has_key('location'):
infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
else:
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
if not hasattr(infourl, 'status'):
infourl.status = code
return infourl
def http_error_301(self, req, fp, code, msg, headers):
if headers.dict.has_key('location'):
infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
else:
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
if not hasattr(infourl, 'status'):
infourl.status = code
return infourl
http_error_300 = http_error_302
http_error_303 = http_error_302
http_error_307 = http_error_302
def http_error_401(self, req, fp, code, msg, headers):
# Check if
# - server requires digest auth, AND
# - we tried (unsuccessfully) with basic auth, AND
# - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
# If all conditions hold, parse authentication information
# out of the Authorization header we sent the first time
# (for the username and password) and the WWW-Authenticate
# header the server sent back (for the realm) and retry
# the request with the appropriate digest auth headers instead.
# This evil genius hack has been brought to you by Aaron Swartz.
host = urlparse.urlparse(req.get_full_url())[1]
try:
assert sys.version.split()[0] >= '2.3.3'
assert base64 != None
user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
self.add_password(realm, host, user, passw)
retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
self.reset_retry_count()
return retry
except:
return self.http_error_default(req, fp, code, msg, headers)
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the modified argument is supplied, it can be a tuple of 9 integers
(as returned by gmtime() in the standard Python time module) or a date
string in any format supported by feedparser. Regardless, it MUST
be in GMT (Greenwich Mean Time). It will be reformatted into an
RFC 1123-compliant date and used as the value of an If-Modified-Since
request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
If the referrer argument is supplied, it will be used as the value of a
Referer[sic] request header.
If handlers is supplied, it is a list of handlers used to build a
urllib2 opener.
"""
if hasattr(url_file_stream_or_string, 'read'):
return url_file_stream_or_string
if url_file_stream_or_string == '-':
return sys.stdin
if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
if not agent:
agent = USER_AGENT
# test for inline user:password for basic auth
auth = None
if base64:
urltype, rest = urllib.splittype(url_file_stream_or_string)
realhost, rest = urllib.splithost(rest)
if realhost:
user_passwd, realhost = urllib.splituser(realhost)
if user_passwd:
url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
auth = base64.encodestring(user_passwd).strip()
# iri support
try:
if isinstance(url_file_stream_or_string,unicode):
url_file_stream_or_string = url_file_stream_or_string.encode('idna')
else:
url_file_stream_or_string = url_file_stream_or_string.decode('utf-8').encode('idna')
except:
pass
# try to open with urllib2 (to use optional headers)
request = urllib2.Request(url_file_stream_or_string)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
if type(modified) == type(''):
modified = _parse_date(modified)
if modified:
# format into an RFC 1123-compliant timestamp. We can't use
# time.strftime() since the %a and %b directives can be affected
# by the current locale, but RFC 2616 states that dates must be
# in English.
short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
if referrer:
request.add_header('Referer', referrer)
if gzip and zlib:
request.add_header('Accept-encoding', 'gzip, deflate')
elif gzip:
request.add_header('Accept-encoding', 'gzip')
elif zlib:
request.add_header('Accept-encoding', 'deflate')
else:
request.add_header('Accept-encoding', '')
if auth:
request.add_header('Authorization', 'Basic %s' % auth)
if ACCEPT_HEADER:
request.add_header('Accept', ACCEPT_HEADER)
request.add_header('A-IM', 'feed') # RFC 3229 support
opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers))
opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
try:
return opener.open(request)
finally:
opener.close() # JohnD
# try to open with native open function (if url_file_stream_or_string is a filename)
try:
return open(url_file_stream_or_string)
except:
pass
# treat url_file_stream_or_string as string
return _StringIO(str(url_file_stream_or_string))
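# Hedged sketch of the string fallback: input that is neither a URL, '-',
# nor an openable filename comes back wrapped in a StringIO:
#   >>> _open_resource('<feed/>', None, None, None, None, []).read()
#   '<feed/>'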
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
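# Sketch of registering a custom handler (illustrative only; _parse_date_epoch
# is a hypothetical name, not part of feedparser):
#   def _parse_date_epoch(dateString):
#       '''Parse a Unix timestamp string into a 9-tuple in GMT'''
#       try:
#           return time.gmtime(int(dateString))
#       except ValueError:
#           return None
#   registerDateHandler(_parse_date_epoch)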
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
'-YY-?MM', '-OOO', '-YY',
'--MM-?DD', '--MM',
'---DD',
'CC', '']
_iso8601_re = [
tmpl.replace(
'YYYY', r'(?P<year>\d{4})').replace(
'YY', r'(?P<year>\d\d)').replace(
'MM', r'(?P<month>[01]\d)').replace(
'DD', r'(?P<day>[0123]\d)').replace(
'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
'CC', r'(?P<century>\d\d$)')
+ r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+ r'(:(?P<second>\d{2}))?'
+ r'(\.(?P<fracsecond>\d+))?'
+ r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
for tmpl in _iso8601_tmpl]
del tmpl
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
'''Parse a variety of ISO-8601-compatible formats like 20040105'''
m = None
for _iso8601_match in _iso8601_matches:
m = _iso8601_match(dateString)
if m: break
if not m: return
if m.span() == (0, 0): return
params = m.groupdict()
ordinal = params.get('ordinal', 0)
if ordinal:
ordinal = int(ordinal)
else:
ordinal = 0
year = params.get('year', '--')
if not year or year == '--':
year = time.gmtime()[0]
elif len(year) == 2:
# ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
year = 100 * int(time.gmtime()[0] / 100) + int(year)
else:
year = int(year)
month = params.get('month', '-')
if not month or month == '-':
# ordinals are NOT normalized by mktime, we simulate them
# by setting month=1, day=ordinal
if ordinal:
month = 1
else:
month = time.gmtime()[1]
month = int(month)
day = params.get('day', 0)
if not day:
# see above
if ordinal:
day = ordinal
elif params.get('century', 0) or \
params.get('year', 0) or params.get('month', 0):
day = 1
else:
day = time.gmtime()[2]
else:
day = int(day)
# special case of the century - is the first year of the 21st century
# 2000 or 2001 ? The debate goes on...
if 'century' in params.keys():
year = (int(params['century']) - 1) * 100 + 1
# in ISO 8601 most fields are optional
for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
if not params.get(field, None):
params[field] = 0
hour = int(params.get('hour', 0))
minute = int(params.get('minute', 0))
second = int(float(params.get('second', 0)))
# weekday is normalized by mktime(), we can ignore it
weekday = 0
daylight_savings_flag = -1
tm = [year, month, day, hour, minute, second, weekday,
ordinal, daylight_savings_flag]
# ISO 8601 time zone adjustments
tz = params.get('tz')
if tz and tz != 'Z':
if tz[0] == '-':
tm[3] += int(params.get('tzhour', 0))
tm[4] += int(params.get('tzmin', 0))
elif tz[0] == '+':
tm[3] -= int(params.get('tzhour', 0))
tm[4] -= int(params.get('tzmin', 0))
else:
return None
# Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
# which is guaranteed to normalize d/m/y/h/m/s.
# Many implementations have bugs, but we'll pretend they don't.
return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)
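# Hedged example (assumes a well-behaved local timezone, since the function
# round-trips through mktime()/localtime()):
#   >>> _parse_date_iso8601('2003-12-31T10:14:55Z')[:6]
#   (2003, 12, 31, 10, 14, 55)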
# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = \
re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
(_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
(_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
'''Parse a string according to the OnBlog 8-bit date format'''
m = _korean_onblog_date_re.match(dateString)
if not m: return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
'''Parse a string according to the Nate 8-bit date format'''
m = _korean_nate_date_re.match(dateString)
if not m: return
hour = int(m.group(5))
ampm = m.group(4)
if (ampm == _korean_pm):
hour += 12
hour = str(hour)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
_mssql_date_re = \
re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
'''Parse a string according to the MS SQL date format'''
m = _mssql_date_re.match(dateString)
if not m: return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)
# Unicode strings for Greek date strings
_greek_months = \
{ \
u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = \
{ \
u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = \
re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
'''Parse a string according to a Greek 8-bit date format.'''
m = _greek_date_format_re.match(dateString)
if not m: return
try:
wday = _greek_wdays[m.group(1)]
month = _greek_months[m.group(3)]
except:
return
rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
'zonediff': m.group(8)}
if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
u'febru\u00e1ri': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
u'm\u00e1ujus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
'''Parse a string according to a Hungarian 8-bit date format.'''
m = _hungarian_date_format_re.match(dateString)
if not m: return
try:
month = _hungarian_months[m.group(2)]
day = m.group(3)
if len(day) == 1:
day = '0' + day
hour = m.group(4)
if len(hour) == 1:
hour = '0' + hour
except:
return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
{'year': m.group(1), 'month': month, 'day': day,\
'hour': hour, 'minute': m.group(5),\
'zonediff': m.group(6)}
if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
def __extract_date(m):
year = int(m.group('year'))
if year < 100:
year = 100 * int(time.gmtime()[0] / 100) + int(year)
if year < 1000:
return 0, 0, 0
julian = m.group('julian')
if julian:
julian = int(julian)
month = julian / 30 + 1
day = julian % 30 + 1
jday = None
while jday != julian:
t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
jday = time.gmtime(t)[-2]
diff = abs(jday - julian)
if jday > julian:
if diff < day:
day = day - diff
else:
month = month - 1
day = 31
elif jday < julian:
if day + diff < 28:
day = day + diff
else:
month = month + 1
return year, month, day
month = m.group('month')
day = 1
if month is None:
month = 1
else:
month = int(month)
day = m.group('day')
if day:
day = int(day)
else:
day = 1
return year, month, day
def __extract_time(m):
if not m:
return 0, 0, 0
hours = m.group('hours')
if not hours:
return 0, 0, 0
hours = int(hours)
minutes = int(m.group('minutes'))
seconds = m.group('seconds')
if seconds:
seconds = int(seconds)
else:
seconds = 0
return hours, minutes, seconds
def __extract_tzd(m):
'''Return the Time Zone Designator as an offset in seconds from UTC.'''
if not m:
return 0
tzd = m.group('tzd')
if not tzd:
return 0
if tzd == 'Z':
return 0
hours = int(m.group('tzdhours'))
minutes = m.group('tzdminutes')
if minutes:
minutes = int(minutes)
else:
minutes = 0
offset = (hours*60 + minutes) * 60
if tzd[0] == '+':
return -offset
return offset
__date_re = ('(?P<year>\d\d\d\d)'
'(?:(?P<dsep>-|)'
'(?:(?P<julian>\d\d\d)'
'|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
__tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
__tzd_rx = re.compile(__tzd_re)
__time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
'(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
+ __tzd_re)
__datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
__datetime_rx = re.compile(__datetime_re)
m = __datetime_rx.match(dateString)
if (m is None) or (m.group() != dateString): return
gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
if gmt[0] == 0: return
return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
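# Hedged example: 'Z' and numeric zone designators normalize to GMT, e.g.
#   >>> _parse_date_w3dtf('2003-12-31T10:14:55Z')[:6]
#   (2003, 12, 31, 10, 14, 55)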
def _parse_date_rfc822(dateString):
'''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
data = dateString.split()
if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
del data[0]
if len(data) == 4:
s = data[3]
i = s.find('+')
if i > 0:
data[3:] = [s[:i], s[i+1:]]
else:
data.append('')
dateString = " ".join(data)
if len(data) < 5:
dateString += ' 00:00:00 GMT'
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
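# Hedged example (result is a 9-tuple in GMT):
#   >>> _parse_date_rfc822('Wed, 31 Dec 2003 10:14:55 GMT')[:6]
#   (2003, 12, 31, 10, 14, 55)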
def _parse_date_perforce(aDateString):
"""parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
# Fri, 2006/09/15 08:19:53 EDT
_my_date_pattern = re.compile( \
r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
dow, year, month, day, hour, minute, second, tz = \
_my_date_pattern.search(aDateString).groups()
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
registerDateHandler(_parse_date_perforce)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
if not date9tuple: continue
if len(date9tuple) != 9:
if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
raise ValueError
map(int, date9tuple)
return date9tuple
except Exception, e:
if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
pass
return None
def _getCharacterEncoding(http_headers, xml_data):
'''Get the character encoding of the XML document
http_headers is a dictionary
xml_data is a raw string (not Unicode)
This is so much trickier than it sounds, it's not even funny.
According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
is application/xml, application/*+xml,
application/xml-external-parsed-entity, or application/xml-dtd,
the encoding given in the charset parameter of the HTTP Content-Type
takes precedence over the encoding given in the XML prefix within the
document, and defaults to 'utf-8' if neither are specified. But, if
the HTTP Content-Type is text/xml, text/*+xml, or
text/xml-external-parsed-entity, the encoding given in the XML prefix
within the document is ALWAYS IGNORED and only the encoding given in
the charset parameter of the HTTP Content-Type header should be
respected, and it defaults to 'us-ascii' if not specified.
Furthermore, discussion on the atom-syntax mailing list with the
author of RFC 3023 leads me to the conclusion that any document
served with a Content-Type of text/* and no charset parameter
must be treated as us-ascii. (We now do this.) And also that it
must always be flagged as non-well-formed. (We now do this too.)
If Content-Type is unspecified (input was local file or non-HTTP source)
or unrecognized (server just got it totally wrong), then go by the
encoding given in the XML prefix of the document and default to
'iso-8859-1' as per the HTTP specification (RFC 2616).
Then, assuming we didn't find a character encoding in the HTTP headers
(and the HTTP Content-type allowed us to look in the body), we need
to sniff the first few bytes of the XML data and try to determine
whether the encoding is ASCII-compatible. Section F of the XML
specification shows the way here:
http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
If the sniffed encoding is not ASCII-compatible, we need to make it
ASCII compatible so that we can sniff further into the XML declaration
to find the encoding attribute, which will tell us the true encoding.
Of course, none of this guarantees that we will be able to parse the
feed in the declared character encoding (assuming it was declared
correctly, which many are not). CJKCodecs and iconv_codec help a lot;
you should definitely install them if you can.
http://cjkpython.i18n.org/
'''
def _parseHTTPContentType(content_type):
'''takes HTTP Content-Type header and returns (content type, charset)
If no charset is specified, returns (content type, '')
If no content type is specified, returns ('', '')
Both return parameters are guaranteed to be lowercase strings
'''
content_type = content_type or ''
content_type, params = cgi.parse_header(content_type)
return content_type, params.get('charset', '').replace("'", '')
sniffed_xml_encoding = ''
xml_encoding = ''
true_encoding = ''
http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
# Must sniff for non-ASCII-compatible character encodings before
# searching for XML declaration. This heuristic is defined in
# section F of the XML specification:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = _ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
# ASCII-compatible
pass
xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
except:
xml_encoding_match = None
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].lower()
if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
acceptable_content_type = 0
application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
if (http_content_type in application_content_types) or \
(http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
acceptable_content_type = 1
true_encoding = http_encoding or xml_encoding or 'utf-8'
elif (http_content_type in text_content_types) or \
(http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
acceptable_content_type = 1
true_encoding = http_encoding or 'us-ascii'
elif http_content_type.startswith('text/'):
true_encoding = http_encoding or 'us-ascii'
elif http_headers and (not http_headers.has_key('content-type')):
true_encoding = xml_encoding or 'iso-8859-1'
else:
true_encoding = xml_encoding or 'utf-8'
# some feeds claim to be gb2312 but are actually gb18030.
# apparently MSIE and Firefox both do the following switch:
if true_encoding.lower() == 'gb2312':
true_encoding = 'gb18030'
return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
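# Hedged examples of the precedence rules above: for application/*+xml the
# XML declaration may supply the encoding, while for text/* it is ignored:
#   >>> xml = '<?xml version="1.0" encoding="iso-8859-1"?><feed/>'
#   >>> _getCharacterEncoding({'content-type': 'application/atom+xml'}, xml)[0]
#   'iso-8859-1'
#   >>> _getCharacterEncoding({'content-type': 'text/xml'}, xml)[0]
#   'us-ascii'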
def _toUTF8(data, encoding):
'''Changes an XML data stream on the fly to specify a new encoding
data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
encoding is a string recognized by encodings.aliases
'''
if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-16be':
sys.stderr.write('trying utf-16be instead\n')
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-16le':
sys.stderr.write('trying utf-16le instead\n')
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-8':
sys.stderr.write('trying utf-8 instead\n')
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-32be':
sys.stderr.write('trying utf-32be instead\n')
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-32le':
sys.stderr.write('trying utf-32le instead\n')
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
declmatch = re.compile('^<\?xml[^>]*?>')
newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
if declmatch.search(newdata):
newdata = declmatch.sub(newdecl, newdata)
else:
newdata = newdecl + u'\n' + newdata
return newdata.encode('utf-8')
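# Illustrative sketch (hypothetical input bytes, not part of the original
# module): a UTF-16LE document with a BOM is stripped, decoded, given a fresh
# UTF-8 declaration, and re-encoded:
#
#   >>> _toUTF8('\xff\xfe<\x00a\x00/\x00>\x00', 'utf-16le')
#   "<?xml version='1.0' encoding='utf-8'?>\n<a/>"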
def _stripDoctype(data):
'''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document, minus the DOCTYPE
'''
start = re.search('<\w',data)
start = start and start.start() or -1
head,data = data[:start+1], data[start+1:]
entity_pattern = re.compile(r'^\s*<!ENTITY([^>]*?)>', re.MULTILINE)
entity_results=entity_pattern.findall(head)
head = entity_pattern.sub('', head)
doctype_pattern = re.compile(r'^\s*<!DOCTYPE([^>]*?)>', re.MULTILINE)
doctype_results = doctype_pattern.findall(head)
doctype = doctype_results and doctype_results[0] or ''
if doctype.lower().count('netscape'):
version = 'rss091n'
else:
version = None
# only allow in 'safe' inline entity definitions
replacement=''
if len(doctype_results)==1 and entity_results:
safe_pattern=re.compile('\s+(\w+)\s+"(&#\w+;|[^&"]*)"')
safe_entities=filter(lambda e: safe_pattern.match(e),entity_results)
if safe_entities:
replacement='<!DOCTYPE feed [\n <!ENTITY %s>\n]>' % '>\n <!ENTITY '.join(safe_entities)
data = doctype_pattern.sub(replacement, head) + data
return version, data, dict(replacement and safe_pattern.findall(replacement))
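# Illustrative sketch (hypothetical input, not part of the original module):
# a Netscape RSS 0.91 DOCTYPE is detected, removed, and reported:
#
#   >>> _stripDoctype('<!DOCTYPE rss SYSTEM "http://my.netscape.com/publish/formats/rss-0.91.dtd"><rss/>')
#   ('rss091n', '<rss/>', {})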
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
'''Parse a feed from a URL, file, stream, or string'''
result = FeedParserDict()
result['feed'] = FeedParserDict()
result['entries'] = []
if _XML_AVAILABLE:
result['bozo'] = 0
if type(handlers) == types.InstanceType:
handlers = [handlers]
try:
f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
data = f.read()
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = None
f = None
# if feed is gzip-compressed, decompress it
if f and data and hasattr(f, 'headers'):
if gzip and f.headers.get('content-encoding', '') == 'gzip':
try:
data = gzip.GzipFile(fileobj=_StringIO(data)).read()
except Exception, e:
# Some feeds claim to be gzipped but they're not, so
# we get garbage. Ideally, we should re-request the
# feed without the 'Accept-encoding: gzip' header,
# but we don't.
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
elif zlib and f.headers.get('content-encoding', '') == 'deflate':
try:
data = zlib.decompress(data, -zlib.MAX_WBITS)
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
# save HTTP headers
if hasattr(f, 'info'):
info = f.info()
etag = info.getheader('ETag')
if etag:
result['etag'] = etag
last_modified = info.getheader('Last-Modified')
if last_modified:
result['modified'] = _parse_date(last_modified)
if hasattr(f, 'url'):
result['href'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
if hasattr(f, 'headers'):
result['headers'] = f.headers.dict
if hasattr(f, 'close'):
f.close()
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
http_headers = result.get('headers', {})
result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
_getCharacterEncoding(http_headers, data)
if http_headers and (not acceptable_content_type):
if http_headers.has_key('content-type'):
bozo_message = '%s is not an XML media type' % http_headers['content-type']
else:
bozo_message = 'no Content-type specified'
result['bozo'] = 1
result['bozo_exception'] = NonXMLContentType(bozo_message)
if data is not None:
result['version'], data, entities = _stripDoctype(data)
baseuri = http_headers.get('content-location', result.get('href'))
baselang = http_headers.get('content-language', None)
# if server sent 304, we're done
if result.get('status', 0) == 304:
result['version'] = ''
result['debug_message'] = 'The feed has not changed since you last checked, ' + \
'so the server sent no data. This is a feature, not a bug!'
return result
# if there was a problem downloading, we're done
if data is None:
return result
# determine character encoding
use_strict_parser = 0
known_encoding = 0
tried_encodings = []
# try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
if not proposed_encoding: continue
if proposed_encoding in tried_encodings: continue
tried_encodings.append(proposed_encoding)
try:
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
break
except:
pass
# if no luck and we have auto-detection library, try that
if (not known_encoding) and chardet:
try:
proposed_encoding = chardet.detect(data)['encoding']
if proposed_encoding and (proposed_encoding not in tried_encodings):
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck and we haven't tried utf-8 yet, try that
if (not known_encoding) and ('utf-8' not in tried_encodings):
try:
proposed_encoding = 'utf-8'
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck and we haven't tried windows-1252 yet, try that
if (not known_encoding) and ('windows-1252' not in tried_encodings):
try:
proposed_encoding = 'windows-1252'
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck and we haven't tried iso-8859-2 yet, try that.
if (not known_encoding) and ('iso-8859-2' not in tried_encodings):
try:
proposed_encoding = 'iso-8859-2'
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck, give up
if not known_encoding:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingUnknown( \
'document encoding unknown, I tried ' + \
'%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' % \
(result['encoding'], xml_encoding))
result['encoding'] = ''
elif proposed_encoding != result['encoding']:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingOverride( \
'document declared as %s, but parsed as %s' % \
(result['encoding'], proposed_encoding))
result['encoding'] = proposed_encoding
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(_StringIO(data))
if hasattr(saxparser, '_ns_stack'):
# work around bug in built-in SAX parser (doesn't recognize xml: namespace)
# PyXML doesn't have this problem, and it doesn't have _ns_stack either
saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
try:
saxparser.parse(source)
except Exception, e:
if _debug:
import traceback
traceback.print_stack()
traceback.print_exc()
sys.stderr.write('xml parsing failed\n')
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
if not use_strict_parser:
feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '', entities)
feedparser.feed(data)
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
result['namespaces'] = feedparser.namespacesInUse
return result
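# Illustrative usage (hypothetical URL, not part of the original module):
#
#   d = parse('http://example.org/feed.xml', agent='MyAgent/1.0')
#   if not d['bozo']:
#       print d['feed'].get('title')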
class Serializer:
def __init__(self, results):
self.results = results
class TextSerializer(Serializer):
def write(self, stream=sys.stdout):
self._writer(stream, self.results, '')
def _writer(self, stream, node, prefix):
if not node: return
if hasattr(node, 'keys'):
keys = node.keys()
keys.sort()
for k in keys:
if k in ('description', 'link'): continue
if node.has_key(k + '_detail'): continue
if node.has_key(k + '_parsed'): continue
self._writer(stream, node[k], prefix + k + '.')
elif type(node) == types.ListType:
index = 0
for n in node:
self._writer(stream, n, prefix[:-1] + '[' + str(index) + '].')
index += 1
else:
try:
s = str(node).encode('utf-8')
s = s.replace('\\', '\\\\')
s = s.replace('\r', '')
s = s.replace('\n', r'\n')
stream.write(prefix[:-1])
stream.write('=')
stream.write(s)
stream.write('\n')
except:
pass
class PprintSerializer(Serializer):
def write(self, stream=sys.stdout):
if self.results.has_key('href'):
stream.write(self.results['href'] + '\n\n')
from pprint import pprint
pprint(self.results, stream)
stream.write('\n')
if __name__ == '__main__':
try:
from optparse import OptionParser
except:
OptionParser = None
if OptionParser:
optionParser = OptionParser(version=__version__, usage="%prog [options] url_or_filename_or_-")
optionParser.set_defaults(format="pprint")
optionParser.add_option("-A", "--user-agent", dest="agent", metavar="AGENT", help="User-Agent for HTTP URLs")
optionParser.add_option("-e", "--referer", "--referrer", dest="referrer", metavar="URL", help="Referrer for HTTP URLs")
optionParser.add_option("-t", "--etag", dest="etag", metavar="TAG", help="ETag/If-None-Match for HTTP URLs")
optionParser.add_option("-m", "--last-modified", dest="modified", metavar="DATE", help="Last-modified/If-Modified-Since for HTTP URLs (any supported date format)")
optionParser.add_option("-f", "--format", dest="format", metavar="FORMAT", help="output results in FORMAT (text, pprint)")
optionParser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="write debugging information to stderr")
(options, urls) = optionParser.parse_args()
if options.verbose:
_debug = 1
if not urls:
optionParser.print_help()
sys.exit(0)
else:
if not sys.argv[1:]:
print __doc__
sys.exit(0)
class _Options:
etag = modified = agent = referrer = None
format = 'pprint'
options = _Options()
urls = sys.argv[1:]
zopeCompatibilityHack()
serializer = globals().get(options.format.capitalize() + 'Serializer', Serializer)
for url in urls:
results = parse(url, etag=options.etag, modified=options.modified, agent=options.agent, referrer=options.referrer)
serializer(results).write(sys.stdout)
| Gschlotter/planet.opensuse.org | rawdoglib/feedparser.py | Python | gpl-2.0 | 157,078 | [
"NetCDF",
"VisIt"
] | 15bcb939012c71e97d3203adbeee8cfc229c0d27d0ca6f904aded437cb25ed3d |
"""Main entry point for distributed next-gen sequencing pipelines.
Handles running the full pipeline based on instructions
"""
import abc
from collections import defaultdict
import copy
import os
import sys
import resource
import tempfile
import toolz as tz
import yaml
from bcbio import log, heterogeneity, hla, structural, utils
from bcbio.distributed import prun
from bcbio.distributed.transaction import tx_tmpdir
from bcbio.log import logger
from bcbio.ngsalign import alignprep
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import (archive, config_utils, disambiguate, region,
run_info, qcsummary, rnaseq)
from bcbio.provenance import profile, system
from bcbio.variation import ensemble, genotype, population, validate, joint
def run_main(workdir, config_file=None, fc_dir=None, run_info_yaml=None,
parallel=None, workflow=None):
"""Run variant analysis, handling command line options.
"""
workdir = utils.safe_makedir(os.path.abspath(workdir))
os.chdir(workdir)
config, config_file = config_utils.load_system_config(config_file, workdir)
if config.get("log_dir", None) is None:
config["log_dir"] = os.path.join(workdir, "log")
if parallel["type"] in ["local", "clusterk"]:
_setup_resources()
_run_toplevel(config, config_file, workdir, parallel,
fc_dir, run_info_yaml)
elif parallel["type"] == "ipython":
assert parallel["scheduler"] is not None, "IPython parallel requires a specified scheduler (-s)"
if parallel["scheduler"] != "sge":
assert parallel["queue"] is not None, "IPython parallel requires a specified queue (-q)"
elif not parallel["queue"]:
parallel["queue"] = ""
_run_toplevel(config, config_file, workdir, parallel,
fc_dir, run_info_yaml)
else:
raise ValueError("Unexpected type of parallel run: %s" % parallel["type"])
def _setup_resources():
"""Attempt to increase resource limits up to hard limits.
This allows us to avoid out of file handle limits where we can
move beyond the soft limit up to the hard limit.
"""
target_procs = 10240
cur_proc, max_proc = resource.getrlimit(resource.RLIMIT_NPROC)
target_proc = min(max_proc, target_procs) if max_proc > 0 else target_procs
resource.setrlimit(resource.RLIMIT_NPROC, (max(cur_proc, target_proc), max_proc))
cur_hdls, max_hdls = resource.getrlimit(resource.RLIMIT_NOFILE)
target_hdls = min(max_hdls, target_procs) if max_hdls > 0 else target_procs
resource.setrlimit(resource.RLIMIT_NOFILE, (max(cur_hdls, target_hdls), max_hdls))
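# A minimal illustration (not part of bcbio) of inspecting the limits this
# function raises:
#
#   import resource
#   _setup_resources()
#   print resource.getrlimit(resource.RLIMIT_NOFILE)  # (soft, hard)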
def _run_toplevel(config, config_file, work_dir, parallel,
fc_dir=None, run_info_yaml=None):
"""
Run toplevel analysis, processing a set of input files.
config_file -- Main YAML configuration file with system parameters
fc_dir -- Directory of fastq files to process
run_info_yaml -- YAML configuration file specifying inputs to process
"""
parallel = log.create_base_logger(config, parallel)
log.setup_local_logging(config, parallel)
dirs = run_info.setup_directories(work_dir, fc_dir, config, config_file)
config_file = os.path.join(dirs["config"], os.path.basename(config_file))
pipelines, config = _pair_samples_with_pipelines(run_info_yaml, config)
system.write_info(dirs, parallel, config)
with tx_tmpdir(config) as tmpdir:
tempfile.tempdir = tmpdir
for pipeline, samples in pipelines.items():
for xs in pipeline.run(config, run_info_yaml, parallel, dirs, samples):
pass
# ## Generic pipeline framework
def _wres(parallel, progs, fresources=None, ensure_mem=None):
"""Add resource information to the parallel environment on required programs and files.
Enables spinning up required machines and operating in non-shared filesystem
environments.
progs -- Third party tools used in processing
fresources -- Required file-based resources needed. These will be transferred on non-shared
filesystems.
ensure_mem -- Dictionary of required minimum memory for programs used. Ensures
enough memory gets allocated on low-core machines.
"""
parallel = copy.deepcopy(parallel)
parallel["progs"] = progs
if fresources:
parallel["fresources"] = fresources
if ensure_mem:
parallel["ensure_mem"] = ensure_mem
return parallel
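# Illustrative sketch (hypothetical values): attaching program and memory
# requirements to a parallel runner configuration.
#
#   parallel = {"type": "local", "cores": 8}
#   wparallel = _wres(parallel, ["gatk", "samtools"], ensure_mem={"gatk": 4})
#   # wparallel["progs"] == ["gatk", "samtools"]
#   # wparallel["ensure_mem"] == {"gatk": 4}
#   # the input dict is untouched thanks to the deepcopy above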
class AbstractPipeline:
"""
Implement this class to participate in the Pipeline abstraction.
name: the analysis name in the run_info.yaml file:
design:
- analysis: name
run: the steps run to perform the analyses
"""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def name(self):
return
@abc.abstractmethod
def run(self, config, run_info_yaml, parallel, dirs, samples):
return
class _WorldWatcher:
"""Watch changes in the world and output directory and report.
Used to create input files we can feed into CWL creation about
the changed state of the world.
"""
def __init__(self, work_dir, is_on=True):
self._work_dir = work_dir
self._is_on = is_on
if not self._is_on:
return
self._out_dir = utils.safe_makedir(os.path.join(work_dir, "world2cwl"))
self._lworld = {}
self._lfiles = set([])
def _find_files(self):
out = []
for (dir, _, files) in os.walk(self._work_dir):
out += [os.path.join(dir, f).replace(self._work_dir + "/", "") for f in files]
return set(out)
def _items_to_world(self, items):
world = {}
for item in items:
assert len(item) == 1
world[dd.get_sample_name(item[0])] = item[0]
return world
def _compare_dicts(self, orig, new, ns):
out = {}
for key, val in new.items():
nskey = ns + [key]
orig_val = tz.get_in([key], orig)
if isinstance(val, dict) and isinstance(orig_val, dict):
for nkey, nval in self._compare_dicts(orig_val or {}, val or {}, nskey).items():
out = tz.update_in(out, [nkey], lambda x: nval)
elif val != orig_val:
print nskey, val, orig_val
out = tz.update_in(out, nskey, lambda x: val)
return out
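# Illustrative sketch (hypothetical worlds): only changed leaves survive,
# keyed by their namespaced path. Called on a _WorldWatcher instance ww:
#
#   ww._compare_dicts({"a": {"b": 1, "c": 3}}, {"a": {"b": 2, "c": 3}}, [])
#   # prints ['a', 'b'] 2 1 and returns {"a": {"b": 2}}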
def initialize(self, world):
if not self._is_on:
return
self._lfiles = self._find_files()
self._lworld = self._items_to_world(world)
def report(self, step, world):
if not self._is_on:
return
new_files = self._find_files()
file_changes = new_files - self._lfiles
self._lfiles = new_files
world_changes = self._compare_dicts(self._lworld, self._items_to_world(world), [])
self._lworld = world
import pprint
print step
pprint.pprint(file_changes)
pprint.pprint(world_changes)
class Variant2Pipeline(AbstractPipeline):
"""Streamlined variant calling pipeline for large files.
This is less generalized but faster in standard cases.
The goal is to replace the base variant calling approach.
"""
name = "variant2"
@classmethod
def run(self, config, run_info_yaml, parallel, dirs, samples):
## Alignment and preparation requiring the entire input file (multicore cluster)
with prun.start(_wres(parallel, ["aligner", "samtools", "sambamba"],
(["reference", "fasta"], ["reference", "aligner"], ["files"])),
samples, config, dirs, "multicore",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
ww = _WorldWatcher(dirs["work"], is_on=any([dd.get_cwl_reporting(d[0]) for d in samples]))
ww.initialize(samples)
with profile.report("alignment preparation", dirs):
samples = run_parallel("prep_align_inputs", samples)
ww.report("prep_align_inputs", samples)
samples = run_parallel("disambiguate_split", [samples])
with profile.report("alignment", dirs):
samples = run_parallel("process_alignment", samples)
samples = disambiguate.resolve(samples, run_parallel)
samples = alignprep.merge_split_alignments(samples, run_parallel)
with profile.report("callable regions", dirs):
samples = run_parallel("prep_samples", [samples])
samples = run_parallel("postprocess_alignment", samples)
samples = run_parallel("combine_sample_regions", [samples])
samples = region.clean_sample_data(samples)
with profile.report("structural variation initial", dirs):
samples = structural.run(samples, run_parallel, "initial")
with profile.report("hla typing", dirs):
samples = hla.run(samples, run_parallel)
## Variant calling on sub-regions of the input file (full cluster)
with prun.start(_wres(parallel, ["gatk", "picard", "variantcaller"]),
samples, config, dirs, "full",
multiplier=region.get_max_counts(samples), max_multicore=1) as run_parallel:
with profile.report("alignment post-processing", dirs):
samples = region.parallel_prep_region(samples, run_parallel)
with profile.report("variant calling", dirs):
samples = genotype.parallel_variantcall_region(samples, run_parallel)
## Finalize variants, BAMs and population databases (per-sample multicore cluster)
with prun.start(_wres(parallel, ["gatk", "gatk-vqsr", "snpeff", "bcbio_variation",
"gemini", "samtools", "fastqc", "bamtools",
"bcbio-variation-recall", "qsignature",
"svcaller"]),
samples, config, dirs, "multicore2",
multiplier=structural.parallel_multiplier(samples)) as run_parallel:
with profile.report("joint squaring off/backfilling", dirs):
samples = joint.square_off(samples, run_parallel)
with profile.report("variant post-processing", dirs):
samples = run_parallel("postprocess_variants", samples)
samples = run_parallel("split_variants_by_sample", samples)
with profile.report("prepped BAM merging", dirs):
samples = region.delayed_bamprep_merge(samples, run_parallel)
with profile.report("validation", dirs):
samples = run_parallel("compare_to_rm", samples)
samples = genotype.combine_multiple_callers(samples)
with profile.report("ensemble calling", dirs):
samples = ensemble.combine_calls_parallel(samples, run_parallel)
with profile.report("validation summary", dirs):
samples = validate.summarize_grading(samples)
with profile.report("structural variation final", dirs):
samples = structural.run(samples, run_parallel, "standard")
with profile.report("structural variation ensemble", dirs):
samples = structural.run(samples, run_parallel, "ensemble")
with profile.report("structural variation validation", dirs):
samples = run_parallel("validate_sv", samples)
with profile.report("heterogeneity", dirs):
samples = heterogeneity.run(samples, run_parallel)
with profile.report("population database", dirs):
samples = population.prep_db_parallel(samples, run_parallel)
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("archive", dirs):
samples = archive.compress(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
def _debug_samples(i, samples):
print "---", i, len(samples)
for sample in (x[0] for x in samples):
print " ", sample["description"], sample.get("region"), \
utils.get_in(sample, ("config", "algorithm", "variantcaller")), \
utils.get_in(sample, ("config", "algorithm", "jointcaller")), \
utils.get_in(sample, ("metadata", "batch")), \
[x.get("variantcaller") for x in sample.get("variants", [])], \
sample.get("work_bam"), \
sample.get("vrn_file")
class SNPCallingPipeline(Variant2Pipeline):
"""Back compatible: old name for variant analysis.
"""
name = "SNP calling"
class VariantPipeline(Variant2Pipeline):
"""Back compatibility; old name
"""
name = "variant"
class StandardPipeline(AbstractPipeline):
"""Minimal pipeline with alignment and QC.
"""
name = "Standard"
@classmethod
def run(self, config, run_info_yaml, parallel, dirs, samples):
## Alignment and preparation requiring the entire input file (multicore cluster)
with prun.start(_wres(parallel, ["aligner", "samtools", "sambamba"]),
samples, config, dirs, "multicore") as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
with profile.report("alignment", dirs):
samples = run_parallel("process_alignment", samples)
with profile.report("callable regions", dirs):
samples = run_parallel("prep_samples", [samples])
samples = run_parallel("postprocess_alignment", samples)
samples = run_parallel("combine_sample_regions", [samples])
samples = region.clean_sample_data(samples)
## Quality control
with prun.start(_wres(parallel, ["fastqc", "bamtools", "qsignature", "kraken", "gatk", "samtools"]),
samples, config, dirs, "multicore2") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
class MinimalPipeline(StandardPipeline):
name = "Minimal"
class SailfishPipeline(AbstractPipeline):
name = "sailfish"
@classmethod
def run(self, config, run_info_yaml, parallel, dirs, samples):
with prun.start(_wres(parallel, ["picard", "cutadapt"]),
samples, config, dirs, "trimming") as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
with profile.report("adapter trimming", dirs):
samples = run_parallel("prepare_sample", samples)
samples = run_parallel("trim_sample", samples)
with prun.start(_wres(parallel, ["sailfish"]), samples, config, dirs,
"sailfish") as run_parallel:
with profile.report("sailfish", dirs):
samples = run_parallel("run_sailfish", samples)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
return samples
class RnaseqPipeline(AbstractPipeline):
name = "RNA-seq"
@classmethod
def run(self, config, run_info_yaml, parallel, dirs, samples):
with prun.start(_wres(parallel, ["picard", "cutadapt"]),
samples, config, dirs, "trimming", max_multicore=1) as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
with profile.report("adapter trimming", dirs):
samples = run_parallel("prepare_sample", samples)
samples = run_parallel("trim_sample", samples)
with prun.start(_wres(parallel, ["aligner", "picard"],
ensure_mem={"tophat": 10, "tophat2": 10, "star": 2, "hisat2": 8}),
samples, config, dirs, "alignment",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("alignment", dirs):
samples = run_parallel("disambiguate_split", [samples])
samples = run_parallel("process_alignment", samples)
with prun.start(_wres(parallel, ["samtools", "cufflinks"]),
samples, config, dirs, "rnaseqcount") as run_parallel:
with profile.report("disambiguation", dirs):
samples = disambiguate.resolve(samples, run_parallel)
with profile.report("transcript assembly", dirs):
samples = rnaseq.assemble_transcripts(run_parallel, samples)
with profile.report("estimate expression (threaded)", dirs):
samples = rnaseq.quantitate_expression_parallel(samples, run_parallel)
with prun.start(_wres(parallel, ["dexseq", "express"]), samples, config,
dirs, "rnaseqcount-singlethread", max_multicore=1) as run_parallel:
with profile.report("estimate expression (single threaded)", dirs):
samples = rnaseq.quantitate_expression_noparallel(samples, run_parallel)
samples = rnaseq.combine_files(samples)
with prun.start(_wres(parallel, ["gatk"]), samples, config,
dirs, "rnaseq-variation") as run_parallel:
with profile.report("RNA-seq variant calling", dirs):
samples = rnaseq.rnaseq_variant_calling(samples, run_parallel)
with prun.start(_wres(parallel, ["samtools", "fastqc", "qualimap",
"kraken", "gatk"], ensure_mem={"qualimap": 4}),
samples, config, dirs, "qc") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
class smallRnaseqPipeline(AbstractPipeline):
name = "smallRNA-seq"
@classmethod
def run(self, config, run_info_yaml, parallel, dirs, samples):
# causes a circular import at the top level
from bcbio.srna.group import report as srna_report
with prun.start(_wres(parallel, ["picard", "cutadapt"]),
samples, config, dirs, "trimming") as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
with profile.report("adapter trimming", dirs):
samples = run_parallel("prepare_sample", samples)
samples = run_parallel("trim_srna_sample", samples)
with prun.start(_wres(parallel, ["aligner", "picard", "samtools"],
ensure_mem={"bowtie": 8, "bowtie2": 8, "star": 2}),
[samples[0]], config, dirs, "alignment") as run_parallel:
with profile.report("prepare", dirs):
samples = run_parallel("seqcluster_prepare", [samples])
with profile.report("alignment", dirs):
samples = run_parallel("srna_alignment", [samples])
with prun.start(_wres(parallel, ["picard", "miraligner"]),
samples, config, dirs, "annotation") as run_parallel:
with profile.report("small RNA annotation", dirs):
samples = run_parallel("srna_annotation", samples)
with prun.start(_wres(parallel, ["seqcluster"],
ensure_mem={"seqcluster": 8}),
[samples[0]], config, dirs, "cluster") as run_parallel:
with profile.report("cluster", dirs):
samples = run_parallel("seqcluster_cluster", [samples])
with prun.start(_wres(parallel, ["picard", "fastqc"]),
samples, config, dirs, "qc") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("report", dirs):
srna_report(samples)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
return samples
class ChipseqPipeline(AbstractPipeline):
name = "chip-seq"
@classmethod
def run(self, config, run_info_yaml, parallel, dirs, samples):
with prun.start(_wres(parallel, ["aligner", "picard"]),
samples, config, dirs, "multicore",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
samples = run_parallel("prepare_sample", samples)
samples = run_parallel("trim_sample", samples)
samples = run_parallel("disambiguate_split", [samples])
samples = run_parallel("process_alignment", samples)
with prun.start(_wres(parallel, ["picard", "fastqc"]),
samples, config, dirs, "persample") as run_parallel:
with profile.report("disambiguation", dirs):
samples = disambiguate.resolve(samples, run_parallel)
samples = run_parallel("clean_chipseq_alignment", samples)
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
return samples
def _get_pipeline(item):
from bcbio.log import logger
SUPPORTED_PIPELINES = {x.name.lower(): x for x in
utils.itersubclasses(AbstractPipeline)}
analysis_type = item.get("analysis", "").lower()
if analysis_type not in SUPPORTED_PIPELINES:
logger.error("Cannot determine which type of analysis to run, "
"set in the run_info under details.")
sys.exit(1)
else:
return SUPPORTED_PIPELINES[analysis_type]
def _pair_samples_with_pipelines(run_info_yaml, config):
"""Map samples defined in input file to pipelines to run.
"""
with open(run_info_yaml) as in_handle:
samples = yaml.safe_load(in_handle)
if isinstance(samples, dict):
resources = samples.pop("resources", {})
samples = samples["details"]
else:
resources = {}
ready_samples = []
for sample in samples:
if "files" in sample:
del sample["files"]
# add any resources to this item to recalculate global configuration
usample = copy.deepcopy(sample)
usample.pop("algorithm", None)
if "resources" not in usample:
usample["resources"] = {}
for prog, pkvs in resources.iteritems():
if prog not in usample["resources"]:
usample["resources"][prog] = {}
for key, val in pkvs.iteritems():
usample["resources"][prog][key] = val
config = config_utils.update_w_custom(config, usample)
sample["resources"] = {}
ready_samples.append(sample)
paired = [(x, _get_pipeline(x)) for x in ready_samples]
d = defaultdict(list)
for x in paired:
d[x[1]].append([x[0]])
return d, config
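# Illustrative sketch (hypothetical run_info.yaml contents): two details
# entries with analysis "variant2" and "RNA-seq" map samples to their
# pipeline classes.
#
#   pipelines, config = _pair_samples_with_pipelines("run_info.yaml", config)
#   # pipelines == {Variant2Pipeline: [[sample1]], RnaseqPipeline: [[sample2]]}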
| lpantano/bcbio-nextgen | bcbio/pipeline/main.py | Python | mit | 25,515 | [
"Bowtie"
] | a741da6c0a431823979a5044f5f96e88fd01c9ad376ffa7793effe0f6452d4b9 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2007-2008 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
import datetime
from decimal import Decimal
import os
from kiwi.component import get_utility
from stoqlib.domain.devices import FiscalDayHistory, FiscalDayTax
from stoqlib.domain.test.domaintest import DomainTest
from stoqlib.lib.diffutils import diff_files
from stoqlib.lib.interfaces import IAppInfo
from stoqlib.lib.unittestutils import get_tests_datadir
from ecf.cat52 import CATFile
from ecf.catgenerator import async
from ecf.ecfdomain import ECFPrinter, FiscalSaleHistory
def compare_files(sfile, basename):
expected = basename + '-expected.txt'
output = basename + '-output.txt'
sfile.write(output)
expected = get_tests_datadir('plugins', expected)
diff = diff_files(expected, output)
os.unlink(output)
return diff
class Cat52Test(DomainTest):
def test_complete(self):
station = self.create_station()
today = datetime.date(2007, 1, 1)
reduction_date = datetime.datetime(2007, 1, 1, 23, 59)
day = FiscalDayHistory(store=self.store,
emission_date=today,
station=station,
serial=u'serial',
serial_id=1,
coupon_start=1,
coupon_end=23,
crz=18,
cro=25,
period_total=Decimal("456.00"),
total=Decimal("123141.00"),
reduction_date=reduction_date)
for code, value, type in [(u'2500', Decimal("123.00"), u'ICMS'),
(u'F', Decimal("789.00"), u'ICMS')]:
FiscalDayTax(fiscal_day_history=day, code=code,
value=value, type=type,
store=self.store)
printer = ECFPrinter(
store=self.store,
model=u'FS345',
brand=u'daruma',
device_name=u'test',
device_serial=u'serial',
baudrate=9600,
station=station,
user_number=1,
register_date=today,
register_cro=1,
)
f = CATFile(printer)
f.software_version = '6.6.6' # kiko sends <3
appinfo = get_utility(IAppInfo)
f.add_software_house(async, appinfo.get('name'),
appinfo.get('version'))
# Can't call add_ecf_identification, since it depends on a
# connected printer
# f.add_ecf_identification()
for item in self.store.find(FiscalDayHistory):
f.add_z_reduction(item)
for i, tax in enumerate(item.taxes):
f.add_z_reduction_details(item, tax, i + 1)
sale = self.create_sale()
sale.client = self.create_client()
sale.confirm_date = today
sellable = self.add_product(sale, price=100)
sellable.code = u'09999'
self.add_payments(sale)
history = FiscalSaleHistory(store=self.store,
sale=sale)
f.add_fiscal_coupon(sale, sale.client, history)
for i, item in enumerate(sale.get_items()):
f.add_fiscal_coupon_details(sale, sale.client, history,
item, 800, i + 1)
for payment in sale.payments:
f.add_payment_method(sale, history, payment)
diff = compare_files(f, 'cat52')
self.failIf(diff, '%s\n%s' % ("Files differ, output:", diff))
| andrebellafronte/stoq | plugins/ecf/test/test_cat52.py | Python | gpl-2.0 | 4,403 | [
"VisIt"
] | 9d7be635ec9f7b4cc6a53dd63f32b6cc372ab3e5fc4649c6e8fdb6c21d48f882 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements a core class LammpsData for generating/parsing
LAMMPS data file, and other bridging classes to build LammpsData from
molecules. This module also implements a subclass CombinedData for
merging LammpsData objects.
Only point particle styles are supported for now (atom_style in angle,
atomic, bond, charge, full and molecular only). See the pages below for
more info.
http://lammps.sandia.gov/doc/atom_style.html
http://lammps.sandia.gov/doc/read_data.html
"""
import itertools
import re
import warnings
from collections import OrderedDict
from io import StringIO
from pathlib import Path
import numpy as np
import pandas as pd
from monty.dev import deprecated
from monty.json import MSONable
from monty.serialization import loadfn
from pymatgen.core import yaml
from pymatgen.core.periodic_table import Element
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Molecule, Structure
from pymatgen.core.operations import SymmOp
from pymatgen.util.io_utils import clean_lines
__author__ = "Kiran Mathew, Zhi Deng, Tingzheng Hou"
__copyright__ = "Copyright 2018, The Materials Virtual Lab"
__version__ = "2.0"
__maintainer__ = "Tingzheng Hou"
__email__ = "tingzheng_hou@berkeley.edu"
__date__ = "May 29, 2021"
MODULE_DIR = Path(__file__).resolve().parent
SECTION_KEYWORDS = {
"atom": [
"Atoms",
"Velocities",
"Masses",
"Ellipsoids",
"Lines",
"Triangles",
"Bodies",
],
"topology": ["Bonds", "Angles", "Dihedrals", "Impropers"],
"ff": [
"Pair Coeffs",
"PairIJ Coeffs",
"Bond Coeffs",
"Angle Coeffs",
"Dihedral Coeffs",
"Improper Coeffs",
],
"class2": [
"BondBond Coeffs",
"BondAngle Coeffs",
"MiddleBondTorsion Coeffs",
"EndBondTorsion Coeffs",
"AngleTorsion Coeffs",
"AngleAngleTorsion Coeffs",
"BondBond13 Coeffs",
"AngleAngle Coeffs",
],
}
CLASS2_KEYWORDS = {
"Angle Coeffs": ["BondBond Coeffs", "BondAngle Coeffs"],
"Dihedral Coeffs": [
"MiddleBondTorsion Coeffs",
"EndBondTorsion Coeffs",
"AngleTorsion Coeffs",
"AngleAngleTorsion Coeffs",
"BondBond13 Coeffs",
],
"Improper Coeffs": ["AngleAngle Coeffs"],
}
SECTION_HEADERS = {
"Masses": ["mass"],
"Velocities": ["vx", "vy", "vz"],
"Bonds": ["type", "atom1", "atom2"],
"Angles": ["type", "atom1", "atom2", "atom3"],
"Dihedrals": ["type", "atom1", "atom2", "atom3", "atom4"],
"Impropers": ["type", "atom1", "atom2", "atom3", "atom4"],
}
ATOMS_HEADERS = {
"angle": ["molecule-ID", "type", "x", "y", "z"],
"atomic": ["type", "x", "y", "z"],
"bond": ["molecule-ID", "type", "x", "y", "z"],
"charge": ["type", "q", "x", "y", "z"],
"full": ["molecule-ID", "type", "q", "x", "y", "z"],
"molecular": ["molecule-ID", "type", "x", "y", "z"],
}
class LammpsBox(MSONable):
"""
Object for representing a simulation box in LAMMPS settings.
"""
def __init__(self, bounds, tilt=None):
"""
Args:
bounds: A (3, 2) array/list of floats setting the
boundaries of simulation box.
tilt: A (3,) array/list of floats setting the tilt of
simulation box. Default to None, i.e., use an
orthogonal box.
"""
bounds_arr = np.array(bounds)
assert bounds_arr.shape == (
3,
2,
), f"Expecting a (3, 2) array for bounds, got {bounds_arr.shape}"
self.bounds = bounds_arr.tolist()
matrix = np.diag(bounds_arr[:, 1] - bounds_arr[:, 0])
self.tilt = None
if tilt is not None:
tilt_arr = np.array(tilt)
assert tilt_arr.shape == (3,), f"Expecting a (3,) array for box_tilt, got {tilt_arr.shape}"
self.tilt = tilt_arr.tolist()
matrix[1, 0] = tilt_arr[0]
matrix[2, 0] = tilt_arr[1]
matrix[2, 1] = tilt_arr[2]
self._matrix = matrix
def __str__(self):
return self.get_string()
def __repr__(self):
return self.get_string()
@property
def volume(self):
"""
Volume of simulation box.
"""
m = self._matrix
return np.dot(np.cross(m[0], m[1]), m[2])
def get_string(self, significant_figures=6):
"""
Returns the string representation of simulation box in LAMMPS
data file format.
Args:
significant_figures (int): No. of significant figures to
output for box settings. Default to 6.
Returns:
String representation
"""
ph = "{:.%df}" % significant_figures
lines = []
for bound, d in zip(self.bounds, "xyz"):
fillers = bound + [d] * 2
bound_format = " ".join([ph] * 2 + [" {}lo {}hi"])
lines.append(bound_format.format(*fillers))
if self.tilt:
tilt_format = " ".join([ph] * 3 + [" xy xz yz"])
lines.append(tilt_format.format(*self.tilt))
return "\n".join(lines)
def get_box_shift(self, i):
"""
Calculates the coordinate shift due to PBC.
Args:
i: A (n, 3) integer array containing the labels for box
images of n entries.
Returns:
Coordinate shift array with the same shape as i
"""
return np.inner(i, self._matrix.T)
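# Illustrative example (not part of the original class): for the same
# orthogonal box, an image flag of (1, 0, -1) shifts coordinates by one box
# length along +x and one along -z:
#
#   >>> LammpsBox([[0, 10], [0, 10], [0, 10]]).get_box_shift([1, 0, -1])
#   array([ 10,   0, -10])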
def to_lattice(self):
"""
Converts the simulation box to a more powerful Lattice backend.
Note that Lattice is always periodic in 3D space while a
simulation box is not necessarily periodic in all dimensions.
Returns:
Lattice
"""
return Lattice(self._matrix)
def lattice_2_lmpbox(lattice, origin=(0, 0, 0)):
"""
Converts a lattice object to LammpsBox, and calculates the symmetry
operation used.
Args:
lattice (Lattice): Input lattice.
origin: A (3,) array/list of floats setting lower bounds of
simulation box. Default to (0, 0, 0).
Returns:
LammpsBox, SymmOp
"""
a, b, c = lattice.abc
xlo, ylo, zlo = origin
xhi = a + xlo
m = lattice.matrix
xy = np.dot(m[1], m[0] / a)
yhi = np.sqrt(b ** 2 - xy ** 2) + ylo
xz = np.dot(m[2], m[0] / a)
yz = (np.dot(m[1], m[2]) - xy * xz) / (yhi - ylo)
zhi = np.sqrt(c ** 2 - xz ** 2 - yz ** 2) + zlo
tilt = None if lattice.is_orthogonal else [xy, xz, yz]
rot_matrix = np.linalg.solve([[xhi - xlo, 0, 0], [xy, yhi - ylo, 0], [xz, yz, zhi - zlo]], m)
bounds = [[xlo, xhi], [ylo, yhi], [zlo, zhi]]
symmop = SymmOp.from_rotation_and_translation(rot_matrix, origin)
return LammpsBox(bounds, tilt), symmop
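# Illustrative sketch (not part of the original module): a cubic lattice maps
# to an orthogonal box with no tilt.
#
#   from pymatgen.core.lattice import Lattice
#   box, symmop = lattice_2_lmpbox(Lattice.cubic(5))
#   # box.bounds == [[0.0, 5.0], [0.0, 5.0], [0.0, 5.0]]; box.tilt is None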
class LammpsData(MSONable):
"""
Object for representing the data in a LAMMPS data file.
"""
def __init__(
self,
box,
masses,
atoms,
velocities=None,
force_field=None,
topology=None,
atom_style="full",
):
"""
This is a low level constructor designed to work with parsed
data or other bridging objects (ForceField and Topology). Not
recommended to use directly.
Args:
box (LammpsBox): Simulation box.
masses (pandas.DataFrame): DataFrame with one column
["mass"] for Masses section.
atoms (pandas.DataFrame): DataFrame with multiple columns
for Atoms section. Column names vary with atom_style.
velocities (pandas.DataFrame): DataFrame with three columns
["vx", "vy", "vz"] for Velocities section. Optional
with default to None. If not None, its index should be
consistent with atoms.
force_field (dict): Data for force field sections. Optional
with default to None. Only keywords in force field and
class 2 force field are valid keys, and each value is a
DataFrame.
topology (dict): Data for topology sections. Optional with
default to None. Only keywords in topology are valid
keys, and each value is a DataFrame.
atom_style (str): Output atom_style. Default to "full".
"""
if velocities is not None:
assert len(velocities) == len(atoms), "Inconsistency found between atoms and velocities"
if force_field:
all_ff_kws = SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]
force_field = {k: v for k, v in force_field.items() if k in all_ff_kws}
if topology:
topology = {k: v for k, v in topology.items() if k in SECTION_KEYWORDS["topology"]}
self.box = box
self.masses = masses
self.atoms = atoms
self.velocities = velocities
self.force_field = force_field
self.topology = topology
self.atom_style = atom_style
def __str__(self):
return self.get_string()
def __repr__(self):
return self.get_string()
@property
def structure(self):
"""
Exports a periodic structure object representing the simulation
box.
Returns:
Structure
"""
masses = self.masses
atoms = self.atoms.copy()
if "nx" in atoms.columns:
atoms.drop(["nx", "ny", "nz"], axis=1, inplace=True)
atoms["molecule-ID"] = 1
ld_copy = self.__class__(self.box, masses, atoms)
topologies = ld_copy.disassemble()[-1]
molecule = topologies[0].sites
coords = molecule.cart_coords - np.array(self.box.bounds)[:, 0]
species = molecule.species
latt = self.box.to_lattice()
site_properties = {}
if "q" in atoms:
site_properties["charge"] = atoms["q"].values
if self.velocities is not None:
site_properties["velocities"] = self.velocities.values
return Structure(
latt,
species,
coords,
coords_are_cartesian=True,
site_properties=site_properties,
)
def get_string(self, distance=6, velocity=8, charge=4, hybrid=True):
"""
Returns the string representation of LammpsData, essentially
the string to be written to a file. Supports reading and
writing hybrid-style coeffs.
Args:
distance (int): No. of significant figures to output for
box settings (bounds and tilt) and atomic coordinates.
Default to 6.
velocity (int): No. of significant figures to output for
velocities. Default to 8.
charge (int): No. of significant figures to output for
charges. Default to 4.
hybrid (bool): Whether to write hybrid coeffs types.
Default to True. If the data object has no hybrid
coeffs types and has a large coeffs section, one may
use False to speed up the process. Otherwise the
default is recommended.
Returns:
String representation
"""
file_template = """Generated by pymatgen.io.lammps.data.LammpsData
{stats}
{box}
{body}
"""
box = self.box.get_string(distance)
body_dict = OrderedDict()
body_dict["Masses"] = self.masses
types = OrderedDict()
types["atom"] = len(self.masses)
if self.force_field:
all_ff_kws = SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]
ff_kws = [k for k in all_ff_kws if k in self.force_field]
for kw in ff_kws:
body_dict[kw] = self.force_field[kw]
if kw in SECTION_KEYWORDS["ff"][2:]:
types[kw.lower()[:-7]] = len(self.force_field[kw])
body_dict["Atoms"] = self.atoms
counts = OrderedDict()
counts["atoms"] = len(self.atoms)
if self.velocities is not None:
body_dict["Velocities"] = self.velocities
if self.topology:
for kw in SECTION_KEYWORDS["topology"]:
if kw in self.topology:
body_dict[kw] = self.topology[kw]
counts[kw.lower()] = len(self.topology[kw])
all_stats = list(counts.values()) + list(types.values())
stats_template = "{:>%d} {}" % len(str(max(all_stats)))
count_lines = [stats_template.format(v, k) for k, v in counts.items()]
type_lines = [stats_template.format(v, k + " types") for k, v in types.items()]
stats = "\n".join(count_lines + [""] + type_lines)
def map_coords(q):
return ("{:.%df}" % distance).format(q)
def map_velos(q):
return ("{:.%df}" % velocity).format(q)
def map_charges(q):
return ("{:.%df}" % charge).format(q)
float_format = "{:.9f}".format
float_format_2 = "{:.1f}".format
int_format = "{:.0f}".format
default_formatters = {
"x": map_coords,
"y": map_coords,
"z": map_coords,
"vx": map_velos,
"vy": map_velos,
"vz": map_velos,
"q": map_charges,
}
coeffsdatatype = loadfn(str(MODULE_DIR / "CoeffsDataType.yaml"))
coeffs = {}
for style, types in coeffsdatatype.items():
coeffs[style] = {}
for type, formatter in types.items():
coeffs[style][type] = {}
for coeff, datatype in formatter.items():
if datatype == "int_format":
coeffs[style][type][coeff] = int_format
elif datatype == "float_format_2":
coeffs[style][type][coeff] = float_format_2
else:
coeffs[style][type][coeff] = float_format
section_template = "{kw}\n\n{df}\n"
parts = []
for k, v in body_dict.items():
index = k != "PairIJ Coeffs"
if (
k
in [
"Bond Coeffs",
"Angle Coeffs",
"Dihedral Coeffs",
"Improper Coeffs",
]
and hybrid
):
listofdf = np.array_split(v, len(v.index))
df_string = ""
for i, df in enumerate(listofdf):
if isinstance(df.iloc[0]["coeff1"], str):
try:
formatters = {
**default_formatters,
**coeffs[k][df.iloc[0]["coeff1"]],
}
except KeyError:
formatters = default_formatters
line_string = df.to_string(
header=False,
formatters=formatters,
index_names=False,
index=index,
na_rep="",
)
else:
line_string = v.to_string(
header=False,
formatters=default_formatters,
index_names=False,
index=index,
na_rep="",
).splitlines()[i]
df_string += line_string.replace("nan", "").rstrip() + "\n"
else:
df_string = v.to_string(
header=False,
formatters=default_formatters,
index_names=False,
index=index,
na_rep="",
)
parts.append(section_template.format(kw=k, df=df_string))
body = "\n".join(parts)
return file_template.format(stats=stats, box=box, body=body)
def write_file(self, filename, distance=6, velocity=8, charge=4):
"""
Writes LammpsData to file.
Args:
filename (str): Filename.
distance (int): No. of significant figures to output for
box settings (bounds and tilt) and atomic coordinates.
Default to 6.
velocity (int): No. of significant figures to output for
velocities. Default to 8.
charge (int): No. of significant figures to output for
charges. Default to 4.
"""
with open(filename, "w") as f:
f.write(self.get_string(distance=distance, velocity=velocity, charge=charge))
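# Illustrative usage (hypothetical file names): round-tripping a data file.
#
#   ld = LammpsData.from_file("in.data", atom_style="full")
#   ld.write_file("out.data", distance=8)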
def disassemble(self, atom_labels=None, guess_element=True, ff_label="ff_map"):
"""
Breaks down LammpsData to building blocks
(LammpsBox, ForceField and a series of Topology).
RESTRICTIONS APPLIED:
1. No complex force field defined on anything other than atom
types, i.e., no case where the same type or equivalent types of
topology carry more than one set of coefficients.
2. No intermolecular topologies (with atoms from different
molecule-ID) since a Topology object includes data for ONE
molecule or structure only.
Args:
atom_labels ([str]): List of strings (must be different
from one another) for labelling each atom type found in
Masses section. Default to None, where the labels are
automatically added based on either element guess or
dummy specie assignment.
guess_element (bool): Whether to guess the element based on
its atomic mass. Default to True, otherwise dummy
species "Qa", "Qb", ... will be assigned to various
atom types. The guessed or assigned elements will be
reflected on atom labels if atom_labels is None, as
well as on the species of molecule in each Topology.
ff_label (str): Site property key for labeling atoms of
different types. Default to "ff_map".
Returns:
LammpsBox, ForceField, [Topology]
"""
atoms_df = self.atoms.copy()
if "nx" in atoms_df.columns:
atoms_df[["x", "y", "z"]] += self.box.get_box_shift(atoms_df[["nx", "ny", "nz"]].values)
atoms_df = pd.concat([atoms_df, self.velocities], axis=1)
mids = atoms_df.get("molecule-ID")
if mids is None:
unique_mids = [1]
data_by_mols = {1: {"Atoms": atoms_df}}
else:
unique_mids = np.unique(mids)
data_by_mols = {}
for k in unique_mids:
df = atoms_df[atoms_df["molecule-ID"] == k]
data_by_mols[k] = {"Atoms": df}
masses = self.masses.copy()
masses["label"] = atom_labels
unique_masses = np.unique(masses["mass"])
if guess_element:
ref_masses = [el.atomic_mass.real for el in Element]
diff = np.abs(np.array(ref_masses) - unique_masses[:, None])
atomic_numbers = np.argmin(diff, axis=1) + 1
symbols = [Element.from_Z(an).symbol for an in atomic_numbers]
else:
symbols = ["Q%s" % a for a in map(chr, range(97, 97 + len(unique_masses)))]
for um, s in zip(unique_masses, symbols):
masses.loc[masses["mass"] == um, "element"] = s
if atom_labels is None: # add unique labels based on elements
for el, vc in masses["element"].value_counts().iteritems():
masses.loc[masses["element"] == el, "label"] = ["%s%d" % (el, c) for c in range(1, vc + 1)]
assert masses["label"].nunique(dropna=False) == len(masses), "Expecting unique atom label for each type"
mass_info = [tuple([r["label"], r["mass"]]) for _, r in masses.iterrows()]
nonbond_coeffs, topo_coeffs = None, None
if self.force_field:
if "PairIJ Coeffs" in self.force_field:
nbc = self.force_field["PairIJ Coeffs"]
nbc = nbc.sort_values(["id1", "id2"]).drop(["id1", "id2"], axis=1)
nonbond_coeffs = [list(t) for t in nbc.itertuples(False, None)]
elif "Pair Coeffs" in self.force_field:
nbc = self.force_field["Pair Coeffs"].sort_index()
nonbond_coeffs = [list(t) for t in nbc.itertuples(False, None)]
topo_coeffs = {k: [] for k in SECTION_KEYWORDS["ff"][2:] if k in self.force_field}
for kw in topo_coeffs.keys():
class2_coeffs = {
k: list(v.itertuples(False, None))
for k, v in self.force_field.items()
if k in CLASS2_KEYWORDS.get(kw, [])
}
ff_df = self.force_field[kw]
for t in ff_df.itertuples(True, None):
d = {"coeffs": list(t[1:]), "types": []}
if class2_coeffs:
d.update({k: list(v[t[0] - 1]) for k, v in class2_coeffs.items()})
topo_coeffs[kw].append(d)
if self.topology:
def label_topo(t):
return tuple(masses.loc[atoms_df.loc[t, "type"], "label"])
for k, v in self.topology.items():
ff_kw = k[:-1] + " Coeffs"
for topo in v.itertuples(False, None):
topo_idx = topo[0] - 1
indices = list(topo[1:])
mids = atoms_df.loc[indices]["molecule-ID"].unique()
assert (
len(mids) == 1
), "Do not support intermolecular topology formed by atoms with different molecule-IDs"
label = label_topo(indices)
topo_coeffs[ff_kw][topo_idx]["types"].append(label)
if data_by_mols[mids[0]].get(k):
data_by_mols[mids[0]][k].append(indices)
else:
data_by_mols[mids[0]][k] = [indices]
if topo_coeffs:
for v in topo_coeffs.values():
for d in v:
d["types"] = list(set(d["types"]))
ff = ForceField(mass_info=mass_info, nonbond_coeffs=nonbond_coeffs, topo_coeffs=topo_coeffs)
topo_list = []
for mid in unique_mids:
data = data_by_mols[mid]
atoms = data["Atoms"]
shift = min(atoms.index)
type_ids = atoms["type"]
species = masses.loc[type_ids, "element"]
labels = masses.loc[type_ids, "label"]
coords = atoms[["x", "y", "z"]]
m = Molecule(species.values, coords.values, site_properties={ff_label: labels.values})
charges = atoms.get("q")
velocities = atoms[["vx", "vy", "vz"]] if "vx" in atoms.columns else None
topologies = {}
for kw in SECTION_KEYWORDS["topology"]:
if data.get(kw):
topologies[kw] = (np.array(data[kw]) - shift).tolist()
topologies = None if not topologies else topologies
topo_list.append(
Topology(
sites=m,
ff_label=ff_label,
charges=charges,
velocities=velocities,
topologies=topologies,
)
)
return self.box, ff, topo_list
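# Illustrative usage (hypothetical file name): breaking a parsed file back
# into its building blocks.
#
#   box, ff, topologies = LammpsData.from_file("polymer.data").disassemble()
#   # one Topology per molecule-ID found in the Atoms section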
@classmethod
def from_file(cls, filename, atom_style="full", sort_id=False):
"""
Constructor that parses a file.
Args:
filename (str): Filename to read.
atom_style (str): Associated atom_style. Default to "full".
sort_id (bool): Whether to sort each section by id. Default
to False.
"""
with open(filename) as f:
lines = f.readlines()
kw_pattern = r"|".join(itertools.chain(*SECTION_KEYWORDS.values()))
section_marks = [i for i, l in enumerate(lines) if re.search(kw_pattern, l)]
parts = np.split(lines, section_marks)
float_group = r"([0-9eE.+-]+)"
header_pattern = {}
header_pattern["counts"] = r"^\s*(\d+)\s+([a-zA-Z]+)$"
header_pattern["types"] = r"^\s*(\d+)\s+([a-zA-Z]+)\s+types$"
header_pattern["bounds"] = r"^\s*{}$".format(r"\s+".join([float_group] * 2 + [r"([xyz])lo \3hi"]))
header_pattern["tilt"] = r"^\s*{}$".format(r"\s+".join([float_group] * 3 + ["xy xz yz"]))
header = {"counts": {}, "types": {}}
bounds = {}
for l in clean_lines(parts[0][1:]): # skip the 1st line
match = None
for k, v in header_pattern.items():
match = re.match(v, l)
if match:
break
if match and k in ["counts", "types"]:
header[k][match.group(2)] = int(match.group(1))
elif match and k == "bounds":
g = match.groups()
bounds[g[2]] = [float(i) for i in g[:2]]
elif match and k == "tilt":
header["tilt"] = [float(i) for i in match.groups()]
header["bounds"] = [bounds.get(i, [-0.5, 0.5]) for i in "xyz"]
box = LammpsBox(header["bounds"], header.get("tilt"))
def parse_section(sec_lines):
title_info = sec_lines[0].split("#", 1)
kw = title_info[0].strip()
sio = StringIO("".join(sec_lines[2:])) # skip the 2nd line
if kw.endswith("Coeffs") and not kw.startswith("PairIJ"):
df_list = [
pd.read_csv(StringIO(line), header=None, comment="#", delim_whitespace=True)
for line in sec_lines[2:]
if line.strip()
]
df = pd.concat(df_list, ignore_index=True)
names = ["id"] + ["coeff%d" % i for i in range(1, df.shape[1])]
else:
df = pd.read_csv(sio, header=None, comment="#", delim_whitespace=True)
if kw == "PairIJ Coeffs":
names = ["id1", "id2"] + ["coeff%d" % i for i in range(1, df.shape[1] - 1)]
df.index.name = None # pylint: disable=E1101
elif kw in SECTION_HEADERS:
names = ["id"] + SECTION_HEADERS[kw]
elif kw == "Atoms":
names = ["id"] + ATOMS_HEADERS[atom_style]
if df.shape[1] == len(names): # pylint: disable=E1101
pass
elif df.shape[1] == len(names) + 3: # pylint: disable=E1101
names += ["nx", "ny", "nz"]
else:
raise ValueError("Format in Atoms section inconsistent with atom_style %s" % atom_style)
else:
raise NotImplementedError("Parser for %s section not implemented" % kw)
df.columns = names
if sort_id:
sort_by = "id" if kw != "PairIJ Coeffs" else ["id1", "id2"]
df.sort_values(sort_by, inplace=True)
if "id" in df.columns:
df.set_index("id", drop=True, inplace=True)
df.index.name = None
return kw, df
err_msg = "Bad LAMMPS data format where "
body = {}
seen_atoms = False
for part in parts[1:]:
name, section = parse_section(part)
if name == "Atoms":
seen_atoms = True
if (
name in ["Velocities"] + SECTION_KEYWORDS["topology"] and not seen_atoms
): # Atoms must appear earlier than these
raise RuntimeError(err_msg + "%s section appears before Atoms section" % name)
body.update({name: section})
err_msg += "Nos. of {} do not match between header and {} section"
assert len(body["Masses"]) == header["types"]["atom"], err_msg.format("atom types", "Masses")
atom_sections = ["Atoms", "Velocities"] if "Velocities" in body else ["Atoms"]
for s in atom_sections:
assert len(body[s]) == header["counts"]["atoms"], err_msg.format("atoms", s)
for s in SECTION_KEYWORDS["topology"]:
if header["counts"].get(s.lower(), 0) > 0:
assert len(body[s]) == header["counts"][s.lower()], err_msg.format(s.lower(), s)
items = {k.lower(): body[k] for k in ["Masses", "Atoms"]}
items["velocities"] = body.get("Velocities")
ff_kws = [k for k in body if k in SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]]
items["force_field"] = {k: body[k] for k in ff_kws} if ff_kws else None
topo_kws = [k for k in body if k in SECTION_KEYWORDS["topology"]]
items["topology"] = {k: body[k] for k in topo_kws} if topo_kws else None
items["atom_style"] = atom_style
items["box"] = box
return cls(**items)
@classmethod
def from_ff_and_topologies(cls, box, ff, topologies, atom_style="full"):
"""
        Constructor building LammpsData from a ForceField object and a
        list of Topology objects. Intermolecular topologies are not
        supported, since a Topology object includes data for ONE
        molecule or structure only.
Args:
box (LammpsBox): Simulation box.
ff (ForceField): ForceField object with data for Masses and
force field sections.
topologies ([Topology]): List of Topology objects with data
for Atoms, Velocities and topology sections.
atom_style (str): Output atom_style. Default to "full".
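        Example (illustrative only; assumes ``box``, ``ff`` and ``topo``
        objects built as shown elsewhere in this module):
            ld = LammpsData.from_ff_and_topologies(box=box, ff=ff,
                                                   topologies=[topo])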
"""
atom_types = set.union(*[t.species for t in topologies])
assert atom_types.issubset(ff.maps["Atoms"].keys()), "Unknown atom type found in topologies"
items = dict(box=box, atom_style=atom_style, masses=ff.masses, force_field=ff.force_field)
mol_ids, charges, coords, labels = [], [], [], []
v_collector = [] if topologies[0].velocities else None
topo_collector = {"Bonds": [], "Angles": [], "Dihedrals": [], "Impropers": []}
topo_labels = {"Bonds": [], "Angles": [], "Dihedrals": [], "Impropers": []}
for i, topo in enumerate(topologies):
if topo.topologies:
shift = len(labels)
for k, v in topo.topologies.items():
topo_collector[k].append(np.array(v) + shift + 1)
topo_labels[k].extend([tuple(topo.type_by_sites[j] for j in t) for t in v])
if isinstance(v_collector, list):
v_collector.append(topo.velocities)
mol_ids.extend([i + 1] * len(topo.sites))
labels.extend(topo.type_by_sites)
coords.append(topo.sites.cart_coords)
q = [0.0] * len(topo.sites) if not topo.charges else topo.charges
charges.extend(q)
atoms = pd.DataFrame(np.concatenate(coords), columns=["x", "y", "z"])
atoms["molecule-ID"] = mol_ids
atoms["q"] = charges
atoms["type"] = list(map(ff.maps["Atoms"].get, labels))
atoms.index += 1
atoms = atoms[ATOMS_HEADERS[atom_style]]
velocities = None
if v_collector:
velocities = pd.DataFrame(np.concatenate(v_collector), columns=SECTION_HEADERS["Velocities"])
velocities.index += 1
topology = {k: None for k, v in topo_labels.items() if len(v) > 0}
for k in topology:
df = pd.DataFrame(np.concatenate(topo_collector[k]), columns=SECTION_HEADERS[k][1:])
df["type"] = list(map(ff.maps[k].get, topo_labels[k]))
if any(pd.isnull(df["type"])): # Throw away undefined topologies
warnings.warn("Undefined %s detected and removed" % k.lower())
df.dropna(subset=["type"], inplace=True)
df.reset_index(drop=True, inplace=True)
df.index += 1
topology[k] = df[SECTION_HEADERS[k]]
topology = {k: v for k, v in topology.items() if not v.empty}
items.update({"atoms": atoms, "velocities": velocities, "topology": topology})
return cls(**items)
@classmethod
def from_structure(cls, structure, ff_elements=None, atom_style="charge", is_sort=False):
"""
Simple constructor building LammpsData from a structure without
force field parameters and topologies.
Args:
structure (Structure): Input structure.
ff_elements ([str]): List of strings of elements that must
be present due to force field settings but not
necessarily in the structure. Default to None.
atom_style (str): Choose between "atomic" (neutral) and
"charge" (charged). Default to "charge".
            is_sort (bool): Whether to sort sites. Default to False.
"""
if is_sort:
s = structure.get_sorted_structure()
else:
s = structure.copy()
box, symmop = lattice_2_lmpbox(s.lattice)
coords = symmop.operate_multi(s.cart_coords)
site_properties = s.site_properties
if "velocities" in site_properties:
velos = np.array(s.site_properties["velocities"])
rot = SymmOp.from_rotation_and_translation(symmop.rotation_matrix)
rot_velos = rot.operate_multi(velos)
site_properties.update({"velocities": rot_velos})
boxed_s = Structure(
box.to_lattice(),
s.species,
coords,
site_properties=site_properties,
coords_are_cartesian=True,
)
symbols = list(s.symbol_set)
if ff_elements:
symbols.extend(ff_elements)
elements = sorted(Element(el) for el in set(symbols))
mass_info = [tuple([i.symbol] * 2) for i in elements]
ff = ForceField(mass_info)
topo = Topology(boxed_s)
return cls.from_ff_and_topologies(box=box, ff=ff, topologies=[topo], atom_style=atom_style)
class Topology(MSONable):
"""
Class carrying most data in Atoms, Velocities and molecular
topology sections for ONE SINGLE Molecule or Structure
object, or a plain list of Sites.
"""
def __init__(self, sites, ff_label=None, charges=None, velocities=None, topologies=None):
"""
Args:
sites ([Site] or SiteCollection): A group of sites in a
list or as a Molecule/Structure.
            ff_label (str): Site property key for labeling atoms of
                different types. Default to None, i.e., use the symbol
                of each site's specie.
charges ([q, ...]): Charge of each site in a (n,)
array/list, where n is the No. of sites. Default to
None, i.e., search site property for charges.
velocities ([[vx, vy, vz], ...]): Velocity of each site in
a (n, 3) array/list, where n is the No. of sites.
Default to None, i.e., search site property for
velocities.
topologies (dict): Bonds, angles, dihedrals and improper
dihedrals defined by site indices. Default to None,
i.e., no additional topology. All four valid keys
listed below are optional.
{
"Bonds": [[i, j], ...],
"Angles": [[i, j, k], ...],
"Dihedrals": [[i, j, k, l], ...],
"Impropers": [[i, j, k, l], ...]
}
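        Example (illustrative; assumes a 3-site water Molecule ``mol``
        with sites ordered H, O, H):
            topo = Topology(mol, charges=[0.4, -0.8, 0.4],
                            topologies={"Bonds": [[0, 1], [1, 2]],
                                        "Angles": [[0, 1, 2]]})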
"""
if not isinstance(sites, (Molecule, Structure)):
sites = Molecule.from_sites(sites)
if ff_label:
type_by_sites = sites.site_properties.get(ff_label)
else:
type_by_sites = [site.specie.symbol for site in sites]
# search for site property if not override
if charges is None:
charges = sites.site_properties.get("charge")
if velocities is None:
velocities = sites.site_properties.get("velocities")
# validate shape
if charges is not None:
charge_arr = np.array(charges)
assert charge_arr.shape == (len(sites),), "Wrong format for charges"
charges = charge_arr.tolist()
if velocities is not None:
velocities_arr = np.array(velocities)
assert velocities_arr.shape == (
len(sites),
3,
), "Wrong format for velocities"
velocities = velocities_arr.tolist()
if topologies:
topologies = {k: v for k, v in topologies.items() if k in SECTION_KEYWORDS["topology"]}
self.sites = sites
self.ff_label = ff_label
self.charges = charges
self.velocities = velocities
self.topologies = topologies
self.type_by_sites = type_by_sites
self.species = set(type_by_sites)
@classmethod
def from_bonding(cls, molecule, bond=True, angle=True, dihedral=True, tol=0.1, **kwargs):
"""
Another constructor that creates an instance from a molecule.
Covalent bonds and other bond-based topologies (angles and
dihedrals) can be automatically determined. Cannot be used for
non bond-based topologies, e.g., improper dihedrals.
Args:
molecule (Molecule): Input molecule.
bond (bool): Whether find bonds. If set to False, angle and
dihedral searching will be skipped. Default to True.
angle (bool): Whether find angles. Default to True.
dihedral (bool): Whether find dihedrals. Default to True.
tol (float): Bond distance tolerance. Default to 0.1.
Not recommended to alter.
**kwargs: Other kwargs supported by Topology.
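        Example (illustrative; assumes an ethanol Molecule ``mol``):
            topo = Topology.from_bonding(mol)  # bonds, angles, dihedrals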
"""
real_bonds = molecule.get_covalent_bonds(tol=tol)
bond_list = [list(map(molecule.index, [b.site1, b.site2])) for b in real_bonds]
if not all((bond, bond_list)):
# do not search for others if not searching for bonds or no bonds
return cls(sites=molecule, **kwargs)
angle_list, dihedral_list = [], []
dests, freq = np.unique(bond_list, return_counts=True)
hubs = dests[np.where(freq > 1)].tolist()
bond_arr = np.array(bond_list)
if len(hubs) > 0:
hub_spokes = {}
for hub in hubs:
ix = np.any(np.isin(bond_arr, hub), axis=1)
bonds = np.unique(bond_arr[ix]).tolist()
bonds.remove(hub)
hub_spokes[hub] = bonds
# skip angle or dihedral searching if too few bonds or hubs
dihedral = False if len(bond_list) < 3 or len(hubs) < 2 else dihedral
angle = False if len(bond_list) < 2 or len(hubs) < 1 else angle
if angle:
for k, v in hub_spokes.items():
angle_list.extend([[i, k, j] for i, j in itertools.combinations(v, 2)])
if dihedral:
hub_cons = bond_arr[np.all(np.isin(bond_arr, hubs), axis=1)]
for i, j in hub_cons.tolist():
ks = [k for k in hub_spokes[i] if k != j]
ls = [l for l in hub_spokes[j] if l != i]
dihedral_list.extend([[k, i, j, l] for k, l in itertools.product(ks, ls) if k != l])
topologies = {
k: v for k, v in zip(SECTION_KEYWORDS["topology"][:3], [bond_list, angle_list, dihedral_list]) if len(v) > 0
}
topologies = None if len(topologies) == 0 else topologies
return cls(sites=molecule, topologies=topologies, **kwargs)
class ForceField(MSONable):
"""
Class carrying most data in Masses and force field sections.
Attributes:
masses (pandas.DataFrame): DataFrame for Masses section.
force_field (dict): Force field section keywords (keys) and
data (values) as DataFrames.
maps (dict): Dict for labeling atoms and topologies.
"""
@staticmethod
def _is_valid(df):
return not pd.isnull(df).values.any()
def __init__(self, mass_info, nonbond_coeffs=None, topo_coeffs=None):
"""
Args:
mass_info (list): List of atomic mass info. Elements,
strings (symbols) and floats are all acceptable for the
values, with the first two converted to the atomic mass
of an element. It is recommended to use
OrderedDict.items() to prevent key duplications.
[("C", 12.01), ("H", Element("H")), ("O", "O"), ...]
            nonbond_coeffs (list): List of Pair or PairIJ
                coefficients, of which the sequence must be sorted
                according to the species in mass_info. Pair or PairIJ
                is determined by the length of the list. Optional with
                default to None.
topo_coeffs (dict): Dict with force field coefficients for
molecular topologies. Optional with default
to None. All four valid keys listed below are optional.
Each value is a list of dicts with non optional keys
"coeffs" and "types", and related class2 force field
keywords as optional keys.
{
"Bond Coeffs":
[{"coeffs": [coeff],
"types": [("C", "C"), ...]}, ...],
"Angle Coeffs":
[{"coeffs": [coeff],
"BondBond Coeffs": [coeff],
"types": [("H", "C", "H"), ...]}, ...],
"Dihedral Coeffs":
[{"coeffs": [coeff],
"BondBond13 Coeffs": [coeff],
"types": [("H", "C", "C", "H"), ...]}, ...],
"Improper Coeffs":
[{"coeffs": [coeff],
"AngleAngle Coeffs": [coeff],
"types": [("H", "C", "C", "H"), ...]}, ...],
}
            Topologies of the same type or of equivalent types (e.g.,
            ("C", "H") and ("H", "C") bonds) are NOT ALLOWED to
            be defined MORE THAN ONCE with DIFFERENT coefficients.
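        Example (illustrative coefficients only):
            ff = ForceField([("C", 12.01), ("H", 1.008)],
                            nonbond_coeffs=[[0.1, 3.4], [0.03, 2.5]],
                            topo_coeffs={"Bond Coeffs": [
                                {"coeffs": [340.0, 1.09],
                                 "types": [("C", "H")]}]})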
"""
def map_mass(v):
return (
v.atomic_mass.real
if isinstance(v, Element)
else Element(v).atomic_mass.real
if isinstance(v, str)
else v
)
index, masses, self.mass_info, atoms_map = [], [], [], {}
for i, m in enumerate(mass_info):
index.append(i + 1)
mass = map_mass(m[1])
masses.append(mass)
self.mass_info.append((m[0], mass))
atoms_map[m[0]] = i + 1
self.masses = pd.DataFrame({"mass": masses}, index=index)
self.maps = {"Atoms": atoms_map}
ff_dfs = {}
self.nonbond_coeffs = nonbond_coeffs
if self.nonbond_coeffs:
ff_dfs.update(self._process_nonbond())
self.topo_coeffs = topo_coeffs
if self.topo_coeffs:
self.topo_coeffs = {k: v for k, v in self.topo_coeffs.items() if k in SECTION_KEYWORDS["ff"][2:]}
for k in self.topo_coeffs.keys():
coeffs, mapper = self._process_topo(k)
ff_dfs.update(coeffs)
self.maps.update(mapper)
self.force_field = None if len(ff_dfs) == 0 else ff_dfs
def _process_nonbond(self):
pair_df = pd.DataFrame(self.nonbond_coeffs)
assert self._is_valid(pair_df), "Invalid nonbond coefficients with rows varying in length"
npair, ncoeff = pair_df.shape
pair_df.columns = ["coeff%d" % i for i in range(1, ncoeff + 1)]
nm = len(self.mass_info)
ncomb = int(nm * (nm + 1) / 2)
if npair == nm:
kw = "Pair Coeffs"
pair_df.index = range(1, nm + 1)
elif npair == ncomb:
kw = "PairIJ Coeffs"
ids = list(itertools.combinations_with_replacement(range(1, nm + 1), 2))
id_df = pd.DataFrame(ids, columns=["id1", "id2"])
pair_df = pd.concat([id_df, pair_df], axis=1)
else:
raise ValueError(
"Expecting {} Pair Coeffs or "
"{} PairIJ Coeffs for {} atom types,"
" got {}".format(nm, ncomb, nm, npair)
)
return {kw: pair_df}
def _process_topo(self, kw):
def find_eq_types(label, section):
if section.startswith("Improper"):
label_arr = np.array(label)
seqs = [[0, 1, 2, 3], [0, 2, 1, 3], [3, 1, 2, 0], [3, 2, 1, 0]]
return [tuple(label_arr[s]) for s in seqs]
return [label] + [label[::-1]]
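        # e.g. (illustrative): a ("C", "H") bond also matches ("H", "C");
        # an improper ("H", "C", "C", "H") expands to the four equivalent
        # orderings given by `seqs` above.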
main_data, distinct_types = [], []
class2_data = {k: [] for k in self.topo_coeffs[kw][0].keys() if k in CLASS2_KEYWORDS.get(kw, [])}
for i, d in enumerate(self.topo_coeffs[kw]):
main_data.append(d["coeffs"])
distinct_types.append(d["types"])
for k in class2_data.keys():
class2_data[k].append(d[k])
distinct_types = [set(itertools.chain(*[find_eq_types(t, kw) for t in dt])) for dt in distinct_types]
type_counts = sum(len(dt) for dt in distinct_types)
type_union = set.union(*distinct_types)
assert len(type_union) == type_counts, "Duplicated items found under different coefficients in %s" % kw
atoms = set(np.ravel(list(itertools.chain(*distinct_types))))
assert atoms.issubset(self.maps["Atoms"].keys()), "Undefined atom type found in %s" % kw
mapper = {}
for i, dt in enumerate(distinct_types):
for t in dt:
mapper[t] = i + 1
def process_data(data):
df = pd.DataFrame(data)
assert self._is_valid(df), "Invalid coefficients with rows varying in length"
n, c = df.shape
df.columns = ["coeff%d" % i for i in range(1, c + 1)]
df.index = range(1, n + 1)
return df
all_data = {kw: process_data(main_data)}
if class2_data:
all_data.update({k: process_data(v) for k, v in class2_data.items()})
return all_data, {kw[:-7] + "s": mapper}
def to_file(self, filename):
"""
Saves object to a file in YAML format.
Args:
filename (str): Filename.
"""
d = {
"mass_info": self.mass_info,
"nonbond_coeffs": self.nonbond_coeffs,
"topo_coeffs": self.topo_coeffs,
}
with open(filename, "w") as f:
yaml.dump(d, f)
@classmethod
def from_file(cls, filename):
"""
Constructor that reads in a file in YAML format.
Args:
filename (str): Filename.
"""
with open(filename) as f:
d = yaml.load(f)
return cls.from_dict(d)
@classmethod
def from_dict(cls, d):
"""
Constructor that reads in a dictionary.
Args:
d (dict): Dictionary to read.
"""
d["mass_info"] = [tuple(m) for m in d["mass_info"]]
if d.get("topo_coeffs"):
for v in d["topo_coeffs"].values():
for c in v:
c["types"] = [tuple(t) for t in c["types"]]
return cls(d["mass_info"], d["nonbond_coeffs"], d["topo_coeffs"])
class CombinedData(LammpsData):
"""
    Object for a collective set of data for a series of LAMMPS data files.
    Velocities are not yet implemented.
"""
def __init__(
self,
list_of_molecules,
list_of_names,
list_of_numbers,
coordinates,
atom_style="full",
):
"""
Args:
list_of_molecules: A list of LammpsData objects of a chemical cluster.
Each LammpsData object (cluster) may contain one or more molecule ID.
            list_of_names: A list of names (strings), one for each cluster. The characters in each name are
                restricted to word characters ([a-zA-Z0-9_]). If names with any non-word characters
                are passed in, the special characters will be substituted by '_'.
            list_of_numbers: A list of integers giving the count of each molecule.
coordinates (pandas.DataFrame): DataFrame at least containing
columns of ["x", "y", "z"] for coordinates of atoms.
atom_style (str): Output atom_style. Default to "full".
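        Example (illustrative; assumes two LammpsData objects ``ec`` and
        ``li`` plus a coordinates DataFrame ``coords`` with a matching
        total atom count):
            combined = CombinedData([ec, li], ["EC", "Li"], [500, 50],
                                    coords)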
"""
self._list_of_molecules = list_of_molecules
self._list_of_names = list_of_names
self._list_of_numbers = list_of_numbers
self._coordinates = coordinates
self._coordinates.index = self._coordinates.index.map(int)
max_xyz = self._coordinates[["x", "y", "z"]].max().max()
min_xyz = self._coordinates[["x", "y", "z"]].min().min()
self.box = LammpsBox(np.array(3 * [[min_xyz - 0.5, max_xyz + 0.5]]))
self.atom_style = atom_style
self.n = sum(self._list_of_numbers)
self.names = []
for name in self._list_of_names:
self.names.append("_".join(re.findall(r"\w+", name)))
self.mols = self._list_of_molecules
self.nums = self._list_of_numbers
self.masses = pd.concat([mol.masses.copy() for mol in self.mols], ignore_index=True)
self.masses.index += 1
all_ff_kws = SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]
appeared_kws = {k for mol in self.mols if mol.force_field is not None for k in mol.force_field}
ff_kws = [k for k in all_ff_kws if k in appeared_kws]
self.force_field = {}
for kw in ff_kws:
self.force_field[kw] = pd.concat(
[mol.force_field[kw].copy() for mol in self.mols if kw in mol.force_field],
ignore_index=True,
)
self.force_field[kw].index += 1
if not bool(self.force_field):
self.force_field = None
self.atoms = pd.DataFrame()
mol_count = 0
type_count = 0
self.mols_per_data = []
for i, mol in enumerate(self.mols):
atoms_df = mol.atoms.copy()
atoms_df["molecule-ID"] += mol_count
atoms_df["type"] += type_count
mols_in_data = len(atoms_df["molecule-ID"].unique())
self.mols_per_data.append(mols_in_data)
            for j in range(self.nums[i]):
                # DataFrame.append was removed in pandas 2.0; concat is the equivalent
                self.atoms = pd.concat([self.atoms, atoms_df], ignore_index=True)
                atoms_df["molecule-ID"] += mols_in_data
type_count += len(mol.masses)
mol_count += self.nums[i] * mols_in_data
self.atoms.index += 1
assert len(self.atoms) == len(self._coordinates), "Wrong number of coordinates."
self.atoms.update(self._coordinates)
self.velocities = None
assert self.mols[0].velocities is None, "Velocities not supported"
self.topology = {}
atom_count = 0
count = {"Bonds": 0, "Angles": 0, "Dihedrals": 0, "Impropers": 0}
for i, mol in enumerate(self.mols):
for kw in SECTION_KEYWORDS["topology"]:
if bool(mol.topology) and kw in mol.topology:
if kw not in self.topology:
self.topology[kw] = pd.DataFrame()
topo_df = mol.topology[kw].copy()
topo_df["type"] += count[kw]
for col in topo_df.columns[1:]:
topo_df[col] += atom_count
                    for j in range(self.nums[i]):
                        # DataFrame.append was removed in pandas 2.0; concat is the equivalent
                        self.topology[kw] = pd.concat([self.topology[kw], topo_df], ignore_index=True)
                        for col in topo_df.columns[1:]:
                            topo_df[col] += len(mol.atoms)
count[kw] += len(mol.force_field[kw[:-1] + " Coeffs"])
atom_count += len(mol.atoms) * self.nums[i]
for kw in SECTION_KEYWORDS["topology"]:
if kw in self.topology:
self.topology[kw].index += 1
if not bool(self.topology):
self.topology = None
@property
def structure(self):
"""
Exports a periodic structure object representing the simulation
box.
Return:
Structure
"""
ld_cp = self.as_lammpsdata()
return ld_cp.structure
def disassemble(self, atom_labels=None, guess_element=True, ff_label="ff_map"):
"""
Breaks down each LammpsData in CombinedData to building blocks
(LammpsBox, ForceField and a series of Topology).
RESTRICTIONS APPLIED:
        1. No complex force field defined beyond atom types alone,
        i.e., the same type or equivalent types of topology must not
        have more than one set of coefficients.
        2. No intermolecular topologies (with atoms from different
        molecule-IDs), since a Topology object includes data for ONE
        molecule or structure only.
Args:
atom_labels ([str]): List of strings (must be different
from one another) for labelling each atom type found in
Masses section. Default to None, where the labels are
                automatically added based on either element guess or
dummy specie assignment.
guess_element (bool): Whether to guess the element based on
its atomic mass. Default to True, otherwise dummy
species "Qa", "Qb", ... will be assigned to various
atom types. The guessed or assigned elements will be
reflected on atom labels if atom_labels is None, as
well as on the species of molecule in each Topology.
ff_label (str): Site property key for labeling atoms of
different types. Default to "ff_map".
Returns:
[(LammpsBox, ForceField, [Topology]), ...]
"""
disassembles = []
for mol in self.mols:
disassembles.append(
mol.disassemble(atom_labels=atom_labels, guess_element=guess_element, ff_label=ff_label)
)
return disassembles
@classmethod
def from_ff_and_topologies(cls):
"""
Unsupported constructor for CombinedData objects.
"""
raise AttributeError("Unsupported constructor for CombinedData objects.")
@classmethod
def from_structure(cls):
"""
Unsupported constructor for CombinedData objects.
"""
raise AttributeError("Unsupported constructor for CombinedData objects.")
@classmethod
def parse_xyz(cls, filename):
"""
        Load an xyz file generated by packmol (for those who find it hard to install openbabel).
Returns:
pandas.DataFrame
"""
with open(filename) as f:
lines = f.readlines()
sio = StringIO("".join(lines[2:])) # skip the 2nd line
df = pd.read_csv(
sio,
header=None,
comment="#",
delim_whitespace=True,
names=["atom", "x", "y", "z"],
)
df.index += 1
return df
@classmethod
def from_files(cls, coordinate_file, list_of_numbers, *filenames):
"""
        Constructor that parses a series of data files.
Args:
coordinate_file (str): The filename of xyz coordinates.
            list_of_numbers (list): A list of numbers specifying counts for each
                cluster parsed from the files.
filenames (str): A series of LAMMPS data filenames in string format.
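        Example (illustrative file names):
            cd = CombinedData.from_files("mix.xyz", [100, 50],
                                         "ec.data", "li.data")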
"""
names = []
mols = []
styles = []
coordinates = cls.parse_xyz(filename=coordinate_file)
        for i, filename in enumerate(filenames):
            # avoid exec/eval: build each cluster with a plain loop variable
            cluster = LammpsData.from_file(filename)
            names.append("cluster%d" % (i + 1))
            mols.append(cluster)
            styles.append(cluster.atom_style)
style = set(styles)
assert len(style) == 1, "Files have different atom styles."
return cls.from_lammpsdata(mols, names, list_of_numbers, coordinates, style.pop())
@classmethod
def from_lammpsdata(cls, mols, names, list_of_numbers, coordinates, atom_style=None):
"""
Constructor that can infer atom_style.
The input LammpsData objects are used non-destructively.
Args:
            mols: A list of LammpsData objects of a chemical cluster. Each LammpsData object (cluster)
                may contain one or more molecule IDs.
            names: A list of names, one for each cluster.
            list_of_numbers: A list of integers giving the count of each molecule.
coordinates (pandas.DataFrame): DataFrame at least containing
columns of ["x", "y", "z"] for coordinates of atoms.
atom_style (str): Output atom_style. Default to "full".
"""
styles = []
for mol in mols:
styles.append(mol.atom_style)
style = set(styles)
assert len(style) == 1, "Data have different atom_style."
style_return = style.pop()
if atom_style:
            assert atom_style == style_return, "Data have a different atom_style than specified."
return cls(mols, names, list_of_numbers, coordinates, style_return)
def get_string(self, distance=6, velocity=8, charge=4, hybrid=True):
"""
Returns the string representation of CombinedData, essentially
the string to be written to a file. Combination info is included
as a comment. For single molecule ID data, the info format is:
num name
For data with multiple molecule ID, the format is:
num(mols_per_data) name
Args:
distance (int): No. of significant figures to output for
box settings (bounds and tilt) and atomic coordinates.
Default to 6.
velocity (int): No. of significant figures to output for
velocities. Default to 8.
charge (int): No. of significant figures to output for
charges. Default to 4.
hybrid (bool): Whether to write hybrid coeffs types.
Default to True. If the data object has no hybrid
coeffs types and has large coeffs section, one may
use False to speedup the process. Otherwise the
default is recommended.
Returns:
String representation
"""
lines = LammpsData.get_string(self, distance, velocity, charge, hybrid).splitlines()
info = "# " + " + ".join(
(str(a) + " " + b) if c == 1 else (str(a) + "(" + str(c) + ") " + b)
for a, b, c in zip(self.nums, self.names, self.mols_per_data)
)
lines.insert(1, info)
return "\n".join(lines)
def as_lammpsdata(self):
"""
        Converts a CombinedData object to a LammpsData object. Attributes are deep-copied.
box (LammpsBox): Simulation box.
force_field (dict): Data for force field sections. Optional
with default to None. Only keywords in force field and
class 2 force field are valid keys, and each value is a
DataFrame.
topology (dict): Data for topology sections. Optional with
default to None. Only keywords in topology are valid
keys, and each value is a DataFrame.
"""
items = {}
items["box"] = LammpsBox(self.box.bounds, self.box.tilt)
items["masses"] = self.masses.copy()
items["atoms"] = self.atoms.copy()
items["atom_style"] = self.atom_style
items["velocities"] = None # Velocities not supported
if self.force_field:
all_ff_kws = SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]
items["force_field"] = {k: v.copy() for k, v in self.force_field.items() if k in all_ff_kws}
if self.topology:
items["topology"] = {k: v.copy() for k, v in self.topology.items() if k in SECTION_KEYWORDS["topology"]}
return LammpsData(**items)
@deprecated(
LammpsData.from_structure,
"structure_2_lmpdata has been deprecated in favor of LammpsData.from_structure",
)
def structure_2_lmpdata(structure, ff_elements=None, atom_style="charge", is_sort=False):
"""
Converts a structure to a LammpsData object with no force field
parameters and topologies.
Args:
structure (Structure): Input structure.
ff_elements ([str]): List of strings of elements that must be
present due to force field settings but not necessarily in
the structure. Default to None.
atom_style (str): Choose between "atomic" (neutral) and
"charge" (charged). Default to "charge".
        is_sort (bool): Whether to sort the structure sites. Default to False.
Returns:
LammpsData
"""
if is_sort:
s = structure.get_sorted_structure()
else:
s = structure.copy()
a, b, c = s.lattice.abc
m = s.lattice.matrix
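    # LAMMPS triclinic convention (see the LAMMPS "Howto triclinic" docs):
    # lattice vector a lies along x, b lies in the xy plane, and xy/xz/yz
    # are the tilt factors obtained by projecting b and c onto the
    # orthogonal box axes computed below.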
xhi = a
xy = np.dot(m[1], m[0] / xhi)
yhi = np.sqrt(b ** 2 - xy ** 2)
xz = np.dot(m[2], m[0] / xhi)
yz = (np.dot(m[1], m[2]) - xy * xz) / yhi
zhi = np.sqrt(c ** 2 - xz ** 2 - yz ** 2)
box_bounds = [[0.0, xhi], [0.0, yhi], [0.0, zhi]]
box_tilt = [xy, xz, yz]
box_tilt = None if not any(box_tilt) else box_tilt
box = LammpsBox(box_bounds, box_tilt)
new_latt = Lattice([[xhi, 0, 0], [xy, yhi, 0], [xz, yz, zhi]])
s.lattice = new_latt
symbols = list(s.symbol_set)
if ff_elements:
symbols.extend(ff_elements)
elements = sorted(Element(el) for el in set(symbols))
mass_info = [tuple([i.symbol] * 2) for i in elements]
ff = ForceField(mass_info)
topo = Topology(s)
return LammpsData.from_ff_and_topologies(box=box, ff=ff, topologies=[topo], atom_style=atom_style)
| vorwerkc/pymatgen | pymatgen/io/lammps/data.py | Python | mit | 62,358 | [
"LAMMPS",
"pymatgen"
] | fbd5b342b9aeeb48b6b06b374343a90af23eac38fada776cd3d0b24b5b4a311a |
from json import load
from os import makedirs
from os.path import exists
from os.path import abspath
from os.path import dirname
from os.path import join
from os.path import basename
from os.path import sep
import fnmatch
from re import compile
from re import escape
import galaxy.util
from galaxy.util.bunch import Bunch
from .config_util import read_file
from .util import directory_files
from .util import unique_path_prefix
from .transport import get_file
from .transport import post_file
DEFAULT_MAPPED_ACTION = 'transfer' # Not really clear to me what this should be, exception?
DEFAULT_PATH_MAPPER_TYPE = 'prefix'
STAGING_ACTION_REMOTE = "remote"
STAGING_ACTION_LOCAL = "local"
STAGING_ACTION_NONE = None
STAGING_ACTION_DEFAULT = "default"
# Poor man's enum.
path_type = Bunch(
# Galaxy input datasets and extra files.
INPUT="input",
# Galaxy config and param files.
CONFIG="config",
# Files from tool's tool_dir (for now just wrapper if available).
TOOL="tool",
# Input work dir files - e.g. metadata files, task-split input files, etc..
WORKDIR="workdir",
# Galaxy output datasets in their final home.
OUTPUT="output",
# Galaxy from_work_dir output paths and other files (e.g. galaxy.json)
OUTPUT_WORKDIR="output_workdir",
# Other fixed tool parameter paths (likely coming from tool data, but not
    # necessarily). Not sure this is the best name...
UNSTRUCTURED="unstructured",
)
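# Illustrative usage of the path_type "enum" (the values are plain strings):
#   path_type.INPUT == "input"; path mappers test membership against these
#   values via their `path_types` lists.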
ACTION_DEFAULT_PATH_TYPES = [
path_type.INPUT,
path_type.CONFIG,
path_type.TOOL,
path_type.WORKDIR,
path_type.OUTPUT,
path_type.OUTPUT_WORKDIR,
]
ALL_PATH_TYPES = ACTION_DEFAULT_PATH_TYPES + [path_type.UNSTRUCTURED]
class FileActionMapper(object):
"""
Objects of this class define how paths are mapped to actions.
>>> json_string = r'''{"paths": [ \
{"path": "/opt/galaxy", "action": "none"}, \
{"path": "/galaxy/data", "action": "transfer"}, \
{"path": "/cool/bamfiles/**/*.bam", "action": "copy", "match_type": "glob"}, \
{"path": ".*/dataset_\\\\d+.dat", "action": "copy", "match_type": "regex"} \
]}'''
>>> from tempfile import NamedTemporaryFile
>>> from os import unlink
>>> def mapper_for(default_action, config_contents):
... f = NamedTemporaryFile(delete=False)
... f.write(config_contents.encode('UTF-8'))
... f.close()
... mock_client = Bunch(default_file_action=default_action, action_config_path=f.name, files_endpoint=None)
... mapper = FileActionMapper(mock_client)
... mapper = FileActionMapper(config=mapper.to_dict()) # Serialize and deserialize it to make sure still works
... unlink(f.name)
... return mapper
>>> mapper = mapper_for(default_action='none', config_contents=json_string)
>>> # Test first config line above, implicit path prefix mapper
>>> action = mapper.action('/opt/galaxy/tools/filters/catWrapper.py', 'input')
>>> action.action_type == u'none'
True
>>> action.staging_needed
False
>>> # Test another (2nd) mapper, this one with a different action
>>> action = mapper.action('/galaxy/data/files/000/dataset_1.dat', 'input')
>>> action.action_type == u'transfer'
True
>>> action.staging_needed
True
>>> # Always at least copy work_dir outputs.
>>> action = mapper.action('/opt/galaxy/database/working_directory/45.sh', 'workdir')
>>> action.action_type == u'copy'
True
>>> action.staging_needed
True
>>> # Test glob mapper (matching test)
>>> mapper.action('/cool/bamfiles/projectABC/study1/patient3.bam', 'input').action_type == u'copy'
True
>>> # Test glob mapper (non-matching test)
>>> mapper.action('/cool/bamfiles/projectABC/study1/patient3.bam.bai', 'input').action_type == u'none'
True
>>> # Regex mapper test.
>>> mapper.action('/old/galaxy/data/dataset_10245.dat', 'input').action_type == u'copy'
True
>>> # Doesn't map unstructured paths by default
>>> mapper.action('/old/galaxy/data/dataset_10245.dat', 'unstructured').action_type == u'none'
True
>>> input_only_mapper = mapper_for(default_action="none", config_contents=r'''{"paths": [ \
{"path": "/", "action": "transfer", "path_types": "input"} \
] }''')
>>> input_only_mapper.action('/dataset_1.dat', 'input').action_type == u'transfer'
True
>>> input_only_mapper.action('/dataset_1.dat', 'output').action_type == u'none'
True
>>> unstructured_mapper = mapper_for(default_action="none", config_contents=r'''{"paths": [ \
{"path": "/", "action": "transfer", "path_types": "*any*"} \
] }''')
>>> unstructured_mapper.action('/old/galaxy/data/dataset_10245.dat', 'unstructured').action_type == u'transfer'
True
"""
def __init__(self, client=None, config=None):
if config is None and client is None:
message = "FileActionMapper must be constructed from either a client or a config dictionary."
raise Exception(message)
if config is None:
config = self.__client_to_config(client)
self.default_action = config.get("default_action", "transfer")
self.mappers = mappers_from_dicts(config.get("paths", []))
self.files_endpoint = config.get("files_endpoint", None)
def action(self, path, type, mapper=None):
mapper = self.__find_mapper(path, type, mapper)
action_class = self.__action_class(path, type, mapper)
file_lister = DEFAULT_FILE_LISTER
action_kwds = {}
if mapper:
file_lister = mapper.file_lister
action_kwds = mapper.action_kwds
action = action_class(path, file_lister=file_lister, **action_kwds)
self.__process_action(action, type)
return action
def unstructured_mappers(self):
""" Return mappers that will map 'unstructured' files (i.e. go beyond
mapping inputs, outputs, and config files).
"""
        return [m for m in self.mappers if path_type.UNSTRUCTURED in m.path_types]
def to_dict(self):
return dict(
default_action=self.default_action,
files_endpoint=self.files_endpoint,
            paths=[m.to_dict() for m in self.mappers]
)
def __client_to_config(self, client):
action_config_path = client.action_config_path
if action_config_path:
config = read_file(action_config_path)
else:
config = dict()
config["default_action"] = client.default_file_action
config["files_endpoint"] = client.files_endpoint
return config
def __load_action_config(self, path):
config = load(open(path, 'rb'))
self.mappers = mappers_from_dicts(config.get('paths', []))
def __find_mapper(self, path, type, mapper=None):
if not mapper:
normalized_path = abspath(path)
for query_mapper in self.mappers:
if query_mapper.matches(normalized_path, type):
mapper = query_mapper
break
return mapper
def __action_class(self, path, type, mapper):
action_type = self.default_action if type in ACTION_DEFAULT_PATH_TYPES else "none"
if mapper:
action_type = mapper.action_type
if type in ["workdir", "output_workdir"] and action_type == "none":
# We are changing the working_directory relative to what
# Galaxy would use, these need to be copied over.
action_type = "copy"
action_class = actions.get(action_type, None)
if action_class is None:
message_template = "Unknown action_type encountered %s while trying to map path %s"
message_args = (action_type, path)
raise Exception(message_template % message_args)
return action_class
def __process_action(self, action, file_type):
""" Extension point to populate extra action information after an
action has been created.
"""
if action.action_type == "remote_transfer":
url_base = self.files_endpoint
if not url_base:
raise Exception("Attempted to use remote_transfer action with defining a files_endpoint")
if "?" not in url_base:
url_base = "%s?" % url_base
# TODO: URL encode path.
url = "%s&path=%s&file_type=%s" % (url_base, action.path, file_type)
action.url = url
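            # e.g. (illustrative values): files_endpoint "http://host/files?job=1"
            # and path "/tmp/in.dat" yield
            # "http://host/files?job=1&path=/tmp/in.dat&file_type=input"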
REQUIRED_ACTION_KWD = object()
class BaseAction(object):
action_spec = {}
def __init__(self, path, file_lister=None):
self.path = path
self.file_lister = file_lister or DEFAULT_FILE_LISTER
def unstructured_map(self, path_helper):
unstructured_map = self.file_lister.unstructured_map(self.path)
if self.staging_needed:
# To ensure uniqueness, prepend unique prefix to each name
prefix = unique_path_prefix(self.path)
            for path, name in list(unstructured_map.items()):  # iteritems() is Python 2 only
unstructured_map[path] = join(prefix, name)
else:
path_rewrites = {}
for path in unstructured_map:
rewrite = self.path_rewrite(path_helper, path)
if rewrite:
path_rewrites[path] = rewrite
unstructured_map = path_rewrites
return unstructured_map
@property
def staging_needed(self):
return self.staging != STAGING_ACTION_NONE
@property
def staging_action_local(self):
return self.staging == STAGING_ACTION_LOCAL
class NoneAction(BaseAction):
""" This action indicates the corresponding path does not require any
additional action. This should indicate paths that are available both on
the Pulsar client (i.e. Galaxy server) and remote Pulsar server with the same
paths. """
action_type = "none"
staging = STAGING_ACTION_NONE
def to_dict(self):
return dict(path=self.path, action_type=self.action_type)
@classmethod
def from_dict(cls, action_dict):
return NoneAction(path=action_dict["path"])
def path_rewrite(self, path_helper, path=None):
return None
class RewriteAction(BaseAction):
""" This actin indicates the Pulsar server should simply rewrite the path
to the specified file.
"""
action_spec = dict(
source_directory=REQUIRED_ACTION_KWD,
destination_directory=REQUIRED_ACTION_KWD
)
action_type = "rewrite"
staging = STAGING_ACTION_NONE
def __init__(self, path, file_lister=None, source_directory=None, destination_directory=None):
self.path = path
self.file_lister = file_lister or DEFAULT_FILE_LISTER
self.source_directory = source_directory
self.destination_directory = destination_directory
def to_dict(self):
return dict(
path=self.path,
action_type=self.action_type,
source_directory=self.source_directory,
destination_directory=self.destination_directory,
)
@classmethod
def from_dict(cls, action_dict):
return RewriteAction(
path=action_dict["path"],
source_directory=action_dict["source_directory"],
destination_directory=action_dict["destination_directory"],
)
def path_rewrite(self, path_helper, path=None):
if not path:
path = self.path
new_path = path_helper.from_posix_with_new_base(self.path, self.source_directory, self.destination_directory)
return None if new_path == self.path else new_path
class TransferAction(BaseAction):
""" This actions indicates that the Pulsar client should initiate an HTTP
transfer of the corresponding path to the remote Pulsar server before
launching the job. """
action_type = "transfer"
staging = STAGING_ACTION_LOCAL
class CopyAction(BaseAction):
""" This action indicates that the Pulsar client should execute a file system
copy of the corresponding path to the Pulsar staging directory prior to
launching the corresponding job. """
action_type = "copy"
staging = STAGING_ACTION_LOCAL
class RemoteCopyAction(BaseAction):
""" This action indicates the Pulsar server should copy the file before
execution via direct file system copy. This is like a CopyAction, but
it indicates the action should occur on the Pulsar server instead of on
the client.
"""
action_type = "remote_copy"
staging = STAGING_ACTION_REMOTE
def to_dict(self):
return dict(path=self.path, action_type=self.action_type)
@classmethod
def from_dict(cls, action_dict):
return RemoteCopyAction(path=action_dict["path"])
def write_to_path(self, path):
galaxy.util.copy_to_path(open(self.path, "rb"), path)
def write_from_path(self, pulsar_path):
destination = self.path
parent_directory = dirname(destination)
if not exists(parent_directory):
makedirs(parent_directory)
with open(pulsar_path, "rb") as f:
galaxy.util.copy_to_path(f, destination)
class RemoteTransferAction(BaseAction):
""" This action indicates the Pulsar server should copy the file before
execution via direct file system copy. This is like a CopyAction, but
it indicates the action should occur on the Pulsar server instead of on
the client.
"""
action_type = "remote_transfer"
staging = STAGING_ACTION_REMOTE
def __init__(self, path, file_lister=None, url=None):
super(RemoteTransferAction, self).__init__(path, file_lister=file_lister)
self.url = url
def to_dict(self):
return dict(path=self.path, action_type=self.action_type, url=self.url)
@classmethod
def from_dict(cls, action_dict):
return RemoteTransferAction(path=action_dict["path"], url=action_dict["url"])
def write_to_path(self, path):
get_file(self.url, path)
def write_from_path(self, pulsar_path):
post_file(self.url, pulsar_path)
class MessageAction(object):
""" Sort of pseudo action describing "files" store in memory and
transferred via message (HTTP, Python-call, MQ, etc...)
"""
action_type = "message"
staging = STAGING_ACTION_DEFAULT
def __init__(self, contents, client=None):
self.contents = contents
self.client = client
@property
def staging_needed(self):
return True
@property
def staging_action_local(self):
# Ekkk, cannot be called if created through from_dict.
# Shouldn't be a problem the way it is used - but is an
# object design problem.
return self.client.prefer_local_staging
def to_dict(self):
return dict(contents=self.contents, action_type=MessageAction.action_type)
@classmethod
def from_dict(cls, action_dict):
return MessageAction(contents=action_dict["contents"])
def write_to_path(self, path):
open(path, "w").write(self.contents)
DICTIFIABLE_ACTION_CLASSES = [RemoteCopyAction, RemoteTransferAction, MessageAction]
def from_dict(action_dict):
action_type = action_dict.get("action_type", None)
target_class = None
for action_class in DICTIFIABLE_ACTION_CLASSES:
if action_type == action_class.action_type:
target_class = action_class
if not target_class:
message = "Failed to recover action from dictionary - invalid action type specified %s." % action_type
raise Exception(message)
return target_class.from_dict(action_dict)
class BasePathMapper(object):
def __init__(self, config):
action_type = config.get('action', DEFAULT_MAPPED_ACTION)
action_class = actions.get(action_type, None)
action_kwds = action_class.action_spec.copy()
for key, value in action_kwds.items():
if key in config:
action_kwds[key] = config[key]
elif value is REQUIRED_ACTION_KWD:
message_template = "action_type %s requires key word argument %s"
message = message_template % (action_type, key)
raise Exception(message)
self.action_type = action_type
self.action_kwds = action_kwds
path_types_str = config.get('path_types', "*defaults*")
path_types_str = path_types_str.replace("*defaults*", ",".join(ACTION_DEFAULT_PATH_TYPES))
path_types_str = path_types_str.replace("*any*", ",".join(ALL_PATH_TYPES))
self.path_types = path_types_str.split(",")
self.file_lister = FileLister(config)
def matches(self, path, path_type):
path_type_matches = path_type in self.path_types
return path_type_matches and self._path_matches(path)
def _extend_base_dict(self, **kwds):
base_dict = dict(
action=self.action_type,
path_types=",".join(self.path_types),
match_type=self.match_type
)
base_dict.update(self.file_lister.to_dict())
base_dict.update(self.action_kwds)
base_dict.update(**kwds)
return base_dict
class PrefixPathMapper(BasePathMapper):
match_type = 'prefix'
def __init__(self, config):
super(PrefixPathMapper, self).__init__(config)
self.prefix_path = abspath(config['path'])
def _path_matches(self, path):
return path.startswith(self.prefix_path)
def to_pattern(self):
pattern_str = "(%s%s[^\s,\"\']+)" % (escape(self.prefix_path), escape(sep))
return compile(pattern_str)
def to_dict(self):
return self._extend_base_dict(path=self.prefix_path)
class GlobPathMapper(BasePathMapper):
match_type = 'glob'
def __init__(self, config):
super(GlobPathMapper, self).__init__(config)
self.glob_path = config['path']
def _path_matches(self, path):
return fnmatch.fnmatch(path, self.glob_path)
def to_pattern(self):
return compile(fnmatch.translate(self.glob_path))
def to_dict(self):
return self._extend_base_dict(path=self.glob_path)
class RegexPathMapper(BasePathMapper):
match_type = 'regex'
def __init__(self, config):
super(RegexPathMapper, self).__init__(config)
self.pattern_raw = config['path']
self.pattern = compile(self.pattern_raw)
def _path_matches(self, path):
return self.pattern.match(path) is not None
def to_pattern(self):
return self.pattern
def to_dict(self):
return self._extend_base_dict(path=self.pattern_raw)
MAPPER_CLASSES = [PrefixPathMapper, GlobPathMapper, RegexPathMapper]
MAPPER_CLASS_DICT = dict(map(lambda c: (c.match_type, c), MAPPER_CLASSES))
def mappers_from_dicts(mapper_def_list):
    # Return a list rather than a lazy map object so mappers can be iterated repeatedly.
    return [__mapper_from_dict(m) for m in mapper_def_list]
def __mapper_from_dict(mapper_dict):
map_type = mapper_dict.get('match_type', DEFAULT_PATH_MAPPER_TYPE)
return MAPPER_CLASS_DICT[map_type](mapper_dict)
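# Illustrative mapper definitions (assumed values) and the classes selected:
#   {"path": "/galaxy/data", "action": "transfer"}                 -> PrefixPathMapper
#   {"path": "*.bam", "action": "copy", "match_type": "glob"}      -> GlobPathMapper
#   {"path": ".*\\.dat", "action": "copy", "match_type": "regex"}  -> RegexPathMapper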
class FileLister(object):
def __init__(self, config):
self.depth = int(config.get("depth", "0"))
def to_dict(self):
return dict(
depth=self.depth
)
def unstructured_map(self, path):
depth = self.depth
if self.depth == 0:
return {path: basename(path)}
else:
while depth > 0:
path = dirname(path)
depth -= 1
return dict([(join(path, f), f) for f in directory_files(path)])
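# Illustrative FileLister behaviour (assumed paths):
#   FileLister({"depth": 0}).unstructured_map("/data/a.txt") == {"/data/a.txt": "a.txt"}
#   with depth=1, every file under dirname("/data/a.txt") is mapped instead.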
DEFAULT_FILE_LISTER = FileLister(dict(depth=0))
ACTION_CLASSES = [
NoneAction,
RewriteAction,
TransferAction,
CopyAction,
RemoteCopyAction,
RemoteTransferAction,
]
actions = dict([(clazz.action_type, clazz) for clazz in ACTION_CLASSES])
__all__ = [
    'FileActionMapper',
    'path_type',
    'from_dict',
    'MessageAction',
    'RemoteTransferAction',  # For testing
]
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/pulsar/client/action_mapper.py | Python | gpl-3.0 | 19,930 | [
"Galaxy"
] | be02a4de24c0655008cf09abf307faca12d2ef4bb9f873b8778c1ecf1b621134 |
#!/usr/bin/env python3
########################################################################
# Solves problem 4 from projectEuler.net.
# Finds the largest palindrome product of two 3-digit numbers.
# Copyright (C) 2010 Santiago Alessandri
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# You can contact me at san.lt.ss@gmail.com
# Visit my wiki at http://san-ss.wikidot.com
########################################################################
from CommonFunctions import is_palindrome
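# For reference, a minimal is_palindrome consistent with its use below
# (assumption: CommonFunctions provides an equivalent implementation):
#     def is_palindrome(number):
#         digits = str(number)
#         return digits == digits[::-1]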
if __name__ == '__main__':
result = 0
n1 = 100
while n1 < 1000:
n2 = n1
while n2 < 1000:
if is_palindrome(n1 * n2):
result = max(result, (n1 * n2))
n2 += 1
n1 += 1
print("The result is:", result)
| sanSS/programming-contests | project-euler/problem004.py | Python | gpl-3.0 | 1,398 | [
"VisIt"
] | 7f23203104038bc03bda7eeac1ed3c7e86e88719bbcc7ca5c605d20e270dee0c |
import pycasso
import numpy as np
from sklearn.preprocessing import scale
## Sparse linear regression
## Generate the design matrix and regression coefficient vector
n = 100 # sample number
d = 80 # sample dimension
c = 0.5 # correlation parameter
s = 20 # support size of coefficient
X = scale(np.random.randn(n,d)+c* np.tile(np.random.randn(n),[d,1]).T )/ (n*(n-1))**0.5
beta = np.append(np.random.rand(s), np.zeros(d-s))
## Generate response using Gaussian noise, and fit sparse linear models
noise = np.random.randn(n)
Y = np.matmul(X,beta) + noise
## l1 regularization solved with naive update
solver_l1 = pycasso.Solver(X,Y, lambdas=(100,0.05), family="gaussian")
solver_l1.train()
## mcp regularization
solver_mcp = pycasso.Solver(X,Y, lambdas=(100,0.05), penalty="mcp")
solver_mcp.train()
## scad regularization
solver_scad = pycasso.Solver(X,Y, lambdas=(100,0.05), penalty="scad")
solver_scad.train()
## Obtain the result
result = solver_l1.coef()
## print out training time
print(result['total_train_time'])
## lambdas used
print(solver_l1.lambdas)
## number of nonzero coefficients for each lambda
print(result['df'])
## coefficients and intercept for the i-th lambda
i = 30
print(solver_l1.lambdas[i])
print(result['beta'][i])
print(result['intercept'][i])
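## Illustrative support-recovery check (uses only objects defined above):
## compare the nonzero coefficients at the i-th lambda with the true support.
support_est = set(np.nonzero(result['beta'][i])[0])
print("recovered true positives:", len(support_est & set(range(s))))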
## Visualize the solution path
solver_l1.plot()
solver_mcp.plot()
solver_scad.plot()
################################################################
## Sparse logistic regression
## Generate the design matrix and regression coefficient vector
n = 100 # sample number
d = 80 # sample dimension
c = 0.5 # correlation parameter
s = 20 # support size of coefficient
X = scale(np.random.randn(n,d)+c* np.tile(np.random.randn(n),[d,1]).T )/ (n*(n-1))**0.5
beta = np.append(np.random.rand(s), np.zeros(d-s))
## Generate response and fit sparse logistic models
noise = np.random.randn(n)
p = 1/(1+np.exp(-np.matmul(X,beta) - noise))
Y = np.random.binomial(np.ones(n,dtype='int64'),p)
## l1 regularization
solver_l1 = pycasso.Solver(X,Y, lambdas=(100,0.05), family="binomial", penalty="l1")
solver_l1.train()
## mcp regularization
solver_mcp = pycasso.Solver(X,Y, lambdas=(100,0.05), family="binomial", penalty="mcp")
solver_mcp.train()
## scad regularization
solver_scad = pycasso.Solver(X,Y, lambdas=(100,0.05), family="binomial", penalty="scad")
solver_scad.train()
## Obtain the result
result = solver_l1.coef()
## print out training time
print(result['total_train_time'])
## lambdas used
print(solver_l1.lambdas)
## number of nonzero coefficients for each lambda
print(result['df'])
## coefficients and intercept for the i-th lambda
i = 30
print(solver_l1.lambdas[i])
print(result['beta'][i])
print(result['intercept'][i])
## Visualize the solution path
solver_l1.plot()
solver_mcp.plot()
solver_scad.plot()
################################################################
## Sparse poisson regression
## Generate the design matrix and regression coefficient vector
n = 100 # sample number
d = 80 # sample dimension
c = 0.5 # correlation parameter
s = 20 # support size of coefficient
X = scale(np.random.randn(n,d)+c* np.tile(np.random.randn(n),[d,1]).T )/ (n*(n-1))**0.5
beta = np.append(np.random.rand(s), np.zeros(d-s))/(s**0.5)
## Generate response and fit sparse logistic models
noise = np.random.randn(n)
p = np.exp(-np.matmul(X,beta) - noise)
Y = np.random.poisson(p, n)
## l1 regularization
solver_l1 = pycasso.Solver(X,Y, lambdas=(100,0.05), family="poisson", penalty="l1")
solver_l1.train()
## mcp regularization
solver_mcp = pycasso.Solver(X,Y, lambdas=(100,0.05), family="poisson", penalty="mcp")
solver_mcp.train()
## scad regularization
solver_scad = pycasso.Solver(X,Y, lambdas=(100,0.05), family="poisson", penalty="scad")
solver_scad.train()
## Obtain the result
result = solver_l1.coef()
## print out training time
print(result['total_train_time'])
## lambdas used
print(solver_l1.lambdas)
## number of nonzero coefficients for each lambda
print(result['df'])
## coefficients and intercept for the i-th lambda
i = 30
print(solver_l1.lambdas[i])
print(result['beta'][i])
print(result['intercept'][i])
## Visualize the solution path
solver_l1.plot()
solver_mcp.plot()
solver_scad.plot()
| jasonge27/picasso | tutorials/tutorial.py | Python | gpl-3.0 | 4,212 | [
"Gaussian"
] | e8f6f7f034f36a7c7e5d05dc28ad9907a0f7c164debac80508dbe69eb38732b6 |
#!/usr/bin/env python3
"""
Generation of XML for Galaxy from https://bio.tools based on the Tooldog model using
galaxyxml library.
"""
# Import ------------------------------
# General libraries
import os
import copy
import logging
# External libraries
from lxml import etree
import galaxyxml.tool as gxt
import galaxyxml.tool.parameters as gxtp
from galaxyxml.tool.import_xml import GalaxyXmlParser
# Class and Objects
from .edam_to_galaxy import EdamToGalaxy
from tooldog import __version__
# Constant(s) ------------------------------
LOGGER = logging.getLogger(__name__)
PARAM_COMMENT = "This parameter has been automatically generated from" \
" https://bio.tools/tool/%s by ToolDog v" + str(__version__) + "."
FIXME = "FIXME: Please map this parameter to its command line argument."
# Class(es) ------------------------------
class GalaxyToolGen(object):
"""
Class to support generation of XML from :class:`tooldog.biotool_model.Biotool` object.
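    Typical usage (illustrative; assumes a Biotool object ``biotool`` whose
    ``topics`` attribute holds Topic objects, as in the ToolDog model):
        gen = GalaxyToolGen(biotool)
        for topic in biotool.topics:
            gen.add_edam_topic(topic)
        gen.write_xml('mytool.xml')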
"""
def __init__(self, biotool, galaxy_url=None, edam_url=None, mapping_json=None,
existing_tool=None):
"""
Initialize a [Tool] object from galaxyxml with the minimal information
(a name, an id, a version, a description, the command, the command version
and a help).
:param biotool: Biotool object of an entry from https://bio.tools.
:type biotool: :class:`tooldog.biotool_model.Biotool`
"""
# Initialize GalaxyInfo
self.etog = EdamToGalaxy(galaxy_url=galaxy_url, edam_url=edam_url,
mapping_json=mapping_json)
# Initialize counters for inputs and outputs from bio.tools
self.input_ct = 0
self.output_ct = 0
self.biotool_id = biotool.tool_id
if existing_tool:
LOGGER.info("Loading existing XML from " + existing_tool)
gxp = GalaxyXmlParser()
self.tool = gxp.import_xml(existing_tool)
# Add a description if missing from description
if self.tool.root.find('description').text is None:
self.tool.root.find('description').text = biotool.description.split('.')[0] + '.'
# Add information about Tooldog version
self.tool.add_comment("This tool descriptor has been annotated by ToolDog v" +
__version__)
# Help if missing or TODO
if self.tool.help is None:
self.tool.help = biotool.generate_galaxy_help()
elif "TODO" in self.tool.help:
LOGGER.info("TODO has been found in help, content has been replaced.")
self.tool.help = biotool.generate_galaxy_help()
else:
LOGGER.info("Creating new GalaxyToolGen object...")
# Initialize tool
# Get the first sentence of the description only
description = biotool.description.split('.')[0] + '.'
self.tool = gxt.Tool(biotool.name, biotool.tool_id, biotool.version,
description, "COMMAND", version_command="COMMAND --version")
self.tool.help = biotool.generate_galaxy_help()
# Add information about Galaxy and EDAM in the XML
self.tool.add_comment("Information was obtained from the Galaxy instance: " +
self.etog.galaxy_url + " v" +
self.etog.galaxy_version + " and EDAM v" +
self.etog.edam_version)
# Add information about Tooldog version
self.tool.add_comment("This tool descriptor has been generated by ToolDog v" +
__version__)
def add_edam_topic(self, topic):
"""
Add the EDAM topic to the tool (XML: <edam_topics>).
:param topic: Topic object.
:type topic: :class:`tooldog.biotool_model.Topic`
"""
LOGGER.debug("Adding EDAM topic " + topic.get_edam_id() + " to GalaxyToolGen object.")
if not hasattr(self.tool, 'edam_topics'):
# First time we add topics to the tool
self.tool.edam_topics = gxtp.EdamTopics()
if not self.tool.edam_topics.has_topic(topic.get_edam_id()):
self.tool.edam_topics.append(gxtp.EdamTopic(topic.get_edam_id()))
def add_edam_operation(self, operation):
"""
Add the EDAM operation to the tool (XML: <edam_operations>).
:param topic: Operation object.
:type topic: :class:`tooldog.biotool_model.Operation`
"""
LOGGER.debug("Adding EDAM operation " + operation.get_edam_id() +
" to GalaxyToolGen object.")
if not hasattr(self.tool, 'edam_operations'):
# First time we add operations to the tool
self.tool.edam_operations = gxtp.EdamOperations()
if not self.tool.edam_operations.has_operation(operation.get_edam_id()):
self.tool.edam_operations.append(gxtp.EdamOperation(operation.get_edam_id()))
def add_input_file(self, input_obj):
"""
Add an input to the tool (XML: <inputs>).
:param input_obj: Input object.
:type input_obj: :class:`tooldog.biotool_model.Input`
"""
LOGGER.debug("Adding input to GalaxyToolGen object...")
if not hasattr(self.tool, 'inputs'):
self.tool.inputs = gxtp.Inputs()
# Build parameter
self.input_ct += 1
data_uri = input_obj.data_type.get_edam_id()
# Give unique name to the input
name = 'INPUT' + str(self.input_ct)
# Get all different format for this input
list_formats = []
if not input_obj.formats:
list_formats.append(self.etog.get_datatype(edam_data=data_uri))
else:
for format_obj in input_obj.formats:
format_uri = format_obj.get_edam_id()
list_formats.append(self.etog.get_datatype(edam_data=data_uri,
edam_format=format_uri))
formats = ', '.join(list_formats)
# Create the parameter
param = gxtp.DataParam(name, label=input_obj.data_type.term,
help=input_obj.description, format=formats)
# Override the corresponding arguments in the command line
param.command_line_override = '--' + name + ' $' + name
# Write comment about this param
param.node.insert(0, etree.Comment(FIXME))
param.node.insert(0, etree.Comment(PARAM_COMMENT % (self.biotool_id)))
# Appends parameter to inputs
self.tool.inputs.append(param)
def add_output_file(self, output):
"""
Add an output to the tool (XML: <outputs>).
:param output: Output object.
:type output: :class:`tooldog.biotool_model.Output`
"""
LOGGER.debug("Adding output to GalaxyToolGen object...")
if not hasattr(self.tool, 'outputs'):
self.tool.outputs = gxtp.Outputs()
# Build parameter
self.output_ct += 1
data_uri = output.data_type.get_edam_id()
# Give unique name to the output
name = 'OUTPUT' + str(self.output_ct)
# Get all different format for this output
list_formats = []
if not output.formats:
list_formats.append(self.etog.get_datatype(edam_data=data_uri))
else:
for format_obj in output.formats:
format_uri = format_obj.get_edam_id()
list_formats.append(self.etog.get_datatype(edam_data=data_uri,
edam_format=format_uri))
formats = ', '.join(list_formats)
# Create the parameter
param = gxtp.OutputData(name, format=formats, from_work_dir=name +
"." + formats.replace('.', '/'))
param.command_line_override = ''
# Write comment about this param
param.node.insert(0, etree.Comment(FIXME))
param.node.insert(0, etree.Comment(PARAM_COMMENT % (self.biotool_id)))
self.tool.outputs.append(param)
def add_citation(self, publication):
"""
Add publication(s) to the tool (XML: <citations>).
:param publication: Publication object.
:type publication: :class:`tooldog.biotool_model.Publication`
"""
LOGGER.debug("Adding citation to GalaxyToolGen object...")
if not hasattr(self.tool, 'citations'):
self.tool.citations = gxtp.Citations()
# Add citation depending the type (doi, pmid...)
if publication.doi is not None:
if not self.tool.citations.has_citation('doi', publication.doi):
self.tool.citations.append(gxtp.Citation('doi', publication.doi))
# <citation> only supports doi and bibtex as a type
elif publication.pmid is not None:
# self.tool.citations.append(gxtp.Citation('pmid', publication.pmid))
            LOGGER.warning('pmid is not supported by <citation>, citation skipped')
elif publication.pmcid is not None:
# self.tool.citations.append(gxtp.Citation('pmcid', publication.pmcid))
            LOGGER.warning('pmcid is not supported by <citation>, citation skipped')
def write_xml(self, out_file=None, index=None, keep_old_command=False):
"""
        Write the Galaxy XML to STDOUT or to out_file(s).
:param out_file: path to output file.
:type out_file: STRING
:param index: Index in case more than one function is described.
:type index: INT
"""
        # Copy information to avoid expansion of the XML in case we write several XMLs
export_tool = copy.deepcopy(self.tool)
        # Write the XML to STDOUT
if out_file is None:
if index is not None:
print('########## XML number ' + str(index) + ' ##########')
LOGGER.info("Writing XML file to STDOUT")
print(export_tool.export(keep_old_command).decode('utf-8'))
else:
# Format name for output file(s)
if index is not None:
out_file = os.path.splitext(out_file)[0] + str(index) + '.xml'
else:
out_file = os.path.splitext(out_file)[0] + '.xml'
LOGGER.info("Writing XML file to " + out_file)
with open(out_file, 'w') as file_w:
file_w.write(export_tool.export(keep_old_command).decode('utf-8'))
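# --- Illustrative sketch (not part of the original file) ---
# The comment-insertion pattern used by add_input_file / add_output_file
# above, shown with plain xml.etree. The assumption here is that gxtp
# exposes the raw element as `param.node`, as the calls above suggest;
# 'INPUT1' and the comment texts are made up for the example.
#
#     import xml.etree.ElementTree as ET
#     node = ET.Element('param', name='INPUT1', type='data')
#     node.insert(0, ET.Comment('FIXME: please check this parameter'))
#     node.insert(0, ET.Comment('Tool generated from a bio.tools entry'))
#     print(ET.tostring(node).decode('utf-8'))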
| khillion/ToolDog | tooldog/annotate/galaxy.py | Python | mit | 10,519 | [
"Galaxy"
] | 80082aebfde478e3e2960265af14b7777641bc8eac41100adcf9883187120f86 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Heyo
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_nssm
version_added: "2.0"
short_description: NSSM - the Non-Sucking Service Manager
description:
- nssm is a service helper which doesn't suck. See U(https://nssm.cc/) for more information.
requirements:
- "nssm >= 2.24.0 # (install via M(win_chocolatey)) C(win_chocolatey: name=nssm)"
options:
name:
description:
- Name of the service to operate on.
type: str
required: true
state:
description:
- State of the service on the system.
- Note that NSSM actions like "pause", "continue", "rotate" do not fit the declarative style of ansible, so these should be implemented via the
ansible command module.
type: str
choices: [ absent, present, started, stopped, restarted ]
default: started
application:
description:
    - The application binary to run as a service.
- "Specify this whenever the service may need to be installed (state: present, started, stopped, restarted)"
- "Note that the application name must look like the following, if the directory includes spaces:"
- 'nssm install service "C:\\Program Files\\app.exe\\" "C:\\Path with spaces\\"'
- >
See commit 0b386fc1984ab74ee59b7bed14b7e8f57212c22b in the nssm.git project for more info:
U(https://git.nssm.cc/?p=nssm.git;a=commit;h=0b386fc1984ab74ee59b7bed14b7e8f57212c22b)
stdout_file:
description:
- Path to receive output.
type: str
stderr_file:
description:
- Path to receive error output.
type: str
app_parameters:
description:
- A string representing a dictionary of parameters to be passed to the application when it starts.
- Use either this or C(app_parameters_free_form), not both.
type: str
app_parameters_free_form:
description:
- Single string of parameters to be passed to the service.
- Use either this or C(app_parameters), not both.
type: str
version_added: "2.3"
dependencies:
description:
    - Service dependencies that have to be started to trigger startup, separated by comma.
type: list
user:
description:
- User to be used for service startup.
type: str
password:
description:
- Password to be used for service startup.
type: str
start_mode:
description:
- If C(auto) is selected, the service will start at bootup.
- C(delayed) causes a delayed but automatic start after boot (added in version 2.5).
- C(manual) means that the service will start only when another service needs it.
    - C(disabled) means that the service will stay off, regardless of whether it is needed or not.
type: str
choices: [ auto, delayed, disabled, manual ]
default: auto
seealso:
- module: win_service
author:
- Adam Keech (@smadam813)
- George Frank (@georgefrank)
- Hans-Joachim Kliemeck (@h0nIg)
- Michael Wild (@themiwi)
'''
EXAMPLES = r'''
# Install and start the foo service
- win_nssm:
name: foo
application: C:\windows\foo.exe
# Install and start the foo service with a key-value pair argument
# This will yield the following command: C:\windows\foo.exe -bar true
- win_nssm:
name: foo
application: C:\windows\foo.exe
app_parameters: -bar=true
# Install and start the foo service with a single parameter
# This will yield the following command: C:\windows\foo.exe bar
- win_nssm:
name: foo
application: C:\windows\foo.exe
app_parameters: _=bar
# Install and start the foo service with a mix of single params, and key value pairs
# This will yield the following command: C:\windows\foo.exe bar -file output.bat -foo false
- win_nssm:
name: foo
application: C:\windows\foo.exe
app_parameters: _=bar; -file=output.bat; -foo=false
# Use the single line parameters option to specify an arbitrary string of parameters
# for the service executable
- name: Make sure the Consul service runs
win_nssm:
name: consul
application: C:\consul\consul.exe
app_parameters_free_form: agent -config-dir=C:\consul\config
stdout_file: C:\consul\log.txt
stderr_file: C:\consul\error.txt
# Install and start the foo service, redirecting stdout and stderr to the same file
- win_nssm:
name: foo
application: C:\windows\foo.exe
stdout_file: C:\windows\foo.log
stderr_file: C:\windows\foo.log
# Install and start the foo service, but wait for dependencies tcpip and adf
- win_nssm:
name: foo
application: C:\windows\foo.exe
dependencies: 'adf,tcpip'
# Install and start the foo service with dedicated user
- win_nssm:
name: foo
application: C:\windows\foo.exe
user: foouser
password: secret
# Install the foo service but do not start it automatically
- win_nssm:
name: foo
application: C:\windows\foo.exe
state: present
start_mode: manual
# Remove the foo service
- win_nssm:
name: foo
state: absent
'''
| Jorge-Rodriguez/ansible | lib/ansible/modules/windows/win_nssm.py | Python | gpl-3.0 | 5,307 | [
"ADF"
] | 7e244983d23b4c32c5fe892f161f7192a3eee76fab28b0a481acb67e8b765103 |
import os
from glutton.utils import get_log, tmpfasta, tmpfasta_orfs, tmpfile, rm_f, threadsafe_io, fasta_stats
from glutton.prank import Prank
from glutton.pagan import Pagan
from glutton.blast import Blastx, Tblastx
from abc import abstractmethod
from os.path import basename, isfile, join
from sys import exit
import time
DEBUG = False
class JobError(Exception) :
pass
class Job(object) :
QUEUED,RUNNING,SUCCESS,FAIL,TERMINATED,INTERNAL_ERROR,NOTHING_TO_DO = range(7)
states = {
QUEUED : 'QUEUED',
RUNNING : 'RUNNING',
SUCCESS : 'SUCCESS',
FAIL : 'FAIL',
TERMINATED : 'TERMINATED',
INTERNAL_ERROR : 'INTERNAL_ERROR',
NOTHING_TO_DO : 'NOTHING_TO_DO'
}
def __init__(self, callback) :
self.state = Job.QUEUED
self.log = get_log()
self.callback = callback
def success(self) :
if self.state not in (Job.SUCCESS, Job.FAIL, Job.TERMINATED, Job.INTERNAL_ERROR) :
raise JobError('job has not been run')
return self.state == Job.SUCCESS
def fail(self) :
return not self.success()
def terminated(self) :
if self.state not in (Job.SUCCESS, Job.FAIL, Job.TERMINATED, Job.INTERNAL_ERROR) :
raise JobError('job has not been run')
return self.state == Job.TERMINATED
def start(self) :
self.state = Job.RUNNING
def end(self, s) :
assert s in (Job.SUCCESS, Job.FAIL, Job.TERMINATED, Job.INTERNAL_ERROR), "status should be success, fail or terminated"
self.state = s
def run(self) :
self.start()
ret = self._run()
#try :
# ret = self._run()
#except Exception, e :
# self.log.error(str(e))
# self.end(Job.INTERNAL_ERROR)
# self.cleanup()
# return
if ret == 0 :
self.end(Job.SUCCESS)
        elif ret == -2 : # process was killed by SIGINT (subprocess returncode -2)
self.end(Job.TERMINATED)
else :
self.end(Job.FAIL)
if not self.terminated() :
self.callback(self)
self.cleanup()
def state_str(self) :
return Job.states[self.state]
    # delete only the files this job created; deleting the input files used
    # to be the caller's responsibility, but callers now pass data rather
    # than filenames, so that note no longer applies
def cleanup(self) :
for f in self._get_filenames() :
if f and isfile(f) :
self.log.debug("deleting %s" % f)
rm_f(f)
@abstractmethod
def _run(self) :
pass
@abstractmethod
def _get_filenames(self) :
pass
def __str__(self) :
return type(self).__name__ #"%s, %s" % (type(self).__name__, self.fname)
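# --- Illustrative sketch (not part of the original file) ---
# A minimal concrete Job using only the contract defined above: _run()
# returns a process-style exit code (0 -> SUCCESS, -2 -> TERMINATED,
# anything else -> FAIL) and _get_filenames() lists the temporary files
# that cleanup() should delete. The name EchoJob is hypothetical and not
# part of glutton.
class EchoJob(Job) :
    def __init__(self, callback, message) :
        super(EchoJob, self).__init__(callback)
        self.message = message
    def _run(self) :
        # pretend to do some work, then report success
        self.log.info(self.message)
        return 0
    def _get_filenames(self) :
        # no temporary files were created, nothing to clean up
        return []
# usage sketch: EchoJob(lambda job : job.state_str(), "hello").run()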
class PrankJob(Job) :
def __init__(self, callback, sequences) :
super(PrankJob, self).__init__(callback)
self.sequences = sequences
self.prank = Prank()
@property
def input(self) :
return self.sequences
@property
def tree(self) :
return self.prank.tree
@property
def alignment(self) :
return self.prank.alignment
def _get_filenames(self) :
return [self.infile] + self.prank.output_filenames(self.infile)
def _run(self) :
global DEBUG
self.infile = tmpfasta(self.sequences)
start_time = time.time()
result = self.prank.run(self.infile, self.infile)
elapsed_time = time.time() - start_time
q_count, q_sum, q_min, q_max, q_mean, q_sd = fasta_stats(self.infile)
if DEBUG :
threadsafe_io('prank_stats.txt', "%d %d %d %d %d %.3f %.3f %d" % \
(result, \
q_count, q_sum, q_min, q_max, q_mean, q_sd, \
elapsed_time))
return result
class BlastJob(Job) :
def __init__(self, callback, database, queries, blast_version='blastx') :
super(BlastJob, self).__init__(callback)
self.database = database
self.queries = queries
assert blast_version in ('blastx', 'tblastx')
self.blastx = Tblastx() if blast_version == 'tblastx' else Blastx()
@property
def input(self) :
return self.queries
@property
def results(self) :
return self.blastx.results
def _get_filenames(self) :
return [self.query_fname, self.out_fname]
def _run(self) :
global DEBUG
self.query_fname = tmpfasta(self.queries)
self.out_fname = tmpfile()
result = self.blastx.run(self.query_fname, self.database, self.out_fname)
q = dict([ (q.id, len(q)) for q in self.input ])
if DEBUG :
for br in self.results :
threadsafe_io('blastx_stats.txt', "%s %s %.3f %d %d %d %d %d %.3e %d %.3f" % \
(br.qseqid,
br.sseqid,
br.pident,
br.length,
br.qstart,
br.qend,
br.sstart,
br.send,
br.evalue,
q[br.qseqid],
((br.pident / 100.0) * (max(br.qstart, br.qend) - min(br.qstart, br.qend))) / float(q[br.qseqid])))
return result
class PaganJob(Job) :
def __init__(self, callback, queries, genefamily_id, alignment, tree, identity, overlap) :
super(PaganJob, self).__init__(callback)
self._queries = queries
self._genefamily = genefamily_id
self._alignment = alignment
self._tree = tree
self.identity = identity
self.overlap = overlap
self.pagan = Pagan()
self.query_fname = None
self.out_fname = None
self.alignment_fname = None
self.tree_fname = None
@property
def input(self) :
return self._queries
@property
def genefamily(self) :
return self._genefamily
@property
def nucleotide_alignment(self) :
return self.pagan.nucleotide_alignment
@property
def protein_alignment(self) :
return self.pagan.protein_alignment
def _get_filenames(self) :
#return self.pagan.output_filenames(self.out_fname)
return [self.query_fname, self.alignment_fname, self.tree_fname] + self.pagan.output_filenames(self.out_fname)
def _run(self) :
global DEBUG
self.query_fname = tmpfasta_orfs(self._queries, strand=True)
#self.query_fname = tmpfasta(self._queries)
self.out_fname = tmpfile()
self.alignment_fname = tmpfasta(self._alignment) # tmpfasta_kill_n(self._alignment)
self.tree_fname = tmpfile(self._tree) if self._tree else None
start_time = time.time()
result = self.pagan.run(self.query_fname,
self.out_fname,
self.alignment_fname,
self.tree_fname,
self.identity,
self.overlap)
elapsed_time = time.time() - start_time
q_count, q_sum, q_min, q_max, q_mean, q_sd = fasta_stats(self.query_fname)
a_count, a_sum, a_min, a_max, a_mean, a_sd = fasta_stats(self.alignment_fname)
if DEBUG :
threadsafe_io('pagan_stats.txt', "%s %d %d %d %d %d %.3f %.3f %d %d %d %d %.3f %.3f %d" % \
(self._genefamily, result, \
q_count, q_sum, q_min, q_max, q_mean, q_sd, \
a_count, a_sum, a_min, a_max, a_mean, a_sd, \
elapsed_time))
return result
| ajm/glutton | glutton/job.py | Python | gpl-3.0 | 8,540 | [
"BLAST"
] | 7f9b4f8094b218f1d5e9e0b90b10e63568acb072a7c21ff08086de539e214714 |
"""Collection of function implementations.
Functions are either implemented as :class:`~chainer.Function`\\ s or
:class:`~chainer.FunctionNode`\\ s.
"""
from chainer.functions.activation.clipped_relu import clipped_relu # NOQA
from chainer.functions.activation.clipped_relu import ClippedReLU # NOQA
from chainer.functions.activation.crelu import crelu # NOQA
from chainer.functions.activation.crelu import CReLU # NOQA
from chainer.functions.activation.elu import elu # NOQA
from chainer.functions.activation.elu import ELU # NOQA
from chainer.functions.activation.hard_sigmoid import hard_sigmoid # NOQA
from chainer.functions.activation.hard_sigmoid import HardSigmoid # NOQA
from chainer.functions.activation.leaky_relu import leaky_relu # NOQA
from chainer.functions.activation.leaky_relu import LeakyReLU # NOQA
from chainer.functions.activation.log_softmax import log_softmax # NOQA
from chainer.functions.activation.log_softmax import LogSoftmax # NOQA
from chainer.functions.activation.lstm import lstm # NOQA
from chainer.functions.activation.lstm import LSTM # NOQA
from chainer.functions.activation.maxout import maxout # NOQA
from chainer.functions.activation.prelu import prelu # NOQA
from chainer.functions.activation.relu import relu # NOQA
from chainer.functions.activation.relu import ReLU # NOQA
from chainer.functions.activation.selu import selu # NOQA
from chainer.functions.activation.sigmoid import sigmoid # NOQA
from chainer.functions.activation.sigmoid import Sigmoid # NOQA
from chainer.functions.activation.slstm import slstm # NOQA
from chainer.functions.activation.slstm import SLSTM # NOQA
from chainer.functions.activation.softmax import softmax # NOQA
from chainer.functions.activation.softmax import Softmax # NOQA
from chainer.functions.activation.softplus import softplus # NOQA
from chainer.functions.activation.softplus import Softplus # NOQA
from chainer.functions.activation.swish import swish # NOQA
from chainer.functions.activation.tanh import tanh # NOQA
from chainer.functions.activation.tanh import Tanh # NOQA
from chainer.functions.activation.tree_lstm import tree_lstm # NOQA
from chainer.functions.array.broadcast import broadcast # NOQA
from chainer.functions.array.broadcast import Broadcast # NOQA
from chainer.functions.array.broadcast import broadcast_to # NOQA
from chainer.functions.array.broadcast import BroadcastTo # NOQA
from chainer.functions.array.cast import cast # NOQA
from chainer.functions.array.cast import Cast # NOQA
from chainer.functions.array.concat import concat # NOQA
from chainer.functions.array.concat import Concat # NOQA
from chainer.functions.array.copy import copy # NOQA
from chainer.functions.array.copy import Copy # NOQA
from chainer.functions.array.depth2space import depth2space # NOQA
from chainer.functions.array.depth2space import Depth2Space # NOQA
from chainer.functions.array.diagonal import diagonal # NOQA
from chainer.functions.array.dstack import dstack # NOQA
from chainer.functions.array.expand_dims import expand_dims # NOQA
from chainer.functions.array.expand_dims import ExpandDims # NOQA
from chainer.functions.array.flatten import flatten # NOQA
from chainer.functions.array.flip import flip # NOQA
from chainer.functions.array.flip import Flip # NOQA
from chainer.functions.array.fliplr import fliplr # NOQA
from chainer.functions.array.fliplr import FlipLR # NOQA
from chainer.functions.array.flipud import flipud # NOQA
from chainer.functions.array.flipud import FlipUD # NOQA
from chainer.functions.array.get_item import get_item # NOQA
from chainer.functions.array.get_item import GetItem # NOQA
from chainer.functions.array.hstack import hstack # NOQA
from chainer.functions.array.im2col import im2col # NOQA
from chainer.functions.array.im2col import Im2Col # NOQA
from chainer.functions.array.moveaxis import moveaxis # NOQA
from chainer.functions.array.pad import pad # NOQA
from chainer.functions.array.pad import Pad # NOQA
from chainer.functions.array.pad_sequence import pad_sequence # NOQA
from chainer.functions.array.pad_sequence import PadSequence # NOQA
from chainer.functions.array.permutate import permutate # NOQA
from chainer.functions.array.permutate import Permutate # NOQA
from chainer.functions.array.repeat import repeat # NOQA
from chainer.functions.array.reshape import reshape # NOQA
from chainer.functions.array.reshape import Reshape # NOQA
from chainer.functions.array.resize_images import resize_images # NOQA
from chainer.functions.array.resize_images import ResizeImages # NOQA
from chainer.functions.array.rollaxis import rollaxis # NOQA
from chainer.functions.array.rollaxis import Rollaxis # NOQA
from chainer.functions.array.scatter_add import scatter_add # NOQA
from chainer.functions.array.select_item import select_item # NOQA
from chainer.functions.array.select_item import SelectItem # NOQA
from chainer.functions.array.separate import separate # NOQA
from chainer.functions.array.space2depth import space2depth # NOQA
from chainer.functions.array.space2depth import Space2Depth # NOQA
from chainer.functions.array.spatial_transformer_grid import spatial_transformer_grid # NOQA
from chainer.functions.array.spatial_transformer_grid import SpatialTransformerGrid # NOQA
from chainer.functions.array.spatial_transformer_sampler import spatial_transformer_sampler # NOQA
from chainer.functions.array.spatial_transformer_sampler import SpatialTransformerSampler # NOQA
from chainer.functions.array.split_axis import split_axis # NOQA
from chainer.functions.array.split_axis import SplitAxis # NOQA
from chainer.functions.array.squeeze import squeeze # NOQA
from chainer.functions.array.squeeze import Squeeze # NOQA
from chainer.functions.array.stack import stack # NOQA
from chainer.functions.array.swapaxes import swapaxes # NOQA
from chainer.functions.array.swapaxes import Swapaxes # NOQA
from chainer.functions.array.tile import tile # NOQA
from chainer.functions.array.tile import Tile # NOQA
from chainer.functions.array.transpose import transpose # NOQA
from chainer.functions.array.transpose import Transpose # NOQA
from chainer.functions.array.transpose_sequence import transpose_sequence # NOQA
from chainer.functions.array.transpose_sequence import TransposeSequence # NOQA
from chainer.functions.array.vstack import vstack # NOQA
from chainer.functions.array.where import where # NOQA
from chainer.functions.array.where import Where # NOQA
from chainer.functions.connection.bilinear import bilinear # NOQA
from chainer.functions.connection.convolution_2d import convolution_2d # NOQA
from chainer.functions.connection.convolution_nd import convolution_nd # NOQA
from chainer.functions.connection.deconvolution_2d import deconvolution_2d # NOQA
from chainer.functions.connection.deconvolution_nd import deconvolution_nd # NOQA
from chainer.functions.connection.deformable_convolution_2d_sampler import deformable_convolution_2d_sampler # NOQA
from chainer.functions.connection.depthwise_convolution_2d import depthwise_convolution_2d # NOQA
from chainer.functions.connection.dilated_convolution_2d import dilated_convolution_2d # NOQA
from chainer.functions.connection.embed_id import embed_id # NOQA
from chainer.functions.connection.linear import linear # NOQA
from chainer.functions.connection.local_convolution_2d import local_convolution_2d # NOQA
from chainer.functions.connection.n_step_gru import n_step_bigru # NOQA
from chainer.functions.connection.n_step_gru import n_step_gru # NOQA
from chainer.functions.connection.n_step_gru import NStepBiGRU # NOQA
from chainer.functions.connection.n_step_gru import NStepGRU # NOQA
from chainer.functions.connection.n_step_lstm import n_step_bilstm # NOQA
from chainer.functions.connection.n_step_lstm import n_step_lstm # NOQA
from chainer.functions.connection.n_step_lstm import NStepBiLSTM # NOQA
from chainer.functions.connection.n_step_lstm import NStepLSTM # NOQA
from chainer.functions.connection.n_step_rnn import n_step_birnn # NOQA
from chainer.functions.connection.n_step_rnn import n_step_rnn # NOQA
from chainer.functions.connection.n_step_rnn import NStepBiRNNReLU # NOQA
from chainer.functions.connection.n_step_rnn import NStepBiRNNTanh # NOQA
from chainer.functions.connection.n_step_rnn import NStepRNNReLU # NOQA
from chainer.functions.connection.n_step_rnn import NStepRNNTanh # NOQA
from chainer.functions.connection.shift import shift # NOQA
from chainer.functions.evaluation.accuracy import accuracy # NOQA
from chainer.functions.evaluation.accuracy import Accuracy # NOQA
from chainer.functions.evaluation.binary_accuracy import binary_accuracy # NOQA
from chainer.functions.evaluation.binary_accuracy import BinaryAccuracy # NOQA
from chainer.functions.evaluation.classification_summary import classification_summary # NOQA
from chainer.functions.evaluation.classification_summary import ClassificationSummary # NOQA
from chainer.functions.evaluation.classification_summary import f1_score # NOQA
from chainer.functions.evaluation.classification_summary import precision # NOQA
from chainer.functions.evaluation.classification_summary import recall # NOQA
from chainer.functions.evaluation.r2_score import r2_score # NOQA
from chainer.functions.loss.absolute_error import absolute_error # NOQA
from chainer.functions.loss.absolute_error import AbsoluteError # NOQA
from chainer.functions.loss.black_out import black_out # NOQA
from chainer.functions.loss.contrastive import contrastive # NOQA
from chainer.functions.loss.contrastive import Contrastive # NOQA
from chainer.functions.loss.crf1d import argmax_crf1d # NOQA
from chainer.functions.loss.crf1d import crf1d # NOQA
from chainer.functions.loss.cross_covariance import cross_covariance # NOQA
from chainer.functions.loss.cross_covariance import CrossCovariance # NOQA
from chainer.functions.loss.ctc import connectionist_temporal_classification # NOQA
from chainer.functions.loss.ctc import ConnectionistTemporalClassification # NOQA
from chainer.functions.loss.decov import decov # NOQA
from chainer.functions.loss.decov import DeCov # NOQA
from chainer.functions.loss.hinge import hinge # NOQA
from chainer.functions.loss.hinge import Hinge # NOQA
from chainer.functions.loss.huber_loss import huber_loss # NOQA
from chainer.functions.loss.huber_loss import HuberLoss # NOQA
from chainer.functions.loss.mean_absolute_error import mean_absolute_error # NOQA
from chainer.functions.loss.mean_absolute_error import MeanAbsoluteError # NOQA
from chainer.functions.loss.mean_squared_error import mean_squared_error # NOQA
from chainer.functions.loss.mean_squared_error import MeanSquaredError # NOQA
from chainer.functions.loss.negative_sampling import negative_sampling # NOQA
from chainer.functions.loss.sigmoid_cross_entropy import sigmoid_cross_entropy # NOQA
from chainer.functions.loss.sigmoid_cross_entropy import SigmoidCrossEntropy # NOQA
from chainer.functions.loss.softmax_cross_entropy import softmax_cross_entropy # NOQA
from chainer.functions.loss.softmax_cross_entropy import SoftmaxCrossEntropy # NOQA
from chainer.functions.loss.squared_error import squared_error # NOQA
from chainer.functions.loss.squared_error import SquaredError # NOQA
from chainer.functions.loss.triplet import triplet # NOQA
from chainer.functions.loss.triplet import Triplet # NOQA
from chainer.functions.loss.vae import bernoulli_nll # NOQA
from chainer.functions.loss.vae import gaussian_kl_divergence # NOQA
from chainer.functions.loss.vae import gaussian_nll # NOQA
from chainer.functions.math.average import average # NOQA
from chainer.functions.math.basic_math import absolute # NOQA
from chainer.functions.math.basic_math import add # NOQA
from chainer.functions.math.batch_l2_norm_squared import batch_l2_norm_squared # NOQA
from chainer.functions.math.batch_l2_norm_squared import BatchL2NormSquared # NOQA
from chainer.functions.math.bias import bias # NOQA
from chainer.functions.math.ceil import ceil # NOQA
from chainer.functions.math.clip import clip # NOQA
from chainer.functions.math.clip import Clip # NOQA
from chainer.functions.math.cumsum import cumsum # NOQA
from chainer.functions.math.cumsum import Cumsum # NOQA
from chainer.functions.math.det import batch_det # NOQA
from chainer.functions.math.det import BatchDet # NOQA
from chainer.functions.math.det import det # NOQA
from chainer.functions.math.erf import erf # NOQA
from chainer.functions.math.erfc import erfc # NOQA
from chainer.functions.math.erfinv import erfinv # NOQA
from chainer.functions.math.exponential import exp # NOQA
from chainer.functions.math.exponential import Exp # NOQA
from chainer.functions.math.exponential import log # NOQA
from chainer.functions.math.exponential import Log # NOQA
from chainer.functions.math.exponential import log10 # NOQA
from chainer.functions.math.exponential import Log10 # NOQA
from chainer.functions.math.exponential import log2 # NOQA
from chainer.functions.math.exponential import Log2 # NOQA
from chainer.functions.math.exponential_m1 import expm1 # NOQA
from chainer.functions.math.exponential_m1 import Expm1 # NOQA
from chainer.functions.math.fft import fft # NOQA
from chainer.functions.math.fft import ifft # NOQA
from chainer.functions.math.fix import fix # NOQA
from chainer.functions.math.floor import floor # NOQA
from chainer.functions.math.fmod import fmod # NOQA
from chainer.functions.math.fmod import Fmod # NOQA
from chainer.functions.math.hyperbolic import cosh # NOQA
from chainer.functions.math.hyperbolic import Cosh # NOQA
from chainer.functions.math.hyperbolic import sinh # NOQA
from chainer.functions.math.hyperbolic import Sinh # NOQA
from chainer.functions.math.identity import identity # NOQA
from chainer.functions.math.identity import Identity # NOQA
from chainer.functions.math.inv import batch_inv # NOQA
from chainer.functions.math.inv import BatchInv # NOQA
from chainer.functions.math.inv import inv # NOQA
from chainer.functions.math.inv import Inv # NOQA
from chainer.functions.math.linear_interpolate import linear_interpolate # NOQA
from chainer.functions.math.linear_interpolate import LinearInterpolate # NOQA
from chainer.functions.math.logarithm_1p import Log1p # NOQA
from chainer.functions.math.logarithm_1p import log1p # NOQA
from chainer.functions.math.logsumexp import logsumexp # NOQA
from chainer.functions.math.logsumexp import LogSumExp # NOQA
from chainer.functions.math.matmul import batch_matmul # NOQA
from chainer.functions.math.matmul import matmul # NOQA
from chainer.functions.math.matmul import MatMul # NOQA
from chainer.functions.math.maximum import maximum # NOQA
from chainer.functions.math.maximum import Maximum # NOQA
from chainer.functions.math.minimum import minimum # NOQA
from chainer.functions.math.minimum import Minimum # NOQA
from chainer.functions.math.minmax import argmax # NOQA
from chainer.functions.math.minmax import ArgMax # NOQA
from chainer.functions.math.minmax import argmin # NOQA
from chainer.functions.math.minmax import ArgMin # NOQA
from chainer.functions.math.minmax import max # NOQA
from chainer.functions.math.minmax import Max # NOQA
from chainer.functions.math.minmax import min # NOQA
from chainer.functions.math.minmax import Min # NOQA
from chainer.functions.math.prod import prod # NOQA
from chainer.functions.math.prod import Prod # NOQA
from chainer.functions.math.scale import scale # NOQA
from chainer.functions.math.sign import sign # NOQA
from chainer.functions.math.sparse_matmul import sparse_matmul # NOQA
from chainer.functions.math.sqrt import rsqrt # NOQA
from chainer.functions.math.sqrt import sqrt # NOQA
from chainer.functions.math.sqrt import Sqrt # NOQA
from chainer.functions.math.square import square # NOQA
from chainer.functions.math.square import Square # NOQA
from chainer.functions.math.squared_difference import squared_difference # NOQA
from chainer.functions.math.squared_difference import SquaredDifference # NOQA
from chainer.functions.math.sum import sum # NOQA
from chainer.functions.math.sum import Sum # NOQA
from chainer.functions.math.sum import sum_to # NOQA
from chainer.functions.math.tensordot import tensordot # NOQA
from chainer.functions.math.trigonometric import arccos # NOQA
from chainer.functions.math.trigonometric import Arccos # NOQA
from chainer.functions.math.trigonometric import arcsin # NOQA
from chainer.functions.math.trigonometric import Arcsin # NOQA
from chainer.functions.math.trigonometric import arctan # NOQA
from chainer.functions.math.trigonometric import Arctan # NOQA
from chainer.functions.math.trigonometric import arctan2 # NOQA
from chainer.functions.math.trigonometric import Arctan2 # NOQA
from chainer.functions.math.trigonometric import cos # NOQA
from chainer.functions.math.trigonometric import Cos # NOQA
from chainer.functions.math.trigonometric import sin # NOQA
from chainer.functions.math.trigonometric import Sin # NOQA
from chainer.functions.math.trigonometric import tan # NOQA
from chainer.functions.math.trigonometric import Tan # NOQA
from chainer.functions.noise.dropout import dropout # NOQA
from chainer.functions.noise.dropout import Dropout # NOQA
from chainer.functions.noise.gaussian import gaussian # NOQA
from chainer.functions.noise.gaussian import Gaussian # NOQA
from chainer.functions.noise.gumbel_softmax import gumbel_softmax # NOQA
from chainer.functions.noise.simplified_dropconnect import simplified_dropconnect # NOQA
from chainer.functions.noise.simplified_dropconnect import SimplifiedDropconnect # NOQA
from chainer.functions.noise.zoneout import zoneout # NOQA
from chainer.functions.noise.zoneout import Zoneout # NOQA
from chainer.functions.normalization.batch_normalization import batch_normalization # NOQA
from chainer.functions.normalization.batch_normalization import fixed_batch_normalization # NOQA
from chainer.functions.normalization.batch_renormalization import batch_renormalization # NOQA
from chainer.functions.normalization.batch_renormalization import fixed_batch_renormalization # NOQA
from chainer.functions.normalization.group_normalization import group_normalization # NOQA
from chainer.functions.normalization.l2_normalization import normalize # NOQA
from chainer.functions.normalization.l2_normalization import NormalizeL2 # NOQA
from chainer.functions.normalization.layer_normalization import layer_normalization # NOQA
from chainer.functions.normalization.layer_normalization import LayerNormalization # NOQA
from chainer.functions.normalization.local_response_normalization import local_response_normalization # NOQA
from chainer.functions.normalization.local_response_normalization import LocalResponseNormalization # NOQA
from chainer.functions.pooling.average_pooling_2d import average_pooling_2d # NOQA
from chainer.functions.pooling.average_pooling_2d import AveragePooling2D # NOQA
from chainer.functions.pooling.average_pooling_nd import average_pooling_nd # NOQA
from chainer.functions.pooling.average_pooling_nd import AveragePoolingND # NOQA
from chainer.functions.pooling.max_pooling_2d import max_pooling_2d # NOQA
from chainer.functions.pooling.max_pooling_2d import MaxPooling2D # NOQA
from chainer.functions.pooling.max_pooling_nd import max_pooling_nd # NOQA
from chainer.functions.pooling.max_pooling_nd import MaxPoolingND # NOQA
from chainer.functions.pooling.roi_pooling_2d import roi_pooling_2d # NOQA
from chainer.functions.pooling.roi_pooling_2d import ROIPooling2D # NOQA
from chainer.functions.pooling.spatial_pyramid_pooling_2d import spatial_pyramid_pooling_2d # NOQA
from chainer.functions.pooling.unpooling_2d import Unpooling2D # NOQA
from chainer.functions.pooling.unpooling_2d import unpooling_2d # NOQA
from chainer.functions.pooling.unpooling_nd import unpooling_nd # NOQA
from chainer.functions.pooling.unpooling_nd import UnpoolingND # NOQA
from chainer.functions.pooling.upsampling_2d import Upsampling2D # NOQA
from chainer.functions.pooling.upsampling_2d import upsampling_2d # NOQA
from chainer.functions.theano.theano_function import TheanoFunction # NOQA
from chainer.functions.util.forget import forget # NOQA
from chainer.functions.util.forget import Forget # NOQA
# Aliases
from chainer.functions.math.average import average as mean # NOQA
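# --- Illustrative sketch (not part of the original file) ---
# Typical use of this namespace: apply a function to a chainer.Variable
# (or a raw ndarray) and backpropagate through the resulting graph.
# Kept as a comment so nothing executes at import time.
#
#     import numpy as np
#     import chainer
#     import chainer.functions as F
#
#     x = chainer.Variable(np.array([[-1.0, 2.0]], dtype=np.float32))
#     y = F.relu(x)                  # forward pass through a FunctionNode
#     y.grad = np.ones_like(y.data)  # seed the output gradient
#     y.backward()                   # populates x.grad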
| anaruse/chainer | chainer/functions/__init__.py | Python | mit | 20,455 | [
"Gaussian"
] | b8ffa4bc770c4d485f5dda57551aeec4d35ac1aef561fc9f85393e184d7fcf0a |
"""Model definition for base class for Linear Time-varying systems
@author: Jerker Nordh
"""
from pyparticleest.interfaces import FFBSi, ParticleFiltering
try:
import pyparticleest.utils.ckalman as kalman
import pyparticleest.utils.cmlnlg_compute as mlnlg_compute
except ImportError:
print("Falling back to pure python implementaton, expect horrible performance")
import pyparticleest.utils.kalman as kalman
import pyparticleest.utils.mlnlg_compute as mlnlg_compute
import numpy
import scipy.linalg
from builtins import range
class LTV(FFBSi, ParticleFiltering):
"""
    Base class for particles of the linear time-varying type with additive
    Gaussian noise. Implement this type of system by extending this class
    and providing the methods that return the system matrices at each time
    instant
z_{t+1} = A*z_t + f + v, v ~ N(0, Q)
y_t = C*z_t + h + e, e ~ N(0,R)
Args:
- z0: Initial mean value of the state estimate
    - P0: Covariance of initial z estimate
- A (array-like): A matrix (if constant)
- C (array-like): C matrix (if constant)
- Q (array-like): Q matrix (if constant)
- R (array-like): R matrix (if constant)
- f (array-like): f vector (if constant)
- h (array-like): h vector (if constant)
- params (array-like): model parameters (if any)
"""
def __init__(self, z0, P0, A=None, C=None, Q=None,
R=None, f=None, h=None, params=None, **kwargs):
self.z0 = numpy.copy(z0).reshape((-1, 1))
self.P0 = numpy.copy(P0)
if (f is None):
f = numpy.zeros_like(self.z0)
self.kf = kalman.KalmanSmoother(lz=len(self.z0),
A=A, C=C,
Q=Q, R=R,
f_k=f, h_k=h)
super(LTV, self).__init__(**kwargs)
def create_initial_estimate(self, N):
"""Sample particles from initial distribution
Args:
- N (int): Number of particles to sample, since the estimate is
deterministic there is no reason for N > 1
Returns:
(array-like) with first dimension = N, model specific representation
of all particles """
if (N > 1):
print("N > 1 redundamt for LTV system (N={0})".format(N),)
lz = len(self.z0)
dim = lz + lz * lz
particles = numpy.empty((N, dim))
for i in range(N):
particles[i, :lz] = numpy.copy(self.z0).ravel()
particles[i, lz:] = numpy.copy(self.P0).ravel()
return particles
def set_states(self, particles, z_list, P_list):
"""
Set the estimate of the states
Args:
- particles (array-like): Model specific representation
of all particles, with first dimension = N (number of particles)
- z_list (list): list of mean values for z for each particle
- P_list (list): list of covariance matrices for z for each particle
"""
lz = len(self.z0)
N = len(particles)
for i in range(N):
particles[i, :lz] = z_list[i].ravel()
lzP = lz + lz * lz
particles[i, lz:lzP] = P_list[i].ravel()
def get_states(self, particles):
"""
Return the estimates contained in the particles array
Args:
- particles (array-like): Model specific representation
of all particles, with first dimension = N (number of particles)
Returns
(zl, Pl):
- zl: list of mean values for z
- Pl: list of covariance matrices for z
"""
N = len(particles)
zl = list()
Pl = list()
lz = len(self.z0)
for i in range(N):
zl.append(particles[i, :lz].reshape(-1, 1))
lzP = lz + lz * lz
Pl.append(particles[i, lz:lzP].reshape(self.P0.shape))
return (zl, Pl)
def get_pred_dynamics(self, u, t):
"""
Return matrices describing affine relation of next
nonlinear state conditioned on the current time and input signal
z_{t+1} = A*z_t + f + v, v ~ N(0, Q)
        Args:
            - u (array-like): input signal
            - t (float): time stamp
Returns:
(A, f, Q) where each element is a list
with the corresponding matrix for each particle. None indicates
that the matrix is identical for all particles and the value stored
in this class should be used instead
"""
return (None, None, None)
def update(self, particles, u, t, noise):
""" Propagate estimate forward in time
Args:
- particles (array-like): Model specific representation
of all particles, with first dimension = N (number of particles)
- u (array-like): input signal
- t (float): time-stamp
- noise: Unused for this type of model
Returns:
(array-like) with first dimension = N, particle estimate at time t+1
"""
# Update linear estimate with data from measurement of next non-linear
# state
(zl, Pl) = self.get_states(particles)
(A, f, Q) = self.get_pred_dynamics(u=u, t=t)
self.kf.set_dynamics(A=A, Q=Q, f_k=f)
for i in range(len(zl)):
# Predict z_{t+1}
(zl[i], Pl[i]) = self.kf.predict(zl[i], Pl[i])
# Predict next states conditioned on eta_next
self.set_states(particles, zl, Pl)
return particles
def get_meas_dynamics(self, y, t):
"""
Return matrices describing affine relation of measurement and current
state estimates
y_t = C*z_t + h + e, e ~ N(0,R)
        Args:
            - y (array-like): measurement
            - t (float): time stamp
Returns:
(y, C, h, R): y is a preprocessed measurement, the rest are lists
with the corresponding matrix for each particle. None indicates
that the matrix is identical for all particles and the value stored
in this class should be used instead
"""
return (y, None, None, None)
def measure(self, particles, y, t):
"""
Return the log-pdf value of the measurement and update the statistics
for the states
Args:
- particles (array-like): Model specific representation
of all particles, with first dimension = N (number of particles)
- y (array-like): measurement
- t (float): time-stamp
Returns:
(array-like) with first dimension = N, logp(y|x^i)
"""
(zl, Pl) = self.get_states(particles)
(y, C, h, R) = self.get_meas_dynamics(y=y, t=t)
self.kf.set_dynamics(C=C, R=R, h_k=h)
lyz = numpy.empty((len(particles)))
for i in range(len(zl)):
# Predict z_{t+1}
lyz[i] = self.kf.measure(y, zl[i], Pl[i])
self.set_states(particles, zl, Pl)
return lyz
def logp_xnext(self, particles, next_part, u, t):
"""
Return the log-pdf value for the possible future state 'next'
given input u.
Always returns zeros since all particles are always equivalent for this
type of model
Args:
- particles (array-like): Model specific representation
of all particles, with first dimension = N (number of particles)
- next_part: Unused
- u: Unused
- t: Unused
Returns:
            (array-like) with first dimension = N, numpy.zeros((N,))
"""
# Not needed for Linear Gaussian models, always return 0 (all particles will be identical anyhow)
N = len(particles)
return numpy.zeros((N,))
def sample_process_noise(self, particles, u, t):
"""
There is no need to sample noise for this type of model
Args:
- particles: Unused
- next_part: Unused
- u: Unused
- t: Unused
Returns:
None
"""
return None
def sample_smooth(self, part, ptraj, anc, future_trajs, find, ut, yt, tt, cur_ind):
"""
Update sufficient statistics based on the future states
Args:
- part (array-like): Model specific representation
of all particles, with first dimension = N (number of particles)
- ptraj: array of trajectory step objects from previous time-steps,
last index is step just before the current
- anc (array-like): index of the ancestor of each particle in part
- future_trajs (array-like): particle estimate for {t+1:T}
- find (array-like): index in future_trajs corresponding to each
particle in part
- ut (array-like): input signals for {0:T}
- yt (array-like): measurements for {0:T}
- tt (array-like): time stamps for {0:T}
- cur_ind (int): index of current timestep (in ut, yt and tt)
Returns:
(array-like) with first dimension = N
"""
(zl, Pl) = self.get_states(part)
M = len(part)
lz = len(self.z0)
lzP = lz + lz * lz
res = numpy.empty((M, lz + 2 * lz ** 2))
for j in range(M):
if (future_trajs is not None):
zn = future_trajs[0].pa.part[j, :lz].reshape((lz, 1))
Pn = future_trajs[0].pa.part[j, lz:lzP].reshape((lz, lz))
(A, f, Q) = self.get_pred_dynamics(u=ut[0], t=tt[0])
self.kf.set_dynamics(A=A, Q=Q, f_k=f)
(zs, Ps, Ms) = self.kf.smooth(zl[0], Pl[0], zn, Pn, self.kf.A, self.kf.f_k, self.kf.Q)
else:
zs = zl[j]
Ps = Pl[j]
Ms = numpy.zeros_like(Ps)
res[j] = numpy.hstack((zs.ravel(), Ps.ravel(), Ms.ravel()))
return res
def fwd_peak_density(self, u, t):
"""
        No need for rejection sampling for this type of model, always returns
0.0 since all particles are equivalent
Args:
- u: Unused
- t: Unused
Returns
(float) 0.0
"""
return 0.0
def eval_logp_x0(self, particles, t):
"""
Evaluate sum log p(x_0)
Args:
- particles (array-like): Model specific representation
of all particles, with first dimension = N (number of particles)
- t (float): time stamp
"""
# Calculate l1 according to (19a)
N = len(particles)
(zl, Pl) = self.get_states(particles)
lpz0 = numpy.empty(N)
for i in range(N):
l1 = self.calc_l1(zl[i], Pl[i], self.z0, self.P0)
(_tmp, ld) = numpy.linalg.slogdet(self.P0)
tmp = numpy.linalg.solve(self.P0, l1)
lpz0[i] = -0.5 * (ld + numpy.trace(tmp))
return lpz0
def eval_logp_x0_val_grad(self, particles, t):
"""
Evaluate gradient of sum log p(x_0)
Args:
- particles (array-like): Model specific representation
of all particles, with first dimension = N (number of particles)
- t (float): time stamp
"""
# Calculate l1 according to (19a)
N = len(particles)
lparam = len(self.params)
lpz0_grad = numpy.zeros(lparam)
(zl, Pl) = self.get_states(particles)
(z0_grad, P0_grad) = self.get_initial_grad()
if (z0_grad is None and P0_grad is None):
lpz0 = self.eval_logp_x0(particles, t)
else:
lpz0 = 0.0
P0cho = scipy.linalg.cho_factor(self.P0)
ld = numpy.sum(numpy.log(numpy.diagonal(P0cho[0]))) * 2
for i in range(N):
(l1, l1_grad) = self.calc_l1_grad(zl[i], Pl[i], self.z0, self.P0, z0_grad)
tmp = scipy.linalg.cho_solve(P0cho, l1)
lpz0 += -0.5 * (ld + numpy.trace(tmp))
for j in range(len(self.params)):
lpz0_grad[j] -= 0.5 * mlnlg_compute.compute_logprod_derivative(P0cho, P0_grad[j], l1, l1_grad[j])
return (lpz0, lpz0_grad)
def eval_logp_xnext(self, particles, x_next, u, t):
"""
Evaluate log p(x_{t+1}|x_t)
Args:
- particles (array-like): Model specific representation
of all particles, with first dimension = N (number of particles)
- x_next (array-like): future states
- t (float): time stamp
Returns: (array-like)
"""
# Calculate l2 according to (16)
N = len(particles)
(zl, Pl) = self.get_states(particles)
(zn, Pn) = self.get_states(x_next)
(A, f, Q) = self.get_pred_dynamics(u=u, t=t)
self.kf.set_dynamics(A=A, Q=Q, f_k=f)
self.t = t
lpxn = numpy.empty(N)
for k in range(N):
lz = len(self.z0)
lzP = lz + lz * lz
Mz = particles[k][lzP:].reshape((lz, lz))
(l2, _A, _M_ext, _predict_err) = self.calc_l2(zn[k], Pn[k], zl[k], Pl[k], self.kf.A, self.kf.f_k, Mz)
(_tmp, ld) = numpy.linalg.slogdet(self.kf.Q)
tmp = numpy.linalg.solve(self.kf.Q, l2)
lpxn[k] = -0.5 * (ld + numpy.trace(tmp))
return lpxn
def eval_logp_xnext_val_grad(self, particles, x_next, u, t):
"""
Evaluate value and gradient of log p(x_{t+1}|x_t)
Args:
- particles (array-like): Model specific representation
of all particles, with first dimension = N (number of particles)
- x_next (array-like): future states
- t (float): time stamp
Returns: ((array-like), (array-like))
"""
# Calculate l2 according to (16)
N = len(particles)
lparam = len(self.params)
(zl, Pl) = self.get_states(particles)
(zn, Pn) = self.get_states(x_next)
(A, f, Q) = self.get_pred_dynamics(u=u, t=t)
(A_grad, f_grad, Q_grad) = self.get_pred_dynamics_grad(u=u, t=t)
lpxn_grad = numpy.zeros(lparam)
if (A_grad is None and f_grad is None and Q_grad is None):
lpxn = self.eval_logp_xnext(particles, x_next, u, t)
else:
self.kf.set_dynamics(A=A, Q=Q, f_k=f)
lpxn = 0.0
Qcho = scipy.linalg.cho_factor(self.kf.Q, check_finite=False)
ld = numpy.sum(numpy.log(numpy.diagonal(Qcho[0]))) * 2
if (Q_grad is None):
Q_grad = numpy.zeros(
(len(self.params), self.kf.lz, self.kf.lz))
for k in range(N):
lz = len(self.z0)
lzP = lz + lz * lz
Mz = particles[k][lzP:].reshape((lz, lz))
(l2, l2_grad) = self.calc_l2_grad(zn[k], Pn[k], zl[k], Pl[k], self.kf.A, self.kf.f_k, Mz, A_grad, f_grad)
tmp = scipy.linalg.cho_solve(Qcho, l2)
lpxn += -0.5 * (ld + numpy.trace(tmp))
for j in range(len(self.params)):
lpxn_grad[j] -= 0.5 * mlnlg_compute.compute_logprod_derivative(Qcho, Q_grad[j], l2, l2_grad[j])
return (lpxn, lpxn_grad)
def eval_logp_y(self, particles, y, t):
"""
Evaluate value of log p(y_t|x_t)
Args:
- particles (array-like): Model specific representation
of all particles, with first dimension = N (number of particles)
- y (array-like): measurement
- t (float): time stamp
Returns: (array-like)
"""
N = len(particles)
self.t = t
(y, C, h, R) = self.get_meas_dynamics(y=y, t=t)
self.kf.set_dynamics(C=C, R=R, h_k=h)
(zl, Pl) = self.get_states(particles)
logpy = numpy.empty(N)
for i in range(N):
# Calculate l3 according to (19b)
l3 = self.calc_l3(y, zl[i], Pl[i])
(_tmp, ld) = numpy.linalg.slogdet(self.kf.R)
tmp = numpy.linalg.solve(self.kf.R, l3)
logpy[i] = -0.5 * (ld + numpy.trace(tmp))
return logpy
def eval_logp_y_val_grad(self, particles, y, t):
"""
Evaluate value and gradient of log p(y_t|x_t)
Args:
- particles (array-like): Model specific representation
of all particles, with first dimension = N (number of particles)
- y (array-like): measurement
- t (float): time stamp
Returns: ((array-like), (array-like))
"""
N = len(particles)
lparam = len(self.params)
(y, C, h, R) = self.get_meas_dynamics(y=y, t=t)
(C_grad, h_grad, R_grad) = self.get_meas_dynamics_grad(y=y, t=t)
logpy_grad = numpy.zeros(lparam)
if (C_grad is None and h_grad is None and R_grad is None):
logpy = self.eval_logp_y(particles, y, t)
else:
self.kf.set_dynamics(C=C, R=R, h_k=h)
Rcho = scipy.linalg.cho_factor(self.kf.R, check_finite=False)
ld = numpy.sum(numpy.log(numpy.diagonal(Rcho[0]))) * 2
(zl, Pl) = self.get_states(particles)
logpy = 0.0
if (R_grad is None):
R_grad = numpy.zeros((len(self.params), len(y), len(y)))
for i in range(N):
# Calculate l3 according to (19b)
                (l3, l3_grad) = self.calc_l3_grad(y, zl[i], Pl[i], C_grad, h_grad)
tmp = scipy.linalg.cho_solve(Rcho, l3)
logpy += -0.5 * (ld + numpy.trace(tmp))
for j in range(len(self.params)):
logpy_grad[j] -= 0.5 * mlnlg_compute.compute_logprod_derivative(
Rcho, R_grad[j], l3, l3_grad[j])
return (logpy, logpy_grad)
def get_pred_dynamics_grad(self, u, t):
"""
Override this method if (A, f, Q) depends on the parameters
        Args:
            - u (array-like): input signal
            - t (float): time stamp
Returns:
(A_grad, f_grad, Q_grad): Element-wise gradients with respect to all
the parameters for the system matrices
"""
return (None, None, None)
def get_meas_dynamics_grad(self, y, t):
"""
Override this method if (C, h, R) depends on the parameters
        Args:
            - y (array-like): measurement
            - t (float): time stamp
Returns:
(C_grad, h_grad, R_grad): Element-wise gradients with respect to all
the parameters for the system matrices
"""
return (None, None, None)
def get_initial_grad(self):
"""
        Calculate the gradient of the initial linear state estimate with
        respect to the parameters. The default implementation assumes no
        parameter dependence; override if needed.
        Returns:
            (z,P): z is a list of element-wise gradients for the initial mean values,
            P is a list of element-wise gradients for the covariance matrices
"""
lparam = len(self.params)
return (numpy.zeros((lparam, self.kf.lz, 1)),
numpy.zeros((lparam, self.kf.lz, self.kf.lz)))
def calc_l1(self, z, P, z0, P0):
""" internal helper function """
z0_diff = z - z0
l1 = z0_diff.dot(z0_diff.T) + P
return l1
def calc_l1_grad(self, z, P, z0, P0, z0_grad):
""" internal helper function """
lparams = len(self.params)
z0_diff = z - z0
l1 = z0_diff.dot(z0_diff.T) + P
l1_diff = numpy.zeros((lparams, self.kf.lz, self.kf.lz))
if (z0_grad is not None):
for j in range(lparams):
tmp = -z0_grad[j].dot(z0_diff.T)
l1_diff[j] += tmp + tmp.T
return (l1, l1_diff)
def calc_l2(self, zn, Pn, z, P, A, f, M):
""" internal helper function """
predict_err = zn - f - A.dot(z)
AM = A.dot(M)
l2 = predict_err.dot(predict_err.T)
l2 += Pn + A.dot(P).dot(A.T) - AM.T - AM
return (l2, A, M, predict_err)
def calc_l2_grad(self, zn, Pn, z, P, A, f, M, A_grad, f_grad):
""" internal helper function """
lparam = len(self.params)
predict_err = zn - f - A.dot(z)
AM = A.dot(M)
l2 = predict_err.dot(predict_err.T)
l2 += Pn + A.dot(P).dot(A.T) - AM.T - AM
l2_grad = numpy.zeros((lparam, self.kf.lz, self.kf.lz))
if (f_grad is not None):
for j in range(lparam):
tmp = -f_grad[j].dot(predict_err.T)
l2_grad[j] += tmp + tmp.T
if (A_grad is not None):
for j in range(lparam):
tmp = -A_grad[j].dot(z).dot(predict_err.T)
l2_grad[j] += tmp + tmp.T
tmp = A_grad[j].dot(P).dot(A.T)
l2_grad[j] += tmp + tmp.T
tmp = -A_grad[j].dot(M)
l2_grad[j] += tmp + tmp.T
return (l2, l2_grad)
def calc_l3(self, y, z, P):
""" internal helper function """
meas_diff = self.kf.measurement_diff(y.reshape((-1, 1)),
z,
C=self.kf.C,
h_k=self.kf.h_k)
l3 = meas_diff.dot(meas_diff.T)
l3 += self.kf.C.dot(P).dot(self.kf.C.T)
return l3
def calc_l3_grad(self, y, z, P, C_grad, h_grad):
""" internal helper function """
lparam = len(self.params)
meas_diff = self.kf.measurement_diff(y.reshape((-1, 1)),
z,
C=self.kf.C,
h_k=self.kf.h_k)
l3 = meas_diff.dot(meas_diff.T)
l3 += self.kf.C.dot(P).dot(self.kf.C.T)
l3_grad = numpy.zeros((lparam, len(y), len(y)))
if (h_grad is not None):
for j in range(lparam):
tmp = -h_grad[j].dot(meas_diff)
l3_grad[j] += tmp + tmp.T
if (C_grad is not None):
for j in range(lparam):
tmp = -C_grad[j].dot(z).dot(meas_diff)
l3_grad[j] += tmp + tmp.T
tmp = C_grad[j].dot(P).dot(self.kf.C)
l3_grad[j] += tmp + tmp.T
return (l3, l3_grad)
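# --- Illustrative sketch (not part of the original file) ---
# A minimal concrete model built on the base class above: a 1-D random walk
# z_{t+1} = z_t + v, v ~ N(0, q), observed directly as y_t = z_t + e,
# e ~ N(0, r). All matrices are constant, so the default get_pred_dynamics /
# get_meas_dynamics implementations (returning None) are sufficient.
# The name RandomWalk1D is hypothetical and not part of pyparticleest.
class RandomWalk1D(LTV):
    def __init__(self, q=0.1, r=0.5):
        z0 = numpy.zeros((1, 1))  # initial mean
        P0 = numpy.eye(1)         # initial covariance
        super(RandomWalk1D, self).__init__(z0, P0,
                                           A=numpy.eye(1), C=numpy.eye(1),
                                           Q=q * numpy.eye(1),
                                           R=r * numpy.eye(1))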
| jerkern/pyParticleEst | pyparticleest/models/ltv.py | Python | lgpl-3.0 | 22,845 | [
"Gaussian"
] | d4e28255d6c76fa0008db48ae133edf9ab659a1dc479e1618ac41e2df1e65926 |
# reinforcementTestClasses.py
# ---------------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import testClasses
import random, math, traceback, sys, os
import layout, textDisplay, pacman, gridworld
import time
from util import Counter, TimeoutFunction, FixedRandom
from collections import defaultdict
from pprint import PrettyPrinter
from hashlib import sha1
pp = PrettyPrinter()
VERBOSE = False
import gridworld
LIVINGREWARD = -0.1
NOISE = 0.2
class ValueIterationTest(testClasses.TestCase):
def __init__(self, question, testDict):
super(ValueIterationTest, self).__init__(question, testDict)
self.discount = float(testDict['discount'])
self.grid = gridworld.Gridworld(parseGrid(testDict['grid']))
iterations = int(testDict['valueIterations'])
if 'noise' in testDict: self.grid.setNoise(float(testDict['noise']))
if 'livingReward' in testDict: self.grid.setLivingReward(float(testDict['livingReward']))
maxPreIterations = 10
self.numsIterationsForDisplay = range(min(iterations, maxPreIterations))
self.testOutFile = testDict['test_out_file']
if maxPreIterations < iterations:
self.numsIterationsForDisplay.append(iterations)
def writeFailureFile(self, string):
with open(self.testOutFile, 'w') as handle:
handle.write(string)
def removeFailureFileIfExists(self):
if os.path.exists(self.testOutFile):
os.remove(self.testOutFile)
def execute(self, grades, moduleDict, solutionDict):
failureOutputFileString = ''
failureOutputStdString = ''
for n in self.numsIterationsForDisplay:
checkPolicy = (n == self.numsIterationsForDisplay[-1])
testPass, stdOutString, fileOutString = self.executeNIterations(grades, moduleDict, solutionDict, n, checkPolicy)
failureOutputStdString += stdOutString
failureOutputFileString += fileOutString
if not testPass:
self.addMessage(failureOutputStdString)
self.addMessage('For more details to help you debug, see test output file %s\n\n' % self.testOutFile)
self.writeFailureFile(failureOutputFileString)
return self.testFail(grades)
self.removeFailureFileIfExists()
return self.testPass(grades)
def executeNIterations(self, grades, moduleDict, solutionDict, n, checkPolicy):
testPass = True
valuesPretty, qValuesPretty, actions, policyPretty = self.runAgent(moduleDict, n)
stdOutString = ''
fileOutString = ''
valuesKey = "values_k_%d" % n
if self.comparePrettyValues(valuesPretty, solutionDict[valuesKey]):
fileOutString += "Values at iteration %d are correct.\n" % n
fileOutString += " Student/correct solution:\n %s\n" % self.prettyValueSolutionString(valuesKey, valuesPretty)
else:
testPass = False
outString = "Values at iteration %d are NOT correct.\n" % n
outString += " Student solution:\n %s\n" % self.prettyValueSolutionString(valuesKey, valuesPretty)
outString += " Correct solution:\n %s\n" % self.prettyValueSolutionString(valuesKey, solutionDict[valuesKey])
stdOutString += outString
fileOutString += outString
for action in actions:
qValuesKey = 'q_values_k_%d_action_%s' % (n, action)
qValues = qValuesPretty[action]
if self.comparePrettyValues(qValues, solutionDict[qValuesKey]):
fileOutString += "Q-Values at iteration %d for action %s are correct.\n" % (n, action)
fileOutString += " Student/correct solution:\n %s\n" % self.prettyValueSolutionString(qValuesKey, qValues)
else:
testPass = False
outString = "Q-Values at iteration %d for action %s are NOT correct.\n" % (n, action)
outString += " Student solution:\n %s\n" % self.prettyValueSolutionString(qValuesKey, qValues)
outString += " Correct solution:\n %s\n" % self.prettyValueSolutionString(qValuesKey, solutionDict[qValuesKey])
stdOutString += outString
fileOutString += outString
if checkPolicy:
if not self.comparePrettyValues(policyPretty, solutionDict['policy']):
testPass = False
outString = "Policy is NOT correct.\n"
outString += " Student solution:\n %s\n" % self.prettyValueSolutionString('policy', policyPretty)
outString += " Correct solution:\n %s\n" % self.prettyValueSolutionString('policy', solutionDict['policy'])
stdOutString += outString
fileOutString += outString
return testPass, stdOutString, fileOutString
def writeSolution(self, moduleDict, filePath):
with open(filePath, 'w') as handle:
policyPretty = ''
actions = []
for n in self.numsIterationsForDisplay:
valuesPretty, qValuesPretty, actions, policyPretty = self.runAgent(moduleDict, n)
handle.write(self.prettyValueSolutionString('values_k_%d' % n, valuesPretty))
for action in actions:
handle.write(self.prettyValueSolutionString('q_values_k_%d_action_%s' % (n, action), qValuesPretty[action]))
handle.write(self.prettyValueSolutionString('policy', policyPretty))
handle.write(self.prettyValueSolutionString('actions', '\n'.join(actions) + '\n'))
return True
    def createAgent(self, moduleDict, numIterations):
        # factory hook; subclasses override this to swap in their agent class
        return moduleDict['valueIterationAgents'].ValueIterationAgent(self.grid, discount=self.discount, iterations=numIterations)
    def runAgent(self, moduleDict, numIterations):
        agent = self.createAgent(moduleDict, numIterations)
states = self.grid.getStates()
actions = list(reduce(lambda a, b: set(a).union(b), [self.grid.getPossibleActions(state) for state in states]))
values = {}
qValues = {}
policy = {}
for state in states:
values[state] = agent.getValue(state)
policy[state] = agent.computeActionFromValues(state)
possibleActions = self.grid.getPossibleActions(state)
for action in actions:
                if action not in qValues:
qValues[action] = {}
if action in possibleActions:
qValues[action][state] = agent.computeQValueFromValues(state, action)
else:
qValues[action][state] = None
valuesPretty = self.prettyValues(values)
policyPretty = self.prettyPolicy(policy)
qValuesPretty = {}
for action in actions:
qValuesPretty[action] = self.prettyValues(qValues[action])
return (valuesPretty, qValuesPretty, actions, policyPretty)
def prettyPrint(self, elements, formatString):
pretty = ''
states = self.grid.getStates()
for ybar in range(self.grid.grid.height):
y = self.grid.grid.height-1-ybar
row = []
for x in range(self.grid.grid.width):
if (x, y) in states:
value = elements[(x, y)]
if value is None:
row.append(' illegal')
else:
row.append(formatString.format(elements[(x,y)]))
else:
row.append('_' * 10)
pretty += ' %s\n' % (" ".join(row), )
pretty += '\n'
return pretty
def prettyValues(self, values):
return self.prettyPrint(values, '{0:10.4f}')
def prettyPolicy(self, policy):
return self.prettyPrint(policy, '{0:10s}')
def prettyValueSolutionString(self, name, pretty):
return '%s: """\n%s\n"""\n\n' % (name, pretty.rstrip())
def comparePrettyValues(self, aPretty, bPretty, tolerance=0.01):
aList = self.parsePrettyValues(aPretty)
bList = self.parsePrettyValues(bPretty)
if len(aList) != len(bList):
return False
for a, b in zip(aList, bList):
try:
aNum = float(a)
bNum = float(b)
# error = abs((aNum - bNum) / ((aNum + bNum) / 2.0))
error = abs(aNum - bNum)
if error > tolerance:
return False
except ValueError:
if a.strip() != b.strip():
return False
return True
def parsePrettyValues(self, pretty):
values = pretty.split()
return values
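# Illustrative sketch (not part of the original grader): comparePrettyValues
# above matches whitespace-separated tokens pairwise, numerically when both
# tokens parse as floats (absolute tolerance), and as stripped strings
# otherwise. The helper below is a hypothetical standalone restatement.
def _compare_tokens_example(tolerance=0.01):
    a = "0.5000   0.2499   illegal".split()
    b = "0.5050   0.2500   illegal".split()
    for x, y in zip(a, b):
        try:
            assert abs(float(x) - float(y)) <= tolerance  # numeric tokens
        except ValueError:
            assert x == y  # non-numeric tokens must match exactly
    return True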
class AsynchronousValueIterationTest(ValueIterationTest):
def runAgent(self, moduleDict, numIterations):
agent = moduleDict['valueIterationAgents'].AsynchronousValueIterationAgent(self.grid, discount=self.discount, iterations=numIterations)
states = self.grid.getStates()
actions = list(reduce(lambda a, b: set(a).union(b), [self.grid.getPossibleActions(state) for state in states]))
values = {}
qValues = {}
policy = {}
for state in states:
values[state] = agent.getValue(state)
policy[state] = agent.computeActionFromValues(state)
possibleActions = self.grid.getPossibleActions(state)
for action in actions:
                if action not in qValues:
qValues[action] = {}
if action in possibleActions:
qValues[action][state] = agent.computeQValueFromValues(state, action)
else:
qValues[action][state] = None
valuesPretty = self.prettyValues(values)
policyPretty = self.prettyPolicy(policy)
qValuesPretty = {}
for action in actions:
qValuesPretty[action] = self.prettyValues(qValues[action])
return (valuesPretty, qValuesPretty, actions, policyPretty)
class PrioritizedSweepingValueIterationTest(ValueIterationTest):
def runAgent(self, moduleDict, numIterations):
agent = moduleDict['valueIterationAgents'].PrioritizedSweepingValueIterationAgent(self.grid, discount=self.discount, iterations=numIterations)
states = self.grid.getStates()
actions = list(reduce(lambda a, b: set(a).union(b), [self.grid.getPossibleActions(state) for state in states]))
values = {}
qValues = {}
policy = {}
for state in states:
values[state] = agent.getValue(state)
policy[state] = agent.computeActionFromValues(state)
possibleActions = self.grid.getPossibleActions(state)
for action in actions:
                if action not in qValues:
qValues[action] = {}
if action in possibleActions:
qValues[action][state] = agent.computeQValueFromValues(state, action)
else:
qValues[action][state] = None
valuesPretty = self.prettyValues(values)
policyPretty = self.prettyPolicy(policy)
qValuesPretty = {}
for action in actions:
qValuesPretty[action] = self.prettyValues(qValues[action])
return (valuesPretty, qValuesPretty, actions, policyPretty)
class ApproximateQLearningTest(testClasses.TestCase):
def __init__(self, question, testDict):
super(ApproximateQLearningTest, self).__init__(question, testDict)
self.discount = float(testDict['discount'])
self.grid = gridworld.Gridworld(parseGrid(testDict['grid']))
if 'noise' in testDict: self.grid.setNoise(float(testDict['noise']))
if 'livingReward' in testDict: self.grid.setLivingReward(float(testDict['livingReward']))
self.env = gridworld.GridworldEnvironment(self.grid)
self.epsilon = float(testDict['epsilon'])
self.learningRate = float(testDict['learningRate'])
self.extractor = 'IdentityExtractor'
if 'extractor' in testDict:
self.extractor = testDict['extractor']
self.opts = {'actionFn': self.env.getPossibleActions, 'epsilon': self.epsilon, 'gamma': self.discount, 'alpha': self.learningRate}
numExperiences = int(testDict['numExperiences'])
maxPreExperiences = 10
self.numsExperiencesForDisplay = range(min(numExperiences, maxPreExperiences))
self.testOutFile = testDict['test_out_file']
if maxPreExperiences < numExperiences:
self.numsExperiencesForDisplay.append(numExperiences)
def writeFailureFile(self, string):
with open(self.testOutFile, 'w') as handle:
handle.write(string)
def removeFailureFileIfExists(self):
if os.path.exists(self.testOutFile):
os.remove(self.testOutFile)
def execute(self, grades, moduleDict, solutionDict):
failureOutputFileString = ''
failureOutputStdString = ''
for n in self.numsExperiencesForDisplay:
testPass, stdOutString, fileOutString = self.executeNExperiences(grades, moduleDict, solutionDict, n)
failureOutputStdString += stdOutString
failureOutputFileString += fileOutString
if not testPass:
self.addMessage(failureOutputStdString)
self.addMessage('For more details to help you debug, see test output file %s\n\n' % self.testOutFile)
self.writeFailureFile(failureOutputFileString)
return self.testFail(grades)
self.removeFailureFileIfExists()
return self.testPass(grades)
def executeNExperiences(self, grades, moduleDict, solutionDict, n):
testPass = True
qValuesPretty, weights, actions, lastExperience = self.runAgent(moduleDict, n)
stdOutString = ''
fileOutString = "==================== Iteration %d ====================\n" % n
if lastExperience is not None:
fileOutString += "Agent observed the transition (startState = %s, action = %s, endState = %s, reward = %f)\n\n" % lastExperience
weightsKey = 'weights_k_%d' % n
if weights == eval(solutionDict[weightsKey]):
fileOutString += "Weights at iteration %d are correct." % n
fileOutString += " Student/correct solution:\n\n%s\n\n" % pp.pformat(weights)
for action in actions:
qValuesKey = 'q_values_k_%d_action_%s' % (n, action)
qValues = qValuesPretty[action]
if self.comparePrettyValues(qValues, solutionDict[qValuesKey]):
fileOutString += "Q-Values at iteration %d for action '%s' are correct." % (n, action)
fileOutString += " Student/correct solution:\n\t%s" % self.prettyValueSolutionString(qValuesKey, qValues)
else:
testPass = False
outString = "Q-Values at iteration %d for action '%s' are NOT correct." % (n, action)
outString += " Student solution:\n\t%s" % self.prettyValueSolutionString(qValuesKey, qValues)
outString += " Correct solution:\n\t%s" % self.prettyValueSolutionString(qValuesKey, solutionDict[qValuesKey])
stdOutString += outString
fileOutString += outString
return testPass, stdOutString, fileOutString
def writeSolution(self, moduleDict, filePath):
with open(filePath, 'w') as handle:
for n in self.numsExperiencesForDisplay:
qValuesPretty, weights, actions, _ = self.runAgent(moduleDict, n)
handle.write(self.prettyValueSolutionString('weights_k_%d' % n, pp.pformat(weights)))
for action in actions:
handle.write(self.prettyValueSolutionString('q_values_k_%d_action_%s' % (n, action), qValuesPretty[action]))
return True
def runAgent(self, moduleDict, numExperiences):
agent = moduleDict['qlearningAgents'].ApproximateQAgent(extractor=self.extractor, **self.opts)
        states = sorted(state for state in self.grid.getStates() if len(self.grid.getPossibleActions(state)) > 0)
randObj = FixedRandom().random
# choose a random start state and a random possible action from that state
# get the next state and reward from the transition function
lastExperience = None
for i in range(numExperiences):
startState = randObj.choice(states)
action = randObj.choice(self.grid.getPossibleActions(startState))
(endState, reward) = self.env.getRandomNextState(startState, action, randObj=randObj)
lastExperience = (startState, action, endState, reward)
agent.update(*lastExperience)
actions = list(reduce(lambda a, b: set(a).union(b), [self.grid.getPossibleActions(state) for state in states]))
qValues = {}
weights = agent.getWeights()
for state in states:
possibleActions = self.grid.getPossibleActions(state)
for action in actions:
                if action not in qValues:
qValues[action] = {}
if action in possibleActions:
qValues[action][state] = agent.getQValue(state, action)
else:
qValues[action][state] = None
qValuesPretty = {}
for action in actions:
qValuesPretty[action] = self.prettyValues(qValues[action])
return (qValuesPretty, weights, actions, lastExperience)
def prettyPrint(self, elements, formatString):
pretty = ''
states = self.grid.getStates()
for ybar in range(self.grid.grid.height):
y = self.grid.grid.height-1-ybar
row = []
for x in range(self.grid.grid.width):
if (x, y) in states:
value = elements[(x, y)]
if value is None:
row.append(' illegal')
else:
                        row.append(formatString.format(value))
else:
row.append('_' * 10)
pretty += ' %s\n' % (" ".join(row), )
pretty += '\n'
return pretty
def prettyValues(self, values):
return self.prettyPrint(values, '{0:10.4f}')
def prettyPolicy(self, policy):
return self.prettyPrint(policy, '{0:10s}')
def prettyValueSolutionString(self, name, pretty):
return '%s: """\n%s\n"""\n\n' % (name, pretty.rstrip())
def comparePrettyValues(self, aPretty, bPretty, tolerance=0.01):
aList = self.parsePrettyValues(aPretty)
bList = self.parsePrettyValues(bPretty)
if len(aList) != len(bList):
return False
for a, b in zip(aList, bList):
try:
aNum = float(a)
bNum = float(b)
# error = abs((aNum - bNum) / ((aNum + bNum) / 2.0))
error = abs(aNum - bNum)
if error > tolerance:
return False
except ValueError:
if a.strip() != b.strip():
return False
return True
def parsePrettyValues(self, pretty):
values = pretty.split()
return values
class QLearningTest(testClasses.TestCase):
def __init__(self, question, testDict):
super(QLearningTest, self).__init__(question, testDict)
self.discount = float(testDict['discount'])
self.grid = gridworld.Gridworld(parseGrid(testDict['grid']))
if 'noise' in testDict: self.grid.setNoise(float(testDict['noise']))
if 'livingReward' in testDict: self.grid.setLivingReward(float(testDict['livingReward']))
self.env = gridworld.GridworldEnvironment(self.grid)
self.epsilon = float(testDict['epsilon'])
self.learningRate = float(testDict['learningRate'])
self.opts = {'actionFn': self.env.getPossibleActions, 'epsilon': self.epsilon, 'gamma': self.discount, 'alpha': self.learningRate}
numExperiences = int(testDict['numExperiences'])
maxPreExperiences = 10
self.numsExperiencesForDisplay = range(min(numExperiences, maxPreExperiences))
self.testOutFile = testDict['test_out_file']
if maxPreExperiences < numExperiences:
self.numsExperiencesForDisplay.append(numExperiences)
def writeFailureFile(self, string):
with open(self.testOutFile, 'w') as handle:
handle.write(string)
def removeFailureFileIfExists(self):
if os.path.exists(self.testOutFile):
os.remove(self.testOutFile)
def execute(self, grades, moduleDict, solutionDict):
failureOutputFileString = ''
failureOutputStdString = ''
for n in self.numsExperiencesForDisplay:
checkValuesAndPolicy = (n == self.numsExperiencesForDisplay[-1])
testPass, stdOutString, fileOutString = self.executeNExperiences(grades, moduleDict, solutionDict, n, checkValuesAndPolicy)
failureOutputStdString += stdOutString
failureOutputFileString += fileOutString
if not testPass:
self.addMessage(failureOutputStdString)
self.addMessage('For more details to help you debug, see test output file %s\n\n' % self.testOutFile)
self.writeFailureFile(failureOutputFileString)
return self.testFail(grades)
self.removeFailureFileIfExists()
return self.testPass(grades)
def executeNExperiences(self, grades, moduleDict, solutionDict, n, checkValuesAndPolicy):
testPass = True
valuesPretty, qValuesPretty, actions, policyPretty, lastExperience = self.runAgent(moduleDict, n)
stdOutString = ''
fileOutString = "==================== Iteration %d ====================\n" % n
if lastExperience is not None:
fileOutString += "Agent observed the transition (startState = %s, action = %s, endState = %s, reward = %f)\n\n\n" % lastExperience
for action in actions:
qValuesKey = 'q_values_k_%d_action_%s' % (n, action)
qValues = qValuesPretty[action]
if self.comparePrettyValues(qValues, solutionDict[qValuesKey]):
fileOutString += "Q-Values at iteration %d for action '%s' are correct." % (n, action)
fileOutString += " Student/correct solution:\n\t%s" % self.prettyValueSolutionString(qValuesKey, qValues)
else:
testPass = False
outString = "Q-Values at iteration %d for action '%s' are NOT correct." % (n, action)
outString += " Student solution:\n\t%s" % self.prettyValueSolutionString(qValuesKey, qValues)
outString += " Correct solution:\n\t%s" % self.prettyValueSolutionString(qValuesKey, solutionDict[qValuesKey])
stdOutString += outString
fileOutString += outString
if checkValuesAndPolicy:
if not self.comparePrettyValues(valuesPretty, solutionDict['values']):
testPass = False
outString = "Values are NOT correct."
outString += " Student solution:\n\t%s" % self.prettyValueSolutionString('values', valuesPretty)
outString += " Correct solution:\n\t%s" % self.prettyValueSolutionString('values', solutionDict['values'])
stdOutString += outString
fileOutString += outString
if not self.comparePrettyValues(policyPretty, solutionDict['policy']):
testPass = False
outString = "Policy is NOT correct."
outString += " Student solution:\n\t%s" % self.prettyValueSolutionString('policy', policyPretty)
outString += " Correct solution:\n\t%s" % self.prettyValueSolutionString('policy', solutionDict['policy'])
stdOutString += outString
fileOutString += outString
return testPass, stdOutString, fileOutString
def writeSolution(self, moduleDict, filePath):
with open(filePath, 'w') as handle:
valuesPretty = ''
policyPretty = ''
for n in self.numsExperiencesForDisplay:
valuesPretty, qValuesPretty, actions, policyPretty, _ = self.runAgent(moduleDict, n)
for action in actions:
handle.write(self.prettyValueSolutionString('q_values_k_%d_action_%s' % (n, action), qValuesPretty[action]))
handle.write(self.prettyValueSolutionString('values', valuesPretty))
handle.write(self.prettyValueSolutionString('policy', policyPretty))
return True
def runAgent(self, moduleDict, numExperiences):
agent = moduleDict['qlearningAgents'].QLearningAgent(**self.opts)
        states = sorted(state for state in self.grid.getStates() if len(self.grid.getPossibleActions(state)) > 0)
randObj = FixedRandom().random
# choose a random start state and a random possible action from that state
# get the next state and reward from the transition function
lastExperience = None
for i in range(numExperiences):
startState = randObj.choice(states)
action = randObj.choice(self.grid.getPossibleActions(startState))
(endState, reward) = self.env.getRandomNextState(startState, action, randObj=randObj)
lastExperience = (startState, action, endState, reward)
agent.update(*lastExperience)
actions = list(reduce(lambda a, b: set(a).union(b), [self.grid.getPossibleActions(state) for state in states]))
values = {}
qValues = {}
policy = {}
for state in states:
values[state] = agent.computeValueFromQValues(state)
policy[state] = agent.computeActionFromQValues(state)
possibleActions = self.grid.getPossibleActions(state)
for action in actions:
                if action not in qValues:
qValues[action] = {}
if action in possibleActions:
qValues[action][state] = agent.getQValue(state, action)
else:
qValues[action][state] = None
valuesPretty = self.prettyValues(values)
policyPretty = self.prettyPolicy(policy)
qValuesPretty = {}
for action in actions:
qValuesPretty[action] = self.prettyValues(qValues[action])
return (valuesPretty, qValuesPretty, actions, policyPretty, lastExperience)
def prettyPrint(self, elements, formatString):
pretty = ''
states = self.grid.getStates()
for ybar in range(self.grid.grid.height):
y = self.grid.grid.height-1-ybar
row = []
for x in range(self.grid.grid.width):
if (x, y) in states:
value = elements[(x, y)]
if value is None:
row.append(' illegal')
else:
                        row.append(formatString.format(value))
else:
row.append('_' * 10)
pretty += ' %s\n' % (" ".join(row), )
pretty += '\n'
return pretty
def prettyValues(self, values):
return self.prettyPrint(values, '{0:10.4f}')
def prettyPolicy(self, policy):
return self.prettyPrint(policy, '{0:10s}')
def prettyValueSolutionString(self, name, pretty):
return '%s: """\n%s\n"""\n\n' % (name, pretty.rstrip())
def comparePrettyValues(self, aPretty, bPretty, tolerance=0.01):
aList = self.parsePrettyValues(aPretty)
bList = self.parsePrettyValues(bPretty)
if len(aList) != len(bList):
return False
for a, b in zip(aList, bList):
try:
aNum = float(a)
bNum = float(b)
# error = abs((aNum - bNum) / ((aNum + bNum) / 2.0))
error = abs(aNum - bNum)
if error > tolerance:
return False
except ValueError:
if a.strip() != b.strip():
return False
return True
def parsePrettyValues(self, pretty):
values = pretty.split()
return values
class EpsilonGreedyTest(testClasses.TestCase):
def __init__(self, question, testDict):
super(EpsilonGreedyTest, self).__init__(question, testDict)
self.discount = float(testDict['discount'])
self.grid = gridworld.Gridworld(parseGrid(testDict['grid']))
if 'noise' in testDict: self.grid.setNoise(float(testDict['noise']))
if 'livingReward' in testDict: self.grid.setLivingReward(float(testDict['livingReward']))
self.env = gridworld.GridworldEnvironment(self.grid)
self.epsilon = float(testDict['epsilon'])
self.learningRate = float(testDict['learningRate'])
self.numExperiences = int(testDict['numExperiences'])
self.numIterations = int(testDict['iterations'])
self.opts = {'actionFn': self.env.getPossibleActions, 'epsilon': self.epsilon, 'gamma': self.discount, 'alpha': self.learningRate}
def execute(self, grades, moduleDict, solutionDict):
if self.testEpsilonGreedy(moduleDict):
return self.testPass(grades)
else:
return self.testFail(grades)
def writeSolution(self, moduleDict, filePath):
with open(filePath, 'w') as handle:
handle.write('# This is the solution file for %s.\n' % self.path)
handle.write('# File intentionally blank.\n')
return True
def runAgent(self, moduleDict):
agent = moduleDict['qlearningAgents'].QLearningAgent(**self.opts)
        states = sorted(state for state in self.grid.getStates() if len(self.grid.getPossibleActions(state)) > 0)
randObj = FixedRandom().random
# choose a random start state and a random possible action from that state
# get the next state and reward from the transition function
for i in range(self.numExperiences):
startState = randObj.choice(states)
action = randObj.choice(self.grid.getPossibleActions(startState))
(endState, reward) = self.env.getRandomNextState(startState, action, randObj=randObj)
agent.update(startState, action, endState, reward)
return agent
def testEpsilonGreedy(self, moduleDict, tolerance=0.025):
agent = self.runAgent(moduleDict)
for state in self.grid.getStates():
numLegalActions = len(agent.getLegalActions(state))
if numLegalActions <= 1:
continue
numGreedyChoices = 0
optimalAction = agent.computeActionFromQValues(state)
for iteration in range(self.numIterations):
# assume that their computeActionFromQValues implementation is correct (q4 tests this)
if agent.getAction(state) == optimalAction:
numGreedyChoices += 1
# e = epsilon, g = # greedy actions, n = numIterations, k = numLegalActions
# g = n * [(1-e) + e/k] -> e = (n - g) / (n - n/k)
empiricalEpsilonNumerator = self.numIterations - numGreedyChoices
empiricalEpsilonDenominator = self.numIterations - self.numIterations / float(numLegalActions)
empiricalEpsilon = empiricalEpsilonNumerator / empiricalEpsilonDenominator
error = abs(empiricalEpsilon - self.epsilon)
if error > tolerance:
self.addMessage("Epsilon-greedy action selection is not correct.")
self.addMessage("Actual epsilon = %f; student empirical epsilon = %f; error = %f > tolerance = %f" % (self.epsilon, empiricalEpsilon, error, tolerance))
return False
return True
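# Worked check of the empirical-epsilon algebra used in testEpsilonGreedy
# above (illustrative only; the numbers are assumed, not from any test).
# With epsilon e, k legal actions and n trials, the expected number of
# greedy picks is g = n * [(1 - e) + e / k], which inverts to
# e = (n - g) / (n - n / k).
def _empirical_epsilon_example(e=0.3, k=4, n=10000):
    g = n * ((1.0 - e) + e / k)           # expected greedy choices
    e_hat = (n - g) / (n - n / float(k))  # inverted formula, as in the test
    assert abs(e_hat - e) < 1e-9
    return e_hat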
### q8
class Question8Test(testClasses.TestCase):
def __init__(self, question, testDict):
super(Question8Test, self).__init__(question, testDict)
def execute(self, grades, moduleDict, solutionDict):
studentSolution = moduleDict['analysis'].question8()
studentSolution = str(studentSolution).strip().lower()
hashedSolution = sha1(studentSolution).hexdigest()
if hashedSolution == '46729c96bb1e4081fdc81a8ff74b3e5db8fba415':
return self.testPass(grades)
else:
self.addMessage("Solution is not correct.")
self.addMessage(" Student solution: %s" % (studentSolution,))
return self.testFail(grades)
    def writeSolution(self, moduleDict, filePath):
        with open(filePath, 'w') as handle:
            handle.write('# This is the solution file for %s.\n' % self.path)
            handle.write('# File intentionally blank.\n')
        return True
### q7/q8
### =====
## Average wins of a pacman agent
class EvalAgentTest(testClasses.TestCase):
def __init__(self, question, testDict):
super(EvalAgentTest, self).__init__(question, testDict)
self.pacmanParams = testDict['pacmanParams']
self.scoreMinimum = int(testDict['scoreMinimum']) if 'scoreMinimum' in testDict else None
self.nonTimeoutMinimum = int(testDict['nonTimeoutMinimum']) if 'nonTimeoutMinimum' in testDict else None
self.winsMinimum = int(testDict['winsMinimum']) if 'winsMinimum' in testDict else None
self.scoreThresholds = [int(s) for s in testDict.get('scoreThresholds','').split()]
self.nonTimeoutThresholds = [int(s) for s in testDict.get('nonTimeoutThresholds','').split()]
self.winsThresholds = [int(s) for s in testDict.get('winsThresholds','').split()]
self.maxPoints = sum([len(t) for t in [self.scoreThresholds, self.nonTimeoutThresholds, self.winsThresholds]])
def execute(self, grades, moduleDict, solutionDict):
self.addMessage('Grading agent using command: python pacman.py %s'% (self.pacmanParams,))
startTime = time.time()
        games = pacman.runGames(**pacman.readCommand(self.pacmanParams.split(' ')))
totalTime = time.time() - startTime
numGames = len(games)
stats = {'time': totalTime, 'wins': [g.state.isWin() for g in games].count(True),
'games': games, 'scores': [g.state.getScore() for g in games],
'timeouts': [g.agentTimeout for g in games].count(True), 'crashes': [g.agentCrashed for g in games].count(True)}
averageScore = sum(stats['scores']) / float(len(stats['scores']))
nonTimeouts = numGames - stats['timeouts']
wins = stats['wins']
def gradeThreshold(value, minimum, thresholds, name):
points = 0
            passed = (minimum is None) or (value >= minimum)
if passed:
for t in thresholds:
if value >= t:
points += 1
return (passed, points, value, minimum, thresholds, name)
results = [gradeThreshold(averageScore, self.scoreMinimum, self.scoreThresholds, "average score"),
gradeThreshold(nonTimeouts, self.nonTimeoutMinimum, self.nonTimeoutThresholds, "games not timed out"),
gradeThreshold(wins, self.winsMinimum, self.winsThresholds, "wins")]
totalPoints = 0
for passed, points, value, minimum, thresholds, name in results:
            if minimum is None and len(thresholds) == 0:
continue
# print passed, points, value, minimum, thresholds, name
totalPoints += points
if not passed:
assert points == 0
self.addMessage("%s %s (fail: below minimum value %s)" % (value, name, minimum))
else:
self.addMessage("%s %s (%s of %s points)" % (value, name, points, len(thresholds)))
                if minimum is not None:
self.addMessage(" Grading scheme:")
self.addMessage(" < %s: fail" % (minimum,))
if len(thresholds)==0 or minimum != thresholds[0]:
self.addMessage(" >= %s: 0 points" % (minimum,))
for idx, threshold in enumerate(thresholds):
self.addMessage(" >= %s: %s points" % (threshold, idx+1))
elif len(thresholds) > 0:
self.addMessage(" Grading scheme:")
self.addMessage(" < %s: 0 points" % (thresholds[0],))
for idx, threshold in enumerate(thresholds):
self.addMessage(" >= %s: %s points" % (threshold, idx+1))
if any([not passed for passed, _, _, _, _, _ in results]):
totalPoints = 0
return self.testPartial(grades, totalPoints, self.maxPoints)
def writeSolution(self, moduleDict, filePath):
with open(filePath, 'w') as handle:
handle.write('# This is the solution file for %s.\n' % self.path)
handle.write('# File intentionally blank.\n')
return True
### q2/q3
### =====
## For each parameter setting, compute the optimal policy, see if it satisfies some properties
def followPath(policy, start, numSteps=100):
    state = start
    path = []
    for i in range(numSteps):
        if state not in policy:
            break
        action = policy[state]
        path.append("(%s,%s)" % state)
        if action == 'north':
            nextState = state[0], state[1] + 1
        elif action == 'south':
            nextState = state[0], state[1] - 1
        elif action == 'east':
            nextState = state[0] + 1, state[1]
        elif action == 'west':
            nextState = state[0] - 1, state[1]
        elif action == 'exit' or action is None:
            path.append('TERMINAL_STATE')
            break
        state = nextState
    return path
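# Hedged usage sketch for followPath (toy policy, illustrative only):
#   toy_policy = {(0, 0): 'east', (1, 0): 'north', (1, 1): 'exit'}
#   followPath(toy_policy, (0, 0))
#   -> ['(0,0)', '(1,0)', '(1,1)', 'TERMINAL_STATE']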
def parseGrid(string):
grid = [[entry.strip() for entry in line.split()] for line in string.split('\n')]
for row in grid:
for x, col in enumerate(row):
            try:
                col = int(col)
            except ValueError:
                pass
if col == "_":
col = ' '
row[x] = col
return gridworld.makeGrid(grid)
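# Example input for parseGrid (assumed layout, for illustration): each
# whitespace-separated token becomes a cell, '_' becomes empty space and
# numeric tokens become terminal rewards, matching the GridPolicyTest
# legend further below:
#   parseGrid("S _ _\n# # _\n_ _ 10")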
def computePolicy(moduleDict, grid, discount):
valueIterator = moduleDict['valueIterationAgents'].ValueIterationAgent(grid, discount=discount)
policy = {}
for state in grid.getStates():
policy[state] = valueIterator.computeActionFromValues(state)
return policy
class GridPolicyTest(testClasses.TestCase):
def __init__(self, question, testDict):
super(GridPolicyTest, self).__init__(question, testDict)
# Function in module in analysis that returns (discount, noise)
self.parameterFn = testDict['parameterFn']
self.question2 = testDict.get('question2', 'false').lower() == 'true'
# GridWorld specification
# _ is empty space
# numbers are terminal states with that value
# # is a wall
# S is a start state
#
self.gridText = testDict['grid']
self.grid = gridworld.Gridworld(parseGrid(testDict['grid']))
self.gridName = testDict['gridName']
# Policy specification
# _ policy choice not checked
# N, E, S, W policy action must be north, east, south, west
#
self.policy = parseGrid(testDict['policy'])
# State the most probable path must visit
# (x,y) for a particular location; (0,0) is bottom left
# terminal for the terminal state
self.pathVisits = testDict.get('pathVisits', None)
# State the most probable path must not visit
# (x,y) for a particular location; (0,0) is bottom left
# terminal for the terminal state
self.pathNotVisits = testDict.get('pathNotVisits', None)
def execute(self, grades, moduleDict, solutionDict):
if not hasattr(moduleDict['analysis'], self.parameterFn):
self.addMessage('Method not implemented: analysis.%s' % (self.parameterFn,))
return self.testFail(grades)
result = getattr(moduleDict['analysis'], self.parameterFn)()
        if isinstance(result, str) and result.lower()[0:3] == "not":
self.addMessage('Actually, it is possible!')
return self.testFail(grades)
if self.question2:
livingReward = None
try:
discount, noise = result
discount = float(discount)
noise = float(noise)
            except (TypeError, ValueError):
self.addMessage('Did not return a (discount, noise) pair; instead analysis.%s returned: %s' % (self.parameterFn, result))
return self.testFail(grades)
if discount != 0.9 and noise != 0.2:
self.addMessage('Must change either the discount or the noise, not both. Returned (discount, noise) = %s' % (result,))
return self.testFail(grades)
else:
try:
discount, noise, livingReward = result
discount = float(discount)
noise = float(noise)
livingReward = float(livingReward)
            except (TypeError, ValueError):
self.addMessage('Did not return a (discount, noise, living reward) triple; instead analysis.%s returned: %s' % (self.parameterFn, result))
return self.testFail(grades)
self.grid.setNoise(noise)
        if livingReward is not None:
self.grid.setLivingReward(livingReward)
start = self.grid.getStartState()
policy = computePolicy(moduleDict, self.grid, discount)
## check policy
actionMap = {'N': 'north', 'E': 'east', 'S': 'south', 'W': 'west', 'X': 'exit'}
width, height = self.policy.width, self.policy.height
policyPassed = True
for x in range(width):
for y in range(height):
if self.policy[x][y] in actionMap and policy[(x,y)] != actionMap[self.policy[x][y]]:
differPoint = (x,y)
policyPassed = False
if not policyPassed:
self.addMessage('Policy not correct.')
self.addMessage(' Student policy at %s: %s' % (differPoint, policy[differPoint]))
self.addMessage(' Correct policy at %s: %s' % (differPoint, actionMap[self.policy[differPoint[0]][differPoint[1]]]))
self.addMessage(' Student policy:')
self.printPolicy(policy, False)
self.addMessage(" Legend: N,S,E,W at states which move north etc, X at states which exit,")
self.addMessage(" . at states where the policy is not defined (e.g. walls)")
self.addMessage(' Correct policy specification:')
self.printPolicy(self.policy, True)
self.addMessage(" Legend: N,S,E,W for states in which the student policy must move north etc,")
self.addMessage(" _ for states where it doesn't matter what the student policy does.")
self.printGridworld()
return self.testFail(grades)
## check path
path = followPath(policy, self.grid.getStartState())
        if self.pathVisits is not None and self.pathVisits not in path:
self.addMessage('Policy does not visit state %s when moving without noise.' % (self.pathVisits,))
self.addMessage(' States visited: %s' % (path,))
self.addMessage(' Student policy:')
self.printPolicy(policy, False)
self.addMessage(" Legend: N,S,E,W at states which move north etc, X at states which exit,")
self.addMessage(" . at states where policy not defined")
self.printGridworld()
return self.testFail(grades)
        if self.pathNotVisits is not None and self.pathNotVisits in path:
self.addMessage('Policy visits state %s when moving without noise.' % (self.pathNotVisits,))
self.addMessage(' States visited: %s' % (path,))
self.addMessage(' Student policy:')
self.printPolicy(policy, False)
self.addMessage(" Legend: N,S,E,W at states which move north etc, X at states which exit,")
self.addMessage(" . at states where policy not defined")
self.printGridworld()
return self.testFail(grades)
return self.testPass(grades)
def printGridworld(self):
self.addMessage(' Gridworld:')
for line in self.gridText.split('\n'):
self.addMessage(' ' + line)
self.addMessage(' Legend: # wall, _ empty, S start, numbers terminal states with that reward.')
def printPolicy(self, policy, policyTypeIsGrid):
if policyTypeIsGrid:
legend = {'N': 'N', 'E': 'E', 'S': 'S', 'W': 'W', ' ': '_'}
else:
legend = {'north': 'N', 'east': 'E', 'south': 'S', 'west': 'W', 'exit': 'X', '.': '.', ' ': '_'}
for ybar in range(self.grid.grid.height):
y = self.grid.grid.height-1-ybar
if policyTypeIsGrid:
self.addMessage(" %s" % (" ".join([legend[policy[x][y]] for x in range(self.grid.grid.width)]),))
else:
self.addMessage(" %s" % (" ".join([legend[policy.get((x,y), '.')] for x in range(self.grid.grid.width)]),))
# for state in sorted(self.grid.getStates()):
# if state != 'TERMINAL_STATE':
# self.addMessage(' (%s,%s) %s' % (state[0], state[1], policy[state]))
def writeSolution(self, moduleDict, filePath):
with open(filePath, 'w') as handle:
handle.write('# This is the solution file for %s.\n' % self.path)
handle.write('# File intentionally blank.\n')
return True
| omardroubi/Artificial-Intelligence | Projects/Project3/reinforcement/reinforcementTestClasses.py | Python | apache-2.0 | 46,566 | ["VisIt"] | a8d8a207837481997b5b9ddf4f24dfe7261dbcfe5b2a4e083d6428fec319a635 |
#!/usr/bin/env python
#
# setup for TreeFix library packages
#
# use the following to install:
# python setup.py build
# python setup.py install
#
import os,sys
from setuptools import setup, Extension  # setuptools supports the install_requires below
import numpy
sys.path.insert(0, os.path.realpath(
os.path.join(os.path.dirname(__file__), "lib")))
try:
    from Cython.Distutils import build_ext
    USE_CYTHON = True
except ImportError:
    USE_CYTHON = False
cmdclass = { }
recon_module = []
VERSION = "1.0.1rc"
DESC = "GATC (Genetic Algorithm for Tree Construction/Correction) finds the best tree from a list of candidate trees according to sequence likelihood and reconciliation with a species tree."
extra_link_args = ['-lm']
if sys.platform != 'darwin':
extra_link_args.append('-s')
srcs = [os.path.join('src/raxml',fn) for fn in os.listdir('src/raxml')
if (not os.path.isdir(fn)) and fn.endswith('.c')]
raxml_module = [ Extension('lib.raxmlib._raxml',
sources=['lib/raxmlib/raxml.i'] + srcs,
extra_link_args=extra_link_args
)]
if USE_CYTHON:
recon_module += [
Extension("lib.reclkl.computeLKL", sources=[ "src/recon/computeLKL.pyx"], include_dirs=[numpy.get_include()]),
]
cmdclass.update({ 'build_ext': build_ext })
else:
recon_module += [
Extension("lib.reclkl.computeLKL", sources=[ "src/recon/computeLKL.c" ], include_dirs=[numpy.get_include()]),
]
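# Note (sketch, not from the original build): newer Cython workflows favor
# cythonize() over swapping in the build_ext cmdclass, e.g.
#   from Cython.Build import cythonize
#   recon_module = cythonize("src/recon/computeLKL.pyx")
# The conditional above is kept so the pregenerated computeLKL.c still
# builds for users without Cython installed.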
modules = raxml_module + recon_module
setup(
name='GATC',
version = VERSION,
description=DESC,
author='Emmanuel Noutahi',
author_email='noutahie@iro.umontreal.ca',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Education',
],
packages=['lib', 'lib.TreeLib', 'lib.raxmlib', 'lib.ga', 'lib.ga.evolve', 'lib.reclkl', 'lib.PolytomySolver'],
py_modules=[],
scripts=['bin/gatc'],
install_requires=['numpy', 'scipy', 'matplotlib', 'ete3', 'biopython'],
cmdclass=cmdclass,
ext_modules=modules
)
| UdeM-LBIT/GAPol | setup.py | Python | gpl-3.0 | 2,392 | ["Biopython"] | 4b8f2cd39ee3ba1908d21e35d1d58a0bcde0dfe8c83ec09d96ea6d73aba42ea2 |
#!/usr/bin/env python
"""
Read simplified JSON BLAST records and report the elapsed time.
"""
from __future__ import print_function
from dark.conversion import JSONRecordsReader
from time import time
import sys
if __name__ == '__main__':
if len(sys.argv) != 2:
print('Usage: %s file.json' % sys.argv[0], file=sys.stderr)
sys.exit(1)
else:
start = time()
jsonReader = JSONRecordsReader(sys.argv[1])
records = jsonReader.records()
        count = 0
        for count, record in enumerate(records, start=1):
            pass
elapsed = time() - start
print('Read %d JSON BLAST records in %.3f secs (%.0f records/sec)' % (
count, elapsed, float(count) / float(elapsed)))
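# Example invocation (the file name is hypothetical):
#   python read-blast-json.py records.json
# which prints one summary line of the form
#   Read <count> JSON BLAST records in <secs> secs (<rate> records/sec)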
| bamueh/dark-matter | bin/read-blast-json.py | Python | mit | 729 | ["BLAST"] | d69fade41b1761134f782e96663d8034d2b6418dd912b84153529c36dd8f7c8d |
#! CCSD dipole with user-specified basis set
import psi4
psi4.set_output_file("output.dat", False)
h2o = psi4.geometry("""
0 1
H
O 1 0.957
H 2 0.957 1 104.5
""")
psi4.set_options({'freeze_core': 'false'})
psi4.basis_helper("""
# Sadlej-pVTZ
spherical
****
H 0
S 4 1.00
33.8650140000 0.0060680000
5.0947880000 0.0453160000
1.1587860000 0.2028460000
0.3258400000 0.5037090000
S 1 1.00
0.1027410000 1.0000000000
S 1 1.00
0.0324000000 1.0000000000
P 2 1.00
1.1588000000 0.1884400000
0.3258000000 0.8824200000
P 2 1.00
0.1027000000 0.1178000000
0.0324000000 0.0042000000
****
C 0
S 5 1.00
5240.6353000000 0.0009370000
782.2048000000 0.0072280000
178.3508300000 0.0363440000
50.8159420000 0.1306000000
16.8235620000 0.3189310000
S 2 1.00
6.1757760000 0.4387420000
2.4180490000 0.2149740000
S 1 1.00
0.5119000000 1.0000000000
S 1 1.00
0.1565900000 1.0000000000
S 1 1.00
0.0479000000 1.0000000000
P 4 1.00
18.8418000000 0.0138870000
4.1592400000 0.0862790000
1.2067100000 0.2887440000
0.3855400000 0.4994110000
P 1 1.00
0.1219400000 1.0000000000
P 1 1.00
0.0385680000 1.0000000000
D 2 1.00
1.2067000000 0.2628500000
0.3855000000 0.8043000000
D 2 1.00
0.1219000000 0.6535000000
0.0386000000 0.8636000000
****
O 0
S 5 1.00
10662.2850000000 0.0007990000
1599.7097000000 0.0061530000
364.7252600000 0.0311570000
103.6517900000 0.1155960000
33.9058050000 0.3015520000
S 2 1.00
12.2874690000 0.4448700000
4.7568050000 0.2431720000
S 1 1.00
1.0042710000 1.0000000000
S 1 1.00
0.3006860000 1.0000000000
S 1 1.00
0.0900300000 1.0000000000
P 4 1.00
34.8564630000 0.0156480000
7.8431310000 0.0981970000
2.3062490000 0.3077680000
0.7231640000 0.4924700000
P 1 1.00
0.2148820000 1.0000000000
P 1 1.00
0.0638500000 1.0000000000
D 2 1.00
2.3062000000 0.2027000000
0.7232000000 0.5791000000
D 2 1.00
0.2149000000 0.7854500000
0.0639000000 0.5338700000
****
""")
ccsd_e, wfn = psi4.properties('ccsd', properties=['dipole'], return_wfn=True)
psi4.oeprop(wfn,"DIPOLE", "QUADRUPOLE", title="(OEPROP)CC")
psi4.core.print_variables()
| ashutoshvt/psi4 | samples/python/cc54/input.py | Python | lgpl-3.0 | 2,940 | ["Psi4"] | 2c22b8f9408c4a56bf9087f415cde365d52e9f06f108242ed2dbff76f066a931 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Marcos Mobley ('markybob') <markybob@gmail.com>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
import pygtk
pygtk.require('2.0')
import gtk
from deluge.common import get_pixmap, get_version, open_url_in_browser
from deluge.ui.client import client
from deluge.ui.gtkui.common import get_deluge_icon
class AboutDialog:
def __init__(self):
def url_hook(dialog, url):
open_url_in_browser(url)
gtk.about_dialog_set_url_hook(url_hook)
self.about = gtk.AboutDialog()
self.about.set_position(gtk.WIN_POS_CENTER)
self.about.set_name("Deluge")
self.about.set_program_name(_("Deluge"))
version = get_version()
self.about.set_copyright(
_("Copyright %(year_start)s-%(year_end)s Deluge Team") % {"year_start": 2007, "year_end": 2015})
self.about.set_comments(
_("A peer-to-peer file sharing program\nutilizing the BitTorrent protocol.")
+ "\n\n" + _("Client:") + " %s\n" % version)
self.about.set_version(version)
self.about.set_authors([
_("Current Developers:"), "Andrew Resch", "Damien Churchill",
"John Garland", "Calum Lind", "", "libtorrent (libtorrent.org):",
"Arvid Norberg", "", _("Past Developers or Contributors:"),
"Zach Tibbitts", "Alon Zakai", "Marcos Mobley", "Alex Dedul",
"Sadrul Habib Chowdhury", "Ido Abramovich", "Martijn Voncken"
])
self.about.set_artists(["Andrew Wedderburn", "Andrew Resch"])
self.about.set_translator_credits("\n".join([
"Aaron Wang Shi", "abbigss", "ABCdatos", "Abcx", "Actam", "Adam",
"adaminikisi", "adi_oporanu", "Adrian Goll", "afby", "Ahmades",
"Ahmad Farghal", "Ahmad Gharbeia أحمد غربية", "akira", "Aki Sivula",
"Alan Pepelko", "Alberto", "Alberto Ferrer", "alcatr4z", "AlckO",
"Aleksej Korgenkov", "Alessio Treglia", "Alexander Ilyashov",
"Alexander Matveev", "Alexander Saltykov", "Alexander Taubenkorb",
"Alexander Telenga", "Alexander Yurtsev", "Alexandre Martani",
"Alexandre Rosenfeld", "Alexandre Sapata Carbonell",
"Alexey Osipov", "Alin Claudiu Radut", "allah", "AlSim",
"Alvaro Carrillanca P.", "A.Matveev", "Andras Hipsag",
"András Kárász", "Andrea Ratto", "Andreas Johansson", "Andreas Str",
"André F. Oliveira", "AndreiF", "andrewh", "Angel Guzman Maeso",
"Aníbal Deboni Neto", "animarval", "Antonio Cono", "antoniojreyes",
"Anton Shestakov", "Anton Yakutovich", "antou",
"Arkadiusz Kalinowski", "Artin", "artir", "Astur",
"Athanasios Lefteris", "Athmane MOKRAOUI (ButterflyOfFire)",
"Augusta Carla Klug", "Avoledo Marco", "axaard", "AxelRafn",
"Axezium", "Ayont", "b3rx", "Bae Taegil", "Bajusz Tamás",
"Balaam's Miracle", "Ballestein", "Bent Ole Fosse", "berto89",
"bigx", "Bjorn Inge Berg", "blackbird", "Blackeyed", "blackmx",
"BlueSky", "Blutheo", "bmhm", "bob00work", "boenki",
"Bogdan Bădic-Spătariu", "bonpu", "Boone", "boss01",
"Branislav Jovanović", "bronze", "brownie", "Brus46", "bumper",
"butely", "BXCracer", "c0nfidencal", "Can Kaya",
"Carlos Alexandro Becker", "cassianoleal", "Cédric.h",
"César Rubén", "chaoswizard", "Chen Tao", "chicha",
"Chien Cheng Wei", "Christian Kopac", "Christian Widell",
"Christoffer Brodd-Reijer", "christooss", "CityAceE", "Clopy",
"Clusty", "cnu", "Commandant", "Constantinos Koniaris", "Coolmax",
"cosmix", "Costin Chirvasuta", "CoVaLiDiTy", "cow_2001",
"Crispin Kirchner", "crom", "Cruster", "Cybolic", "Dan Bishop",
"Danek", "Dani", "Daniel Demarco", "Daniel Ferreira",
"Daniel Frank", "Daniel Holm", "Daniel Høyer Iversen",
"Daniel Marynicz", "Daniel Nylander", "Daniel Patriche",
"Daniel Schildt", "Daniil Sorokin", "Dante Díaz", "Daria Michalska",
"DarkenCZ", "Darren", "Daspah", "David Eurenius", "davidhjelm",
"David Machakhelidze", "Dawid Dziurdzia", "Daya Adianto ", "dcruz",
"Deady", "Dereck Wonnacott", "Devgru", "Devid Antonio Filoni"
"DevilDogTG", "di0rz`", "Dialecti Valsamou", "Diego Medeiros",
"Dkzoffy", "Dmitrij D. Czarkoff", "Dmitriy Geels",
"Dmitry Olyenyov", "Dominik Kozaczko", "Dominik Lübben", "doomster",
"Dorota Król", "Doyen Philippe", "Dread Knight", "DreamSonic",
"duan", "Duong Thanh An", "DvoglavaZver", "dwori", "dylansmrjones",
"Ebuntor", "Edgar Alejandro Jarquin Flores", "Eetu", "ekerazha",
"Elias Julkunen", "elparia", "Emberke", "Emiliano Goday Caneda",
"EndelWar", "eng.essam", "enubuntu", "ercangun", "Erdal Ronahi",
"ergin üresin", "Eric", "Éric Lassauge", "Erlend Finvåg", "Errdil",
"ethan shalev", "Evgeni Spasov", "ezekielnin", "Fabian Ordelmans",
"Fabio Mazanatti", "Fábio Nogueira", "FaCuZ", "Felipe Lerena",
"Fernando Pereira", "fjetland", "Florian Schäfer", "FoBoS", "Folke",
"Force", "fosk", "fragarray", "freddeg", "Frédéric Perrin",
"Fredrik Kilegran", "FreeAtMind", "Fulvio Ciucci", "Gabor Kelemen",
"Galatsanos Panagiotis", "Gaussian", "gdevitis", "Georg Brzyk",
"George Dumitrescu", "Georgi Arabadjiev", "Georg Sieber",
"Gerd Radecke", "Germán Heusdens", "Gianni Vialetto",
"Gigih Aji Ibrahim", "Giorgio Wicklein", "Giovanni Rapagnani",
"Giuseppe", "gl", "glen", "granjerox", "Green Fish", "greentea",
"Greyhound", "G. U.", "Guillaume BENOIT", "Guillaume Pelletier",
"Gustavo Henrique Klug", "gutocarvalho", "Guybrush88",
"Hans Rødtang", "HardDisk", "Hargas Gábor",
"Heitor Thury Barreiros Barbosa", "helios91940", "helix84",
"Helton Rodrigues", "Hendrik Luup", "Henrique Ferreiro",
"Henry Goury-Laffont", "Hezy Amiel", "hidro", "hoball", "hokten",
"Holmsss", "hristo.num", "Hubert Życiński", "Hyo", "Iarwain", "ibe",
"ibear", "Id2ndR", "Igor Zubarev", "IKON (Ion)", "imen",
"Ionuț Jula", "Isabelle STEVANT", "István Nyitrai", "Ivan Petrovic",
"Ivan Prignano", "IvaSerge", "jackmc", "Jacks0nxD", "Jack Shen",
"Jacky Yeung", "Jacques Stadler", "Janek Thomaschewski", "Jan Kaláb",
"Jan Niklas Hasse", "Jasper Groenewegen", "Javi Rodríguez",
"Jayasimha (ಜಯಸಿಂಹ)", "jeannich", "Jeff Bailes", "Jesse Zilstorff",
"Joan Duran", "João Santos", "Joar Bagge", "Joe Anderson",
"Joel Calado", "Johan Linde", "John Garland", "Jojan", "jollyr0ger",
"Jonas Bo Grimsgaard", "Jonas Granqvist", "Jonas Slivka",
"Jonathan Zeppettini", "Jørgen", "Jørgen Tellnes", "josé",
"José Geraldo Gouvêa", "José Iván León Islas", "José Lou C.",
"Jose Sun", "Jr.", "Jukka Kauppinen", "Julián Alarcón",
"julietgolf", "Jusic", "Justzupi", "Kaarel", "Kai Thomsen",
"Kalman Tarnay", "Kamil Páral", "Kane_F", "kaotiks@gmail.com",
"Kateikyoushii", "kaxhinaz", "Kazuhiro NISHIYAMA", "Kerberos",
"Keresztes Ákos", "kevintyk", "kiersie", "Kimbo^", "Kim Lübbe",
"kitzOgen", "Kjetil Rydland", "kluon", "kmikz", "Knedlyk",
"koleoptero", "Kőrösi Krisztián", "Kouta", "Krakatos",
"Krešo Kunjas", "kripken", "Kristaps", "Kristian Øllegaard",
"Kristoffer Egil Bonarjee", "Krzysztof Janowski",
"Krzysztof Zawada", "Larry Wei Liu", "laughterwym", "Laur Mõtus",
"lazka", "leandrud", "lê bình", "Le Coz Florent", "Leo", "liorda",
"LKRaider", "LoLo_SaG", "Long Tran", "Lorenz", "Low Kian Seong",
"Luca Andrea Rossi", "Luca Ferretti", "Lucky LIX", "Luis Gomes",
"Luis Reis", "Łukasz Wyszyński", "luojie-dune", "maaark",
"Maciej Chojnacki", "Maciej Meller", "Mads Peter Rommedahl",
"Major Kong", "Malaki", "malde", "Malte Lenz", "Mantas Kriaučiūnas",
"Mara Sorella", "Marcin", "Marcin Falkiewicz", "marcobra",
"Marco da Silva", "Marco de Moulin", "Marco Rodrigues", "Marcos",
"Marcos Escalier", "Marcos Mobley", "Marcus Ekstrom",
"Marek Dębowski", "Mário Buči", "Mario Munda", "Marius Andersen",
"Marius Hudea", "Marius Mihai", "Mariusz Cielecki",
"Mark Krapivner", "marko-markovic", "Markus Brummer",
"Markus Sutter", "Martin", "Martin Dybdal", "Martin Iglesias",
"Martin Lettner", "Martin Pihl", "Masoud Kalali", "mat02",
"Matej Urbančič", "Mathias-K", "Mathieu Arès",
"Mathieu D. (MatToufoutu)", "Mathijs", "Matrik", "Matteo Renzulli",
"Matteo Settenvini", "Matthew Gadd", "Matthias Benkard",
"Matthias Mailänder", "Mattias Ohlsson", "Mauro de Carvalho",
"Max Molchanov", "Me", "MercuryCC", "Mert Bozkurt", "Mert Dirik",
"MFX", "mhietar", "mibtha", "Michael Budde", "Michael Kaliszka",
"Michalis Makaronides", "Michał Tokarczyk", "Miguel Pires da Rosa",
"Mihai Capotă", "Miika Metsälä", "Mikael Fernblad", "Mike Sierra",
"mikhalek", "Milan Prvulović", "Milo Casagrande", "Mindaugas",
"Miroslav Matejaš", "misel", "mithras", "Mitja Pagon", "M.Kitchen",
"Mohamed Magdy", "moonkey", "MrBlonde", "muczy", "Münir Ekinci",
"Mustafa Temizel", "mvoncken", "Mytonn", "NagyMarton", "neaion",
"Neil Lin", "Nemo", "Nerijus Arlauskas", "Nicklas Larsson",
"Nicolaj Wyke", "Nicola Piovesan", "Nicolas Sabatier",
"Nicolas Velin", "Nightfall", "NiKoB", "Nikolai M. Riabov",
"Niko_Thien", "niska", "Nithir", "noisemonkey", "nomemohes",
"nosense", "null", "Nuno Estêvão", "Nuno Santos", "nxxs", "nyo",
"obo", "Ojan", "Olav Andreas Lindekleiv", "oldbeggar",
"Olivier FAURAX", "orphe", "osantana", "Osman Tosun", "OssiR",
"otypoks", "ounn", "Oz123", "Özgür BASKIN", "Pablo Carmona A.",
"Pablo Ledesma", "Pablo Navarro Castillo", "Paco Molinero",
"Pål-Eivind Johnsen", "pano", "Paolo Naldini", "Paracelsus",
"Patryk13_03", "Patryk Skorupa", "PattogoTehen", "Paul Lange",
"Pavcio", "Paweł Wysocki", "Pedro Brites Moita",
"Pedro Clemente Pereira Neto", "Pekka \"PEXI\" Niemistö", "Penegal",
"Penzo", "perdido", "Peter Kotrcka", "Peter Skov",
"Peter Van den Bosch", "Petter Eklund", "Petter Viklund",
"phatsphere", "Phenomen", "Philipi", "Philippides Homer", "phoenix",
"pidi", "Pierre Quillery", "Pierre Rudloff", "Pierre Slamich",
"Pietrao", "Piotr Strębski", "Piotr Wicijowski", "Pittmann Tamás",
"Playmolas", "Prescott", "Prescott_SK", "pronull",
"Przemysław Kulczycki", "Pumy", "pushpika", "PY", "qubicllj",
"r21vo", "Rafał Barański", "rainofchaos", "Rajbir", "ras0ir", "Rat",
"rd1381", "Renato", "Rene Hennig", "Rene Pärts", "Ricardo Duarte",
"Richard", "Robert Hrovat", "Roberth Sjonøy", "Robert Lundmark",
"Robin Jakobsson", "Robin Kåveland", "Rodrigo Donado",
"Roel Groeneveld", "rohmaru", "Rolf Christensen", "Rolf Leggewie",
"Roni Kantis", "Ronmi", "Rostislav Raykov", "royto", "RuiAmaro",
"Rui Araújo", "Rui Moura", "Rune Svendsen", "Rusna", "Rytis",
"Sabirov Mikhail", "salseeg", "Sami Koskinen", "Samir van de Sand",
"Samuel Arroyo Acuña", "Samuel R. C. Vale", "Sanel", "Santi",
"Santi Martínez Cantelli", "Sardan", "Sargate Kanogan",
"Sarmad Jari", "Saša Bodiroža", "sat0shi", "Saulius Pranckevičius",
"Savvas Radevic", "Sebastian Krauß", "Sebastián Porta", "Sedir",
"Sefa Denizoğlu", "sekolands", "Selim Suerkan", "semsomi",
"Sergii Golovatiuk", "setarcos", "Sheki", "Shironeko", "Shlomil",
"silfiriel", "Simone Tolotti", "Simone Vendemia", "sirkubador",
"Sławomir Więch", "slip", "slyon", "smoke", "Sonja", "spectral",
"spin_555", "spitf1r3", "Spiziuz", "Spyros Theodoritsis", "SqUe",
"Squigly", "srtck", "Stefan Horning", "Stefano Maggiolo",
"Stefano Roberto Soleti", "steinberger", "Stéphane Travostino",
"Stephan Klein", "Steven De Winter", "Stevie", "Stian24", "stylius",
"Sukarn Maini", "Sunjae Park", "Susana Pereira", "szymon siglowy",
"takercena", "TAS", "Taygeto", "temy4", "texxxxxx", "thamood",
"Thanos Chatziathanassiou", "Tharawut Paripaiboon", "Theodoor",
"Théophane Anestis", "Thor Marius K. Høgås", "Tiago Silva",
"Tiago Sousa", "Tikkel", "tim__b", "Tim Bordemann", "Tim Fuchs",
"Tim Kornhammar", "Timo", "Timo Jyrinki", "Timothy Babych",
"TitkosRejtozo", "Tom", "Tomas Gustavsson", "Tomas Valentukevičius",
"Tomasz Dominikowski", "Tomislav Plavčić", "Tom Mannerhagen",
"Tommy Mikkelsen", "Tom Verdaat", "Tony Manco",
"Tor Erling H. Opsahl", "Toudi", "tqm_z", "Trapanator", "Tribaal",
"Triton", "TuniX12", "Tuomo Sipola", "turbojugend_gr", "Turtle.net",
"twilight", "tymmej", "Ulrik", "Umarzuki Mochlis", "unikob",
"Vadim Gusev", "Vagi", "Valentin Bora", "Valmantas Palikša",
"VASKITTU", "Vassilis Skoullis", "vetal17", "vicedo", "viki",
"villads hamann", "Vincent Garibal", "Vincent Ortalda", "vinchi007",
"Vinícius de Figueiredo Silva", "Vinzenz Vietzke", "virtoo",
"virtual_spirit", "Vitor Caike", "Vitor Lamas Gatti",
"Vladimir Lazic", "Vladimir Sharshov", "Wanderlust", "Wander Nauta",
"Ward De Ridder", "WebCrusader", "webdr", "Wentao Tang", "wilana",
"Wilfredo Ernesto Guerrero Campos", "Wim Champagne", "World Sucks",
"Xabi Ezpeleta", "Xavi de Moner", "XavierToo", "XChesser",
"Xiaodong Xu", "xyb", "Yaron", "Yasen Pramatarov", "YesPoX",
"Yuren Ju", "Yves MATHIEU", "zekopeko", "zhuqin", "Zissan",
"Γιάννης Κατσαμπίρης", "Артём Попов", "Миша", "Шаймарданов Максим",
"蔡查理"
]))
self.about.set_wrap_license(True)
self.about.set_license(_(
"This program is free software; you can redistribute it and/or "
"modify it under the terms of the GNU General Public License as "
"published by the Free Software Foundation; either version 3 of "
"the License, or (at your option) any later version. \n\n"
"This program "
"is distributed in the hope that it will be useful, but WITHOUT "
"ANY WARRANTY; without even the implied warranty of "
"MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU "
"General Public License for more details. \n\n"
"You should have received "
"a copy of the GNU General Public License along with this program; "
"if not, see <http://www.gnu.org/licenses>. \n\n"
"In addition, as a "
"special exception, the copyright holders give permission to link "
"the code of portions of this program with the OpenSSL library. "
"You must obey the GNU General Public License in all respects for "
"all of the code used other than OpenSSL. \n\n"
"If you modify file(s) "
"with this exception, you may extend this exception to your "
"version of the file(s), but you are not obligated to do so. If "
"you do not wish to do so, delete this exception statement from "
"your version. If you delete this exception statement from all "
"source files in the program, then also delete it here."
))
self.about.set_website("http://deluge-torrent.org")
self.about.set_website_label("deluge-torrent.org")
self.about.set_icon(get_deluge_icon())
self.about.set_logo(gtk.gdk.pixbuf_new_from_file(get_pixmap("deluge-about.png")))
if client.connected():
if not client.is_classicmode():
self.about.set_comments(
self.about.get_comments() + _("Server:") + " %coreversion%\n")
self.about.set_comments(
self.about.get_comments() + "\n" + _("libtorrent:") + " %ltversion%\n")
def on_lt_version(result):
c = self.about.get_comments()
c = c.replace("%ltversion%", result)
self.about.set_comments(c)
def on_info(result):
c = self.about.get_comments()
c = c.replace("%coreversion%", result)
self.about.set_comments(c)
client.core.get_libtorrent_version().addCallback(on_lt_version)
if not client.is_classicmode():
client.daemon.info().addCallback(on_info)
else:
client.core.get_libtorrent_version().addCallback(on_lt_version)
def run(self):
self.about.show_all()
self.about.run()
self.about.destroy()
| Arzie/deluge | deluge/ui/gtkui/aboutdialog.py | Python | gpl-3.0 | 17,911 | ["Gaussian"] | 211b22696c69ecd497fc253588115827e3c75de459baf44a08baeb2411c504f8 |
"""
Exponential Integrate-and-Fire model.
See Neuronal Dynamics, `Chapter 5 Section 2 <http://neuronaldynamics.epfl.ch/online/Ch5.S2.html>`_
"""
# This file is part of the exercise code repository accompanying
# the book: Neuronal Dynamics (see http://neuronaldynamics.epfl.ch)
# located at http://github.com/EPFL-LCN/neuronaldynamics-exercises.
# This is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License 2.0 as published by the
# Free Software Foundation. You should have received a copy of the
# GNU General Public License along with the repository. If not,
# see http://www.gnu.org/licenses/.
# Should you reuse and publish the code for your own purposes,
# please cite the book or point to the webpage http://neuronaldynamics.epfl.ch.
# Wulfram Gerstner, Werner M. Kistler, Richard Naud, and Liam Paninski.
# Neuronal Dynamics: From Single Neurons to Networks and Models of Cognition.
# Cambridge University Press, 2014.
import brian2 as b2
import matplotlib.pyplot as plt
import numpy
from neurodynex3.tools import input_factory
b2.defaultclock.dt = 0.05 * b2.ms
# default values.
MEMBRANE_TIME_SCALE_tau = 12.0 * b2.ms
MEMBRANE_RESISTANCE_R = 20.0 * b2.Mohm
V_REST = -65.0 * b2.mV
V_RESET = -60.0 * b2.mV
RHEOBASE_THRESHOLD_v_rh = -55.0 * b2.mV
SHARPNESS_delta_T = 2.0 * b2.mV
# a technical threshold to tell the algorithm when to reset vm to v_reset
FIRING_THRESHOLD_v_spike = -30. * b2.mV
def simulate_exponential_IF_neuron(
tau=MEMBRANE_TIME_SCALE_tau,
R=MEMBRANE_RESISTANCE_R,
v_rest=V_REST,
v_reset=V_RESET,
v_rheobase=RHEOBASE_THRESHOLD_v_rh,
v_spike=FIRING_THRESHOLD_v_spike,
delta_T=SHARPNESS_delta_T,
I_stim=input_factory.get_zero_current(),
simulation_time=200 * b2.ms):
"""
Implements the dynamics of the exponential Integrate-and-fire model
Args:
tau (Quantity): Membrane time constant
R (Quantity): Membrane resistance
v_rest (Quantity): Resting potential
v_reset (Quantity): Reset value (vm after spike)
v_rheobase (Quantity): Rheobase threshold
v_spike (Quantity) : voltage threshold for the spike condition
delta_T (Quantity): Sharpness of the exponential term
I_stim (TimedArray): Input current
simulation_time (Quantity): Duration for which the model is simulated
Returns:
(voltage_monitor, spike_monitor):
A b2.StateMonitor for the variable "v" and a b2.SpikeMonitor
"""
eqs = """
dv/dt = (-(v-v_rest) +delta_T*exp((v-v_rheobase)/delta_T)+ R * I_stim(t,i))/(tau) : volt
"""
neuron = b2.NeuronGroup(1, model=eqs, reset="v=v_reset", threshold="v>v_spike", method="euler")
neuron.v = v_rest
# monitoring membrane potential of neuron and injecting current
voltage_monitor = b2.StateMonitor(neuron, ["v"], record=True)
spike_monitor = b2.SpikeMonitor(neuron)
# run the simulation
net = b2.Network(neuron, voltage_monitor, spike_monitor)
net.run(simulation_time)
return voltage_monitor, spike_monitor
def getting_started():
"""
A simple example
"""
import neurodynex3.tools.plot_tools as plot_tools
input_current = input_factory.get_step_current(t_start=20, t_end=120, unit_time=b2.ms, amplitude=0.8 * b2.namp)
state_monitor, spike_monitor = simulate_exponential_IF_neuron(
I_stim=input_current, simulation_time=180 * b2.ms)
plot_tools.plot_voltage_and_current_traces(
state_monitor, input_current, title="step current", firing_threshold=FIRING_THRESHOLD_v_spike)
print("nr of spikes: {}".format(spike_monitor.count[0]))
plt.show()
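def compare_step_amplitudes():
    """
    Sketch (not part of the original exercise): drive the same exponential
    integrate-and-fire neuron with a weak and a strong step current and
    report the spike counts. The amplitudes are assumed example values.
    """
    for label, amp in [("weak", 0.2 * b2.namp), ("strong", 1.0 * b2.namp)]:
        current = input_factory.get_step_current(
            t_start=20, t_end=120, unit_time=b2.ms, amplitude=amp)
        _, spike_monitor = simulate_exponential_IF_neuron(
            I_stim=current, simulation_time=180 * b2.ms)
        print("{} step ({}): {} spikes".format(label, amp, spike_monitor.count[0]))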
def _min_curr_expl():
from neurodynex3.tools import plot_tools, input_factory
durations = [1, 2, 5, 10, 20, 50, 100, 200]
min_amp = [8.6, 4.45, 2., 1.15, .70, .48, 0.43, .4]
i = 1
t = durations[i]
I_amp = min_amp[i] * b2.namp
input_current = input_factory.get_step_current(
t_start=10, t_end=10 + t - 1, unit_time=b2.ms, amplitude=I_amp)
state_monitor, spike_monitor = simulate_exponential_IF_neuron(
I_stim=input_current, simulation_time=(t + 20) * b2.ms)
plot_tools.plot_voltage_and_current_traces(
state_monitor, input_current, title="step current",
firing_threshold=FIRING_THRESHOLD_v_spike, legend_location=2)
plt.show()
print("nr of spikes: {}".format(spike_monitor.count[0]))
if __name__ == "__main__":
getting_started()
| EPFL-LCN/neuronaldynamics-exercises | neurodynex3/exponential_integrate_fire/exp_IF.py | Python | gpl-2.0 | 4,520 | ["NEURON"] | fbfc1c89f29f81d59d1a20319ef1797bb4cfe07be0ff78b03b5aadf99c20a961 |
#!/usr/bin/env python
"""
C.7 Mathematical Formulas (p187)
"""
from Arrays import Array
from plasTeX import Command, Environment, sourceChildren
from plasTeX import DimenCommand, GlueCommand
from plasTeX.Logging import getLogger
#
# C.7.1
#
class MathEnvironment(Environment):
mathMode = True
class ThinSpace(Command):
macroName = '.'
class NegativeThinSpace(Command):
    macroName = '!'
class MediumSpace(Command):
macroName = ':'
class ThickSpace(Command):
macroName = ';'
class ThinSpace_(Command):
macroName = '/'
# Need \newcommand\({\begin{math}} and \newcommand\){\end{math}}
class math(MathEnvironment):
@property
def source(self):
if self.hasChildNodes():
return '$%s$' % sourceChildren(self)
return '$'
class displaymath(MathEnvironment):
blockType = True
@property
def source(self):
if self.hasChildNodes():
return r'\[ %s \]' % sourceChildren(self)
if self.macroMode == Command.MODE_END:
return r'\]'
return r'\['
class BeginDisplayMath(Command):
macroName = '['
def invoke(self, tex):
o = self.ownerDocument.createElement('displaymath')
o.macroMode = Command.MODE_BEGIN
self.ownerDocument.context.push(o)
return [o]
class EndDisplayMath(Command):
macroName = ']'
def invoke(self, tex):
o = self.ownerDocument.createElement('displaymath')
o.macroMode = Command.MODE_END
self.ownerDocument.context.pop(o)
return [o]
class BeginMath(Command):
macroName = '('
def invoke(self, tex):
o = self.ownerDocument.createElement('math')
o.macroMode = Command.MODE_BEGIN
self.ownerDocument.context.push(o)
return [o]
class EndMath(Command):
macroName = ')'
def invoke(self, tex):
o = self.ownerDocument.createElement('math')
o.macroMode = Command.MODE_END
self.ownerDocument.context.pop(o)
return [o]
class ensuremath(Command):
args = 'self'
class equation(MathEnvironment):
blockType = True
counter = 'equation'
class EqnarrayStar(Array):
macroName = 'eqnarray*'
blockType = True
mathMode = True
class lefteqn(Command):
args = 'self'
def digest(self, tokens):
res = Command.digest(self, tokens)
obj = self.parentNode
while obj is not None and not isinstance(obj, Array.ArrayCell):
obj = obj.parentNode
if obj is not None:
obj.attributes['colspan'] = 3
obj.style['text-align'] = 'left'
return res
class ArrayCell(Array.ArrayCell):
@property
def source(self):
return '$\\displaystyle %s $' % sourceChildren(self, par=False)
class eqnarray(EqnarrayStar):
macroName = None
counter = 'equation'
class EndRow(Array.EndRow):
""" End of a row """
counter = 'equation'
def invoke(self, tex):
res = Array.EndRow.invoke(self, tex)
res[1].ref = self.ref
self.ownerDocument.context.currentlabel = res[1]
return res
def invoke(self, tex):
res = EqnarrayStar.invoke(self, tex)
if self.macroMode == self.MODE_END:
return res
res[1].ref = self.ref
return res
class nonumber(Command):
def invoke(self, tex):
self.ownerDocument.context.counters['equation'].addtocounter(-1)
def digest(self, tokens):
row = self.parentNode
while not isinstance(row, Array.ArrayRow):
row = row.parentNode
row.ref = None
class notag(nonumber):
pass
class lefteqn(Command):
args = 'self'
#
# Style Parameters
#
class jot(DimenCommand):
value = DimenCommand.new(0)
class mathindent(DimenCommand):
value = DimenCommand.new(0)
class abovedisplayskip(GlueCommand):
value = GlueCommand.new(0)
class belowdisplayskip(GlueCommand):
value = GlueCommand.new(0)
class abovedisplayshortskip(GlueCommand):
value = GlueCommand.new(0)
class belowdisplayshortskip(GlueCommand):
value = GlueCommand.new(0)
#
# C.7.2 Common Structures
#
# _
# ^
# '
class frac(Command):
args = 'numer denom'
class sqrt(Command):
args = '[ n ] self'
class ldots(Command):
pass
class cdots(Command):
pass
class vdots(Command):
pass
class ddots(Command):
pass
#
# C.7.3 Mathematical Symbols
#
#
# Table 3.3: Greek Letters
#
class MathSymbol(Command):
pass
# Lowercase
class alpha(MathSymbol): pass
class beta(MathSymbol): pass
class gamma(MathSymbol): pass
class delta(MathSymbol): pass
class epsilon(MathSymbol): pass
class varepsilon(MathSymbol): pass
class zeta(MathSymbol): pass
class eta(MathSymbol): pass
class theta(MathSymbol): pass
class vartheta(MathSymbol): pass
class iota(MathSymbol): pass
class kappa(MathSymbol): pass
class GreekLambda(MathSymbol):
macroName = 'lambda'
class mu(MathSymbol): pass
class nu(MathSymbol): pass
class xi(MathSymbol): pass
class pi(MathSymbol): pass
class varpi(MathSymbol): pass
class rho(MathSymbol): pass
class varrho(MathSymbol): pass
class sigma(MathSymbol): pass
class varsigma(MathSymbol): pass
class tau(MathSymbol): pass
class upsilon(MathSymbol): pass
class phi(MathSymbol): pass
class varphi(MathSymbol): pass
class chi(MathSymbol): pass
class psi(MathSymbol): pass
class omega(MathSymbol): pass
# Uppercase
class Gamma(MathSymbol): pass
class Delta(MathSymbol): pass
class Theta(MathSymbol): pass
class Lambda(MathSymbol): pass
class Xi(MathSymbol): pass
class Pi(MathSymbol): pass
class Sigma(MathSymbol): pass
class Upsilon(MathSymbol): pass
class Phi(MathSymbol): pass
class Psi(MathSymbol): pass
class Omega(MathSymbol): pass
#
# Table 3.4: Binary Operation Symbols
#
class pm(MathSymbol): pass
class mp(MathSymbol): pass
class times(MathSymbol): pass
class div(MathSymbol): pass
class ast(MathSymbol): pass
class star(MathSymbol): pass
class circ(MathSymbol): pass
class bullet(MathSymbol): pass
class cdot(MathSymbol): pass
class cap(MathSymbol): pass
class cup(MathSymbol): pass
class uplus(MathSymbol): pass
class sqcap(MathSymbol): pass
class sqcup(MathSymbol): pass
class vee(MathSymbol): pass
class wedge(MathSymbol): pass
class setminus(MathSymbol): pass
class wr(MathSymbol): pass
class diamond(MathSymbol): pass
class bigtriangleup(MathSymbol): pass
class bigtriangledown(MathSymbol): pass
class triangleleft(MathSymbol): pass
class triangleright(MathSymbol): pass
class lhd(MathSymbol): pass
class rhd(MathSymbol): pass
class unlhd(MathSymbol): pass
class unrhd(MathSymbol): pass
class oplus(MathSymbol): pass
class ominus(MathSymbol): pass
class otimes(MathSymbol): pass
class oslash(MathSymbol): pass
class odot(MathSymbol): pass
class bigcirc(MathSymbol): pass
class dagger(MathSymbol): pass
class ddagger(MathSymbol): pass
class amalg(MathSymbol): pass
#
# Table 3.5: Relation Symbols
#
class Not(MathSymbol):
macroName = 'not'
args = 'symbol'
class leq(MathSymbol): pass
class le(MathSymbol): pass
class prec(MathSymbol): pass
class preceq(MathSymbol): pass
class ll(MathSymbol): pass
class subset(MathSymbol): pass
class subseteq(MathSymbol): pass
class sqsubseteq(MathSymbol): pass
class In(MathSymbol):
macroName = 'in'
class vdash(MathSymbol): pass
class geq(MathSymbol): pass
class ge(MathSymbol): pass
class succ(MathSymbol): pass
class succeq(MathSymbol): pass
class gg(MathSymbol): pass
class supset(MathSymbol): pass
class supseteq(MathSymbol): pass
class sqsupset(MathSymbol): pass
class sqsupseteq(MathSymbol): pass
class ni(MathSymbol): pass
class dashv(MathSymbol): pass
class equiv(MathSymbol): pass
class sim(MathSymbol): pass
class simeq(MathSymbol): pass
class asymp(MathSymbol): pass
class approx(MathSymbol): pass
class cong(MathSymbol): pass
class neq(MathSymbol): pass
class ne(MathSymbol): pass
class doteq(MathSymbol): pass
class notin(MathSymbol): pass
class models(MathSymbol): pass
class perp(MathSymbol): pass
class mid(MathSymbol): pass
class parallel(MathSymbol): pass
class bowtie(MathSymbol): pass
class Join(MathSymbol): pass
class smile(MathSymbol): pass
class frown(MathSymbol): pass
class propto(MathSymbol): pass
#
# Table 3.6: Arrow Symbols
#
class leftarrow(MathSymbol): pass
class Leftarrow(MathSymbol): pass
class rightarrow(MathSymbol): pass
class Rightarrow(MathSymbol): pass
class leftrightarrow(MathSymbol): pass
class Leftrightarrow(MathSymbol): pass
class mapsto(MathSymbol): pass
class hookleftarrow(MathSymbol): pass
class leftharpoonup(MathSymbol): pass
class leftharpoondown(MathSymbol): pass
class rightleftharpoons(MathSymbol): pass
class longleftarrow(MathSymbol): pass
class Longleftarrow(MathSymbol): pass
class longrightarrow(MathSymbol): pass
class Longrightarrow(MathSymbol): pass
class longleftrightarrow(MathSymbol): pass
class Longleftrightarrow(MathSymbol): pass
class longmapsto(MathSymbol): pass
class hookrightarrow(MathSymbol): pass
class rightharpoonup(MathSymbol): pass
class rightharpoondown(MathSymbol): pass
class leadsto(MathSymbol): pass
class uparrow(MathSymbol): pass
class Uparrow(MathSymbol): pass
class downarrow(MathSymbol): pass
class Downarrow(MathSymbol): pass
class updownarrow(MathSymbol): pass
class Updownarrow(MathSymbol): pass
class nearrow(MathSymbol): pass
class searrow(MathSymbol): pass
class swarrow(MathSymbol): pass
class nwarrow(MathSymbol): pass
#
# Table 3.7: Miscellaneous Symbols
#
class aleph(MathSymbol): pass
class hbar(MathSymbol): pass
class imath(MathSymbol): pass
class jmath(MathSymbol): pass
class ell(MathSymbol): pass
class wp(MathSymbol): pass
class Re(MathSymbol): pass
class Im(MathSymbol): pass
class mho(MathSymbol): pass
class prime(MathSymbol): pass
class emptyset(MathSymbol): pass
class nabla(MathSymbol): pass
class surd(MathSymbol): pass
class top(MathSymbol): pass
class bot(MathSymbol): pass
class VerticalBar(MathSymbol):
macroName = '|'
class forall(MathSymbol): pass
class exists(MathSymbol): pass
class neg(MathSymbol): pass
class flat(MathSymbol): pass
class natural(MathSymbol): pass
class sharp(MathSymbol): pass
class backslash(MathSymbol): pass
class partial(MathSymbol): pass
class infty(MathSymbol): pass
class Box(MathSymbol): pass
class Diamond(MathSymbol): pass
class triangle(MathSymbol): pass
class clubsuit(MathSymbol): pass
class diamondsuit(MathSymbol): pass
class heartsuit(MathSymbol): pass
class spadesuit(MathSymbol): pass
#
# Table 3.8: Variable-sized Symbols
#
class sum(MathSymbol): pass
class prod(MathSymbol): pass
class coprod(MathSymbol): pass
class int(MathSymbol): pass
class oint(MathSymbol): pass
class bigcap(MathSymbol): pass
class bigcup(MathSymbol): pass
class bigsqcup(MathSymbol): pass
class bigvee(MathSymbol): pass
class bigwedge(MathSymbol): pass
class bigodot(MathSymbol): pass
class bigotimes(MathSymbol): pass
class bigoplus(MathSymbol): pass
class biguplus(MathSymbol): pass
#
# Table 3.9: Log-like Functions
#
class Logarithm(MathSymbol):
macroName = 'log'
class bmod(MathSymbol): pass
class pmod(MathSymbol):
args = 'self'
class arccos(MathSymbol): pass
class arcsin(MathSymbol): pass
class arctan(MathSymbol): pass
class arg(MathSymbol): pass
class cos(MathSymbol): pass
class cosh(MathSymbol): pass
class cot(MathSymbol): pass
class coth(MathSymbol): pass
class csc(MathSymbol): pass
class deg(MathSymbol): pass
class det(MathSymbol): pass
class dim(MathSymbol): pass
class exp(MathSymbol): pass
class gcd(MathSymbol): pass
class hom(MathSymbol): pass
class inf(MathSymbol): pass
class ker(MathSymbol): pass
class lg(MathSymbol): pass
class lim(MathSymbol): pass
class liminf(MathSymbol): pass
class limsup(MathSymbol): pass
class ln(MathSymbol): pass
class log(MathSymbol): pass
class max(MathSymbol): pass
class min(MathSymbol): pass
class Pr(MathSymbol): pass
class sec(MathSymbol): pass
class sin(MathSymbol): pass
class sinh(MathSymbol): pass
class sup(MathSymbol): pass
class tan(MathSymbol): pass
class tanh(MathSymbol): pass
#
# C.7.4 Arrays (see Arrays.py)
#
#
# C.7.5 Delimiters
#
class left(Command):
args = 'delim'
class right(Command):
args = 'delim'
# Table 3.10: Delimiters and TeXbook (p359)
class Delimiter(Command):
pass
class langle(Delimiter): pass
class rangle(Delimiter): pass
class lbrace(Delimiter): pass
class rbrace(Delimiter): pass
class lceil(Delimiter): pass
class rceil(Delimiter): pass
class lfloor(Delimiter): pass
class rfloor(Delimiter): pass
class lgroup(Delimiter): pass
class rgroup(Delimiter): pass
class lmoustache(Delimiter): pass
class rmoustache(Delimiter): pass
class uparrow(Delimiter): pass
class Uparrow(Delimiter): pass
class downarrow(Delimiter): pass
class Downarrow(Delimiter): pass
class updownarrow(Delimiter): pass
class Updownarrow(Delimiter): pass
class arrowvert(Delimiter): pass
class Arrowvert(Delimiter): pass
class vert(Delimiter): pass
class Vert(Delimiter): pass
class backslash(Delimiter): pass
class bracevert(Delimiter): pass
class bigl(Delimiter): pass
class bigm(Delimiter): pass
class bigr(Delimiter): pass
class Bigl(Delimiter): pass
class Bigm(Delimiter): pass
class Bigr(Delimiter): pass
class biggl(Delimiter): pass
class biggr(Delimiter): pass
class Biggl(Delimiter): pass
class Biggr(Delimiter): pass
class biggm(Delimiter): pass
class Biggm(Delimiter): pass
class Big(Delimiter):
args = 'char'
class bigg(Delimiter):
args = 'char'
class Bigg(Delimiter):
args = 'char'
class choose(Command):
pass
class brack(Command):
pass
class brace(Command):
pass
# NOTE: \sqrt is already defined in C.7.2 above with ``args = '[ n ] self'``;
# redefining it here without arguments would shadow that definition and break
# argument parsing, so the duplicate is omitted.
#
# C.7.6 Putting One Thing Above Another
#
class overline(Command):
args = 'self'
class underline(Command):
args = 'self'
class overbrace(Command):
args = 'self'
class underbrace(Command):
args = 'self'
# Accents
class MathAccent(Command):
args = 'self'
class hat(MathAccent): pass
class check(MathAccent): pass
class breve(MathAccent): pass
class acute(MathAccent): pass
class grave(MathAccent): pass
class tilde(MathAccent): pass
class bar(MathAccent): pass
class vec(MathAccent): pass
class dot(MathAccent): pass
class ddot(MathAccent): pass
class widehat(MathAccent): pass
class widetilde(MathAccent): pass
class imath(MathAccent): pass
class jmath(MathAccent): pass
class stackrel(MathAccent):
args = 'top bottom'
#
# C.7.7 Spacing
#
# These are nested inside the MathEnvironment
#
# C.7.8 Changing Style
#
# Type Style
class mathrm(Command):
args = 'self'
class mathit(Command):
args = 'self'
class mathbf(Command):
args = 'self'
class mathsf(Command):
args = 'self'
class mathtt(Command):
args = 'self'
class mathcal(Command):
args = 'self'
class boldmath(Command):
pass
class unboldmath(Command):
pass
# Math Style
class displaystyle(Command):
pass
class textstyle(Command):
pass
class scriptstyle(Command):
pass
class scriptscriptstyle(Command):
pass
| nibrahim/PlasTeX | plasTeX/Base/LaTeX/Math.py | Python | mit | 14,996 | [
"Bowtie"
] | 319d1c68b97fe119f2922b9a3031897f05cb910697fd48bc3ef362f896a37ac3 |
"""
Parser function parse() to parse the _u.mat output file of Wannier90 (u matrices).
"""
from __future__ import print_function
import inspect
import re
from collections import defaultdict
from . import show_output
def parse(fname):
"""
Open the file, parses it and return the values
For now, I just check that the size of the file is correct, but
I don't check the actual content
"""
retdict = defaultdict(list)
if show_output:
print("[{}.{}] Parsing file '{}'".format(
__name__, inspect.currentframe().f_code.co_name, fname))
with open(fname) as f:
lines = f.readlines()
    # Read the values on the second line, which give the size of the matrix
retdict['size'] = [int(_) for _ in lines[1].split()]
retdict = dict(retdict)
if show_output:
for k in sorted(retdict):
print(" {}: {}".format(k, retdict[k]))
print("-"*72)
return retdict
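# A hedged usage sketch (the file name and import path are hypothetical): the
# returned dict exposes the integers from the file's second line, typically
# the number of k-points followed by the matrix dimensions.
#
# from parsers.parse_umat import parse
# info = parse('wannier90_u.mat')
# print(info['size'])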
| mostofi/wannier90 | test-suite/tools/parsers/parse_umat.py | Python | gpl-2.0 | 947 | [
"Wannier90"
] | e2d0c949da12d16baa986144e206c81e0f6515b822b61a6843ff2c6166aade60 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# QuickFF is a code to quickly derive accurate force fields from ab initio input.
# Copyright (C) 2012 - 2019 Louis Vanduyfhuys <Louis.Vanduyfhuys@UGent.be>
# Steven Vandenbrande <Steven.Vandenbrande@UGent.be>,
# Jelle Wieme <Jelle.Wieme@UGent.be>,
# Toon Verstraelen <Toon.Verstraelen@UGent.be>, Center for Molecular Modeling
# (CMM), Ghent University, Ghent, Belgium; all rights reserved unless otherwise
# stated.
#
# This file is part of QuickFF.
#
# QuickFF is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# QuickFF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
from __future__ import print_function
from argparse import ArgumentParser
import sys, os
import h5py as h5
from molmod.units import angstrom
from molmod.io.chk import load_chk
from yaff import System, ForceField, Cell
from quickff.program import __all__ as allowed_programs, __dict__ as program_modes
from quickff.log import log, version
from quickff.tools import set_ffatypes, project_negative_freqs, get_ei_radii, average, charges_to_bcis
from quickff.reference import SecondOrderTaylor, YaffForceField
from quickff.io import read_abinitio, make_yaff_ei, read_bci_constraints
from quickff.settings import Settings
__all__ = ['qff_input_ei', 'qff']
################################################################################
################## qff-input-ei.py ###################
################################################################################
def qff_input_ei_parse_args(args=None):
description = '''\
This script reads atomic charges from an input file and makes a Yaff parameters file
suitable for the QuickFF option --ei.'''
parser = ArgumentParser(description=description)
parser.add_argument(
'-v', '--verbose', default=False, action='store_true',
help='Increase verbosity of the script [default=%(default)s].'
)
parser.add_argument(
'--ffatypes', default=None,
        help='Assign atom types in the system by parsing an ordered list of '
'atom types as argument or through the automatic built-in '
'detection (see documentation) by parsing the detection level '
'low, medium, high or highest. By default (or if None is given), '
'the atom types are assumed to be defined in the input files. '
'[default=%(default)s]'
)
parser.add_argument(
'--enforce-ffatypes', default=None,
help='Enforce the atom type of some atoms (specified by symbol or by '
'index), hence overriding possibly predefined or automatically '
'assigned atom types. [default=%(default)s]'
)
parser.add_argument(
'--gaussian', default=False, action='store_true',
help='Use gaussian smeared charges. The radii are taken from the input '
'file fn_in (from dataset /path/radii for HDF5 file or from label '
'`radii` for CHK file) if the data is present, otherwise the radii '
'are estimated according to the procedure of Chen et al. See '
'``quickff.tools.get_ei_radii`` for more info.'
)
parser.add_argument(
'--bci', default=False, action='store_true',
help='Convert averaged atomic charges to bond charge increments, i.e. '
'charge transfers along the chemical bonds in the system. In this '
'way, one is certain of ending up with a globally neutral system '
'even if bcis from different systems are combined. This option '
'requires the definition of bonds, if the bonds cannot be read '
'from the system file, they will be estimated from the interatomic '
'distances using the detect_bonds routine in the Yaff System '
'class. [default=%(default)s]'
)
parser.add_argument(
'--bci-constraints', default=None,
help='A file containing constraints for the charge to bci fit in a '
'master: slave0,slave1,...: sign format. A new line should be used '
             'for each master and the format is insensitive towards spaces. '
             'Sign should be 1.0 or -1.0 indicating whether or not a sign switch '
'should be introduced when mapping the slaves to the master.'
)
parser.add_argument(
'--ei-scales', default='1,1,1',
        help='A comma-separated list representing the electrostatic neighbor '
'scales'
)
parser.add_argument(
'fn_sys',
help='Any file from which the system can be extracted (MolMod CHK, Gaussian '
'FCHK, XYZ, ...).'
)
parser.add_argument(
'charges',
help='The atomic charges to be used. This argument has the form fn_charges:path, '
'where fn_charges is a filename that contains the charges and path refers '
'to a location within that file where the charges can be found. If '
'fn_charges is an HDF5 file, path is the location of a dataset containing '
'the charges, e.g. \'/charges\'. If fn_charges is a MolMod CHK file, path '
'is the label of the dataset that contains the atomic charges.'
)
parser.add_argument(
'fn_out', default='pars_ei.txt', nargs='?',
help='Name of the Yaff file to write the parameters to. [default=%(default)s]'
)
if args is None:
args = parser.parse_args()
else:
args = parser.parse_args(args.split())
if args.charges.count(':') != 1:
parser.error('The argument charges must contain exactly one colon.')
return args
def qff_input_ei(args=None):
if args is None:
args = qff_input_ei_parse_args()
else:
args = qff_input_ei_parse_args(args)
# Load system file
if args.fn_sys.endswith('.fchk'):
numbers, coords, energy, grad, hess, masses, rvecs, pbc = read_abinitio(args.fn_sys, do_hess=False)
system = System(numbers, coords, rvecs=None, charges=None, radii=None, masses=masses)
system.detect_bonds()
else:
system = System.from_file(args.fn_sys)
# Guess atom types if needed
if args.ffatypes is not None:
enforce = {}
if args.enforce_ffatypes is not None:
from quickff.settings import decode_enforce_ffatypes_dict
enforce = decode_enforce_ffatypes_dict(args.enforce_ffatypes)
set_ffatypes(system, args.ffatypes, enforce=enforce)
ffatypes = [system.ffatypes[i] for i in system.ffatype_ids]
# Load atomic charges
fn_charges, _, path = args.charges.partition(':')
if fn_charges.endswith('.h5'):
with h5.File(fn_charges, 'r') as f:
if not path in f:
raise IOError('Given HDF5 file %s does not contain a dataset %s' % (fn_charges, path))
charges = f[path][:]
radii = None
if args.gaussian:
path_radii = os.path.join(os.path.dirname(path), 'radii')
if 'radii' in f[path]:
radii = average(f['%s/radii' %path][:], ffatypes, fmt='dict')
else:
radii = average(get_ei_radii(system.numbers), ffatypes, fmt='dict')
elif fn_charges.endswith('.chk'):
sample = load_chk(fn_charges)
if path in list(sample.keys()):
charges = sample[path]
else:
raise IOError('Given CHK file %s does not contain a dataset with label %s' % (fn_charges, path))
radii = None
if args.gaussian:
if 'radii' in list(sample.keys()):
radii = average(sample['radii'], ffatypes, fmt='dict')
else:
raise IOError('Invalid extension, fn_charges should be a HDF5 or a CHK file.')
# Derive charge parameters
if args.bci:
constraints = {}
if args.bci_constraints is not None:
constraints = read_bci_constraints(args.bci_constraints)
bcis = charges_to_bcis(charges, ffatypes, system.bonds, constraints=constraints, verbose=args.verbose)
make_yaff_ei(args.fn_out, None, bcis=bcis, radii=radii, scales=[float(s) for s in args.ei_scales.split(',')])
else:
charges = average(charges, ffatypes, fmt='dict', verbose=args.verbose)
make_yaff_ei(args.fn_out, charges, radii=radii, scales=[float(s) for s in args.ei_scales.split(',')])
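# A hedged usage sketch (file names and HDF5 paths are hypothetical):
#
# qff_input_ei('--ffatypes=high --gaussian gaussian.fchk charges.h5:/charges pars_ei.txt')
#
# reads the system from gaussian.fchk, averages the charges found under
# /charges in charges.h5 per atom type, and writes the Yaff parameter file
# pars_ei.txt with gaussian-smeared charges.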
################################################################################
################### qff.py ###################
################################################################################
def qff_parse_args(args=None):
description = '''\
This script will apply QuickFF to derive a covalent force field for the given
system from the ab initio input given in the input files.'''
parser = ArgumentParser(description=description)
parser.add_argument(
'--version', action='version', version='QuickFF %s' %version
)
parser.add_argument(
'-s', '--silent', default=False, action='store_true',
        help='Switch off all logging completely; overrides all other verbosity '
'options.'
)
parser.add_argument(
'-v', '--verbose', default=False, action='store_true',
        help='Increases verbosity; is overridden if SILENT or VERY_VERBOSE is '
'switched on.'
)
parser.add_argument(
'-V', '--very-verbose', default=False, action='store_true',
        help='Increases verbosity to the highest level; is overridden if SILENT is '
'switched on.'
)
parser.add_argument(
'-l', '--logfile', default=None,
help='Redirect logger output to a file with name LOGFILE.'
)
parser.add_argument(
'--scoop', default=False, action='store_true',
help='Flag to enable parallelisation using SCOOP. With SCOOP, the '
'command to run QuickFF is slightly different, the absolute path '
'to quickff.py should be used. For example, to run on 4 cores: '
'python -m scoop -n4 /path/to/%(prog)s --scoop [options] fns'
)
#General settings options
settings = parser.add_argument_group(title='General QuickFF specifications')
settings.add_argument(
'-c', '--config-file', default=None,
help='Specify a configuration file to read all QuickFF settings from.'
)
settings.add_argument(
'-m', '--program-mode', default=None,
choices=[prog for prog in allowed_programs if not prog=='BaseProgram'],
help='Specify the program mode which defines the set of instructions '
'that will be executed.'
)
settings.add_argument(
'--fn-traj', default=None,
help='Read/write the perturbation trajectories from/to FN_TRAJ. If the '
'given file exists, the trajectories are read from the file. '
'Otherwise, the trajectories are written to the given file.'
)
settings.add_argument(
'--only-traj', default=None,
help='Construct the perturbation trajectory only for the terms with '+\
             'the given basenames. This option is only applied in the ' +\
'MakeTrajectories program.'
)
settings.add_argument(
'-p', '--plot-traj', default=None,
help='If set to final, plots the various energy contributions along '
'the perturbation trajectories to using the final force field. '
'If set to all, plots the contributions along the trajectories '
'using all intermediate force fields (given suffixes _Apt1, '
'_Bhc1, _Cpt2 and _Dhc2) as well as the final force field '
'(given the suffix _Ehc3).'
)
settings.add_argument(
'-x', '--xyz-traj', default=False, action='store_true',
help='Write the perturbation trajectories in XYZ format. '
)
settings.add_argument(
'--suffix', default=None,
help = 'Suffix that will be added to all output files.'
)
#Force field options
ff = parser.add_argument_group(title='Options related to the definition and derivation of the force field')
ff.add_argument(
'--ei', default=None,
help='A Yaff parameters file defining the electrostatic contribution '
'of the force field.'
)
ff.add_argument(
'--ei-rcut', default=None,
        help='The real-space cutoff for the electrostatic interactions. If '
             'the system is periodic, the ewald parameters will be adjusted '
             'to this cutoff.'
)
ff.add_argument(
'--vdw', default=None,
help='A Yaff parameters file defining the van der Waals contribution '
'of the force field.'
)
ff.add_argument(
'--vdw-rcut', default=None,
        help='The real-space cutoff for the van der Waals interactions.'
)
ff.add_argument(
'--covres', default=None,
help='A Yaff parameters file defining a residual contribution to the '
'covalent part of the force field.'
)
#System options
system = parser.add_argument_group(title='Options related to the definition of the system')
system.add_argument(
'--ffatypes', default=None,
        help='Assign atom types in the system by parsing an ordered list of '
'atom types as argument or through the automatic built-in '
'detection (see documentation) by parsing the detection level '
'low, medium, high or highest. By default (or if None is given), '
'the atom types are assumed to be defined in the input files. '
'[default=%(default)s]'
)
system.add_argument(
'--enforce-ffatypes', default=None,
help='Enforce the atom type of some atoms (specified by symbol or by '
'index), hence overriding possibly predefined or automatically '
'assigned atom types. [default=%(default)s]'
)
#Input files fn1, fn2, ... represent all input files that specify the system and the ab initio reference data.
parser.add_argument(
'fn', nargs='+',
        help='Input file names that specify the system and ab initio reference '
'data. Multiple file names are allowed, but at least one should '
'be given. Files later in the list overwrite information from '
'earlier files. Allowed file formats are MolMod checkpoint files '
'(file.chk), Gaussian formatted checkpoint files (file.fchk) '
'and VASP xml files (file.xml). '
)
if args is None:
args = parser.parse_args()
else:
args = parser.parse_args(args.split())
    if args.ffatypes is not None and args.ffatypes.lower() == 'none':
args.ffatypes = None
return args
def qff(args=None):
if args is None:
args = qff_parse_args()
else:
args = qff_parse_args(args)
#define logger
verbosity = None
if args.silent:
verbosity = 'silent'
else:
if args.very_verbose:
verbosity = 'highest'
elif args.verbose:
verbosity = 'high'
#get settings
kwargs = {
'fn_traj': args.fn_traj,
'only_traj': args.only_traj,
'program_mode': args.program_mode,
'plot_traj': args.plot_traj,
'xyz_traj': args.xyz_traj,
'suffix': args.suffix,
'log_level': verbosity,
'log_file': args.logfile,
'ffatypes': args.ffatypes,
'enforce_ffatypes': args.enforce_ffatypes,
'ei': args.ei,
'ei_rcut': args.ei_rcut,
'vdw': args.vdw,
'vdw_rcut': args.vdw_rcut,
'covres': args.covres,
}
settings = Settings(fn=args.config_file, **kwargs)
with log.section('INIT', 1, timer='Initializing'):
log.dump('Initializing system')
#read system and ab initio reference
system = None
energy = 0.0
grad = None
hess = None
pbc = None
rvecs = None
for fn in args.fn:
if fn.endswith('.fchk') or fn.endswith('.xml'):
numbers, coords, energy, grad, hess, masses, rvecs, pbc = read_abinitio(fn)
if system is None:
system = System(
numbers, coords, rvecs=rvecs, charges=None, radii=None,
masses=masses
)
else:
system.pos = coords.copy()
system.cell = Cell(rvecs)
system.numbers = numbers.copy()
if masses is not None: system.masses = masses.copy()
system._init_derived()
elif fn.endswith('.chk'):
sample = load_chk(fn)
if 'energy' in list(sample.keys()): energy = sample['energy']
if 'grad' in list(sample.keys()): grad = sample['grad']
elif 'gradient' in list(sample.keys()): grad = sample['gradient']
if 'hess' in list(sample.keys()): hess = sample['hess']
elif 'hessian' in list(sample.keys()): hess = sample['hessian']
if 'rvecs' in list(sample.keys()): pbc = [1,1,1]
else: pbc = [0,0,0]
if system is None:
system = System.from_file(fn)
else:
if 'pos' in list(sample.keys()): system.pos = sample['pos']
elif 'coords' in list(sample.keys()): system.pos = sample['coords']
if 'rvecs' in list(sample.keys()): system.cell = Cell(sample['rvecs'])
elif 'cell' in list(sample.keys()): system.cell = Cell(sample['cell'])
if 'bonds' in list(sample.keys()): system.bonds = sample['bonds']
if 'ffatypes' in list(sample.keys()): system.ffatypes = sample['ffatypes']
if 'ffatype_ids' in list(sample.keys()): system.ffatype_ids = sample['ffatype_ids']
system._init_derived()
else:
raise NotImplementedError('File format for %s not supported' %fn)
assert system is not None, 'No system could be defined from input'
assert grad is not None, 'No ab initio gradient found in input'
assert hess is not None, 'No ab initio hessian found in input'
#complete the system information
if system.bonds is None: system.detect_bonds()
if system.masses is None: system.set_standard_masses()
if system.ffatypes is None:
if settings.ffatypes is not None:
set_ffatypes(system, settings.ffatypes, enforce=settings.enforce_ffatypes)
else:
raise AssertionError('No atom types defined')
if settings.do_hess_negfreq_proj:
log.dump('Projecting negative frequencies out of the mass-weighted hessian.')
with log.section('SYS', 3, 'Initializing'):
hess = project_negative_freqs(hess, system.masses)
#construct ab initio reference
ai = SecondOrderTaylor('ai', coords=system.pos.copy(), energy=energy, grad=grad, hess=hess, pbc=pbc)
#detect a priori defined contributions to the force field
refs = []
if settings.ei is not None:
if rvecs is None:
if settings.ei_rcut is None:
rcut=50*angstrom
else:
rcut = settings.ei_rcut
ff = ForceField.generate(system, settings.ei, rcut=rcut)
else:
if settings.ei_rcut is None:
rcut = 20*angstrom
else:
rcut = settings.ei_rcut
ff = ForceField.generate(system, settings.ei, rcut=rcut, alpha_scale=3.2, gcut_scale=1.5, smooth_ei=True)
refs.append(YaffForceField('EI', ff))
if settings.vdw is not None:
ff = ForceField.generate(system, settings.vdw, rcut=settings.vdw_rcut)
refs.append(YaffForceField('vdW', ff))
if settings.covres is not None:
ff = ForceField.generate(system, settings.covres)
refs.append(YaffForceField('Cov res', ff))
#define quickff program
assert settings.program_mode in allowed_programs, \
'Given program mode %s not allowed. Choose one of %s' %(
settings.program_mode,
', '.join([prog for prog in allowed_programs if not prog=='BaseProgram'])
)
mode = program_modes[settings.program_mode]
program = mode(system, ai, settings, ffrefs=refs)
#run program
program.run()
return program
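# A hedged usage sketch (file names are hypothetical): derive a covalent
# force field from Python on top of a predefined electrostatic contribution:
#
# program = qff('--ffatypes=high --ei pars_ei.txt gaussian.fchk')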
| molmod/QuickFF | quickff/scripts.py | Python | gpl-3.0 | 21,393 | [
"Gaussian",
"VASP"
] | ac2a9b7ef5d839c6e2d615c4f31ee9dbc3fe2c241bceb4713d3d6162d9b5c30d |
# -*- coding: utf-8 -*-
#
# This program is designed to plot an interactive 3D cube of the HI gas in
# the compact group of galaxies HCG91 using the Mayavi module. It is an example
# of "real" astrophsyical data plotted interactively in 3-D using Mayavi.
# In particular, the code does not shy away from the intricasies related to
# dealing with WCS coordinates, large(-r) datasets, etc ...
#
# See the green and red dice examples for more basic introductions to Mayavi.
#
# To run (in a Python shell):
# run HI_to_x3d.py
#
# Created, April 2015, F.P.A. Vogt
#
# Questions, comments : frederic.vogt@alumni.anu.edu.au
#
# If you find this code useful for your research, please cite the following
# article accordingly:
#
# Vogt, Owen et al., Advanced Data Visualization in Astrophysics:
# the X3D Pathway, ApJ (2015).
#
# Copyright (C) 2015 Frédéric P.A. Vogt, Chris I. Owen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------------------------
# Import the required modules
from enthought.mayavi import mlab # for the interactive 3D
from astropy.io import fits as pyfits # to open fits files
import numpy as np
from astropy import wcs # to work with WCS (not required for plotting as such)
# ------------------------------------------------------------------------------
# Define some useful functions to deal with WCS coordinates
# Declination to degrees
def dectodeg(dec):
# BUG Correction (F.P.A.Vogt, 21.12.2015): properly handle objects at
# Dec = 0.x, where np.sign(dec) = 0 !
if np.sign(dec[0]) != 0:
return np.sign(dec[0])*(abs(dec[0]) + dec[1]/60. + dec[2]/3600.)
else:
return abs(dec[0]) + dec[1]/60. + dec[2]/3600.
# R.A. to degrees
def ratodeg(ra):
return (ra[0] + ra[1]/60. + ra[2]/3600.)/24.*360
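# Quick sanity check for the converters above (editorial addition, values
# rounded): ratodeg([22, 9, 7.7]) ~= 332.282 deg and
# dectodeg([-27, 48, 34.0]) ~= -27.809 deg.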
# ------------------------------------------------------------------------------
# Start the program here
# Where to save the diagrams ?
plot_loc = './'
# Open the FITS file from the VLA
hdulist=pyfits.open('./data/HCG91.fits')
# Load the array
# It must be scaled to a 'reasonable' range to avoid small number issues.
scidata = hdulist['PRIMARY'].data[0] * 1000.
header = hdulist['PRIMARY'].header
# Load the correspondance between channels and velocity
c_v = np.loadtxt('./data/HCG91.dat')
# Extract some useful parameters
n_slice=scidata.shape[0]
size_y=scidata.shape[1]
size_x=scidata.shape[2]
hdulist.close()
# Define the min and max velocity slice of interest (no HI outside these)
slice_min = 5
slice_max = 50
# Define the x/y limits (in pixels; no HI signal outside these)
limx = [100,160]
limy = [105,165]
# Extract some more parameters of the datacube, and run some safety checks.
dv = np.mean(c_v[:-1,1] - c_v[1:,1])
stdv = np.std(c_v[:-1,1] - c_v[1:,1])
vmin = c_v[slice_max,1]
vmax = c_v[slice_min,1]
# The galaxies in the field RA,DEC,v,z
# From Hickson (1992)
gals = {'hcg91a':{'ra':[22,9,07.7],
'dec':[-27,48,34.0],
'v':6832.,
'col':(0,1,0),
},
'hcg91b':{'ra':[22,9,16.3],
'dec':[-27,43,49.0],
'v':7196.,
'col':(0.2,0.6,1),
},
'hcg91c':{'ra':[22,9,14.0],
'dec':[-27,46,56.0],
'v':7319.,
'col':(0.8,0,0.8),
},
'hcg91d':{'ra':[22,9,08.4],
'dec':[-27,48,02.0],
'v':7195.,
'col':(1,0,0),
},
}
# All of the code below is only required to plot the galaxies in the good place
# in the cube. Forget this part if you don't care about handling WCS elements.
# This is NOT required to use mayavi per se.
# ---
# Create a new WCS object.
w = wcs.WCS(header)
# Ok, now, create the grids of indexes for the plotting
# One must work directly in WCS coord if one wants the correct coordinates
# This is NOT perfect, but close enough given the field-of-view of the data.
# (i.e. the sky is still flat at that scale)
# Also assumes that North is perfectly up in VLA data !
# Just to be safe, check how big the distortion would be.
# Look at the WCS coordinates of the 4 corners of the cube, and look at the
# mismatch
# ll = lower left, ur = upper-right, etc ...
ra_ll = w.wcs_pix2world(np.array([[limx[0],limy[0],0,0]]),0)[0][0]
dec_ll = w.wcs_pix2world( np.array([[limx[0],limy[0],0,0]]),0)[0][1]
ra_lr = w.wcs_pix2world(np.array([[limx[1],limy[0],0,0]]),0)[0][0]
dec_lr = w.wcs_pix2world( np.array([[limx[1],limy[0],0,0]]),0)[0][1]
ra_ul = w.wcs_pix2world(np.array([[limx[0],limy[1],0,0]]),0)[0][0]
dec_ul = w.wcs_pix2world( np.array([[limx[0],limy[1],0,0]]),0)[0][1]
ra_ur = w.wcs_pix2world(np.array([[limx[1],limy[1],0,0]]),0)[0][0]
dec_ur = w.wcs_pix2world( np.array([[limx[1],limy[1],0,0]]),0)[0][1]
# The corner mismatch, in arcsec
err_dec_l = np.abs(dec_ll - dec_lr)*3600
err_dec_u = np.abs(dec_ul - dec_ur)*3600
err_ra_l = np.abs(ra_ll - ra_ul)*3600*np.cos(np.radians(dec_ll))
err_ra_r = np.abs(ra_lr - ra_ur)*3600*np.cos(np.radians(dec_lr))
print ' WARNING: WCS coord errors (in arcsec):'
print ' err_dec South/North:',np.round(err_dec_l,2),np.round(err_dec_u,2)
print ' err_ra East/West:',np.round(err_ra_l,2),np.round(err_ra_r,2)
print ' '
# Select the final WCS range
ramin = w.wcs_pix2world(np.array([[limx[1],limy[0],0,0]]),0)[0][0]
ramax = w.wcs_pix2world(np.array([[limx[0],limy[0],0,0]]),0)[0][0]
ramean = np.mean([ramin,ramax])
decmin = w.wcs_pix2world( np.array([[limx[0],limy[0],0,0]]),0)[0][1]
decmax = w.wcs_pix2world( np.array([[limx[0],limy[1],0,0]]),0)[0][1]
decmean = np.mean([decmin,decmax])
# For info, print the central WCS coordinate of the cube
print ' Central location [RA;Dec]:',ramean,decmean
# ------------------------------------------------------------------------------
# BUG Correction: 21.12.2015: set the velocity slices to be what they really
# should be (in case the velocity steps are not constant). F.P.A.Vogt
# Extract the velocity values in question - handle both cases when they are
# increasing or decreasing
if np.where(c_v[:,1]==vmin)[0] < np.where(c_v[:,1]==vmax)[0] :
vs_1d = c_v[:,1][np.where(c_v[:,1]==vmin)[0]:np.where(c_v[:,1]==vmax)[0]+1]
else :
vs_1d = c_v[:,1][np.where(c_v[:,1]==vmax)[0]:np.where(c_v[:,1]==vmin)[0]+1]
# Reorder them to be increasing:
vs_1d.sort()
# Finally, create grids with the coordinates of all the elements in the VLA data
# Use relative offsets in arcsec for R.A. and Dec., and absolute velocity for v
ras,decs,vs = np.meshgrid(np.linspace((ramin-ramean)*3600.*np.cos(np.radians(decmin)),
(ramax-ramean)*3600.*np.cos(np.radians(decmin)),
num = (limx[1]-limx[0]+1)),
np.linspace((decmin-decmean)*3600.,
(decmax-decmean)*3600.,
num = (limy[1]-limy[0]+1)),
vs_1d,
indexing='ij')
''' ### Retired 21.12.2015; F.P.A. Vogt
# Finally, create grids with the coordinates of all the elements in the VLA data
# Use relative offsets in arcsec for R.A. and Dec., and absolute velocity for v
ras_old,decs_old,vs_old = np.mgrid[ (ramin-ramean)*3600.*np.cos(np.radians(decmin)):
(ramax-ramean)*3600.*np.cos(np.radians(decmin)):
(limx[1]-limx[0]+1)*1j,
(decmin-decmean)*3600.:
(decmax-decmean)*3600.:
(limy[1]-limy[0]+1)*1j,
vmin:vmax+0.1:dv]
'''
# ------------------------------------------------------------------------------
# Re-order the VLA array to have the dimensions in the good direction !
# (z = v, x= R.A.), etc ...
# Also flip the velocity axis to be in the good direction
HI_cube = scidata[slice_min:slice_max+1,limy[0]:limy[1]+1,limx[0]:limx[1]+1 ]
HI_cube = np.transpose(HI_cube, (2,1,0))[::-1,:,::-1]
# Start the plotting
mlab.close(1)
fig = mlab.figure(1, size=(1100,1100))
# What contours levels do I want ? Can be chosen by defaults, or scripted.
isolevels = [1.3,2.5,3.5,6.0]
# --- !!! ---
# MAYAVI BUG & WORK-AROUND (#1)
# Currently, the x3d export function from Mayavi ignores the "vmin" and "vmax"
# parameters in the plotting function, and only uses the min and max of the
# datasets. Hence, to export the "exact same" color for the different plot
# elements shown in the interactive Mayavi window, the data itself MUST be
# modified; i.e. all values outside the [vmin->vmax] range must be replaced.
# This is NOT elegant, and will hopefully be fixed in future releases of Mayavi.
color_scale = [0.8,6.1]
HI_cube[HI_cube<color_scale[0]] = color_scale[0]
HI_cube[HI_cube>color_scale[1]] = color_scale[1]
# --- !!! ---
# Plot the different iso-contours of the HI emission
# Draw them one-at-a-time to a) control their transparency individually and b)
# export them as individual structures (useful for the interactive html model).
for (j,level) in enumerate(isolevels):
if j == len(isolevels)-1:
op = 1
else:
op = 0.2
# Plot the said contour - and flip the colorscheme for aesthetic purposes.
cm_tweak = -1.0
mlab.contour3d(ras,decs,vs,
HI_cube*cm_tweak,
contours = [level*cm_tweak],
opacity =op,
vmin =color_scale[1]*cm_tweak, vmax = color_scale[0]*cm_tweak,
                    name = 'I: ' + str(level),
colormap = 'Set1')
# Draw a box around the cube to aid in the visualization
mlab.outline(extent=[ np.min(ras),np.max(ras),
np.min(decs), np.max(decs),
c_v[slice_min,1],
c_v[slice_max,1]],
color=(0,0,0),
line_width = 2.0) # Draw a box around it
# Now, add some axes
ax = mlab.axes(extent=[np.min(ras),np.max(ras),
np.min(decs), np.max(decs),
c_v[slice_min,1],
c_v[slice_max,1]],
nb_labels=3, color = (0,0,0))
# Fine tune the look of the axis
ax.axes.fly_mode = 'outer_edges'
ax.title_text_property.font_size = 10
ax.title_text_property.font_family = 'courier'
ax.title_text_property.italic = False
ax.label_text_property.font_family = 'courier'
ax.label_text_property.italic = False
ax.scene.parallel_projection = True
ax.scene.background = (1.0,1.0,1.0)
ax.title_text_property.color = (0,0,0)
ax.label_text_property.color = (0,0,0)
ax.axes.x_label = 'R.A. ["]'
ax.axes.y_label = 'Dec. ["]'
ax.axes.z_label = 'V [km/s]'
ax.axes.label_format = '%-#6.1f'
# --- !!! ---
# MAYAVI BUG & WORK-AROUND (#2)
# Currently, the default axis drawn by Mayavi are NOT exported to the x3d model.
# This is very inconvenient, and will hopefully be addressed in future releases.
# A possible work around is to draw the axis, axis labels and axis tick labels
# manually, one-at-a-time. This is what is done below.
# So, for clarity, turn off the "default" mayavi axis for now.
ax.visible = False
# One should note that the diagrams visible in Figure 2 in Vogt, Owen, et al.,
# ApJ (2015) were generated with the default Mayavi axis, i.e. by setting
#
#ax.visible = True
#
# --- !!! ---
# Now, add spheres and cubes to mark locations of interest in the data.
# First, the 4 galaxies members of the compact groups.
# Also add black crosses for clarity.
cross_size = 100
sphere_size = 50
for (k,gal) in enumerate(gals.keys()):
# Go from RA/Dec -> rescaled pixel space !
coords = [[ratodeg(gals[gal]['ra']),
dectodeg(gals[gal]['dec']),0,0]]
pixcrd = w.wcs_world2pix(coords, 0)
coords_plot = (np.array(coords)-np.array([ramean,decmean,0,0]))*3600.
coords_plot[0][0] *= np.cos(np.radians(decmin))
# Remember that we also flipped x before ...
pixcrd[0][0] = size_x - pixcrd[0][0]
vel_n = gals[gal]['v']
my_x = coords_plot[0][0]
my_y = coords_plot[0][1]
my_z = vel_n
lw = 5
# Plot the black crosses first ...
mlab.points3d([my_x],[my_y],
[my_z],[1.0],
color=(0,0,0),
mode = 'axes',
scale_factor= sphere_size)
# ... and a sphere at the same location.
mlab.quiver3d([my_x],
[my_y - sphere_size/4.],
[my_z],
[0],[1],[0],
scalars = [1],
scale_factor = sphere_size/2.,
scale_mode = 'scalar',
mode = 'sphere',
line_width = lw*0.75,
name = gal,
color=gals[gal]['col'])
# Finally, add the galaxy name as 3D text.
#mlab.text3d(my_x,my_y,my_z,'HCG91'+gal[-1],scale=20,color = (0,0,0))
mlab.text(my_x,my_y,'HCG91'+gal[-1],color=(0,0,0),z=my_z,width=0.1)
# Next, add peculiar HII regions of interest inside HCG 91c
# (See Vogt+, MNRAS (2015) for details)
# First, compute their coordinates
coords_91c = [[ratodeg(gals['hcg91c']['ra']),
dectodeg(gals['hcg91c']['dec']),0,0]]
coords_91c_plot = (np.array(coords_91c)-np.array([ramean,decmean,0,0]))*3600.
coords_91c_plot[0][0] *= np.cos(np.radians(decmin))
cube_size = 5
hx1 = coords_91c_plot[0][0] - 3.2
hx2 = coords_91c_plot[0][0] - 7.0
hx3 = coords_91c_plot[0][0] - 10.5
hy1 = coords_91c_plot[0][1] + 13.5
hy2 = coords_91c_plot[0][1] + 13.5
hy3 = coords_91c_plot[0][1] + 15.3
hv1 = 7244
hv2 = 7235
hv3 = 7230
# Once again, draw both a cube ...
mlab.points3d([hx1,hx2,hx3],[hy1,hy2,hy3],
[hv1,hv2,hv3],[1.0,1.0,1.0],
color=(1,1,1),
mode = 'cube',
scale_factor= cube_size,
name = 'HII_regions')
# And crosses for clarity.
mlab.points3d([hx1,hx2,hx3],[hy1,hy2,hy3],
[hv1,hv2,hv3],[1.0,1.0,1.0],
color=(0,0,0),
mode = 'axes',
scale_factor= 3*cube_size,
name = 'HII_regions')
# Finally, also add a symbols to mark the location of two HI clumps of interest.
# (See Vogt+, MNRAS (2015) for details)
lw = 5
bx1 = -20
by1 = 65
bz1 = 7190
bx2 = 5
by2 = 125
bz2 = 7230
# Once again, some black axes ...
mlab.points3d([bx1,bx2],[by1,by2],
[bz1,bz2],[1.0,1.0],
color=(0,0,0),
mode = 'axes',
scale_factor= cube_size*6)
# and a yellow cube.
mlab.quiver3d([bx1,bx1,bx2,bx2],[by1,by1-cube_size*3/2.,by2,by2-cube_size*3/2.],
[bz1-cube_size*3/2.,bz1,bz2-cube_size*3/2.,bz2],
[0,0,0,0],[0,1,0,1],[1,0,1,0],color=(1,0.7,0),
scalars = [1,1,1,1],
scale_factor = cube_size*3,
scale_mode = 'scalar',
mode = 'cube',
line_width = lw*0.75,
name = 'HI_clumps')
# Finally, trace the tidal tail in HI inside the cube, using a cyan cylinder.
tails_x = np.array([-80,-50, 0, 30, 50, 80, 80, 90, 70,50])
tails_y = np.array([0,0, -10,-15, -50, -70, -100, -110,-140,-160])
tails_z = np.array([6995,7010, 7020, 7035, 7070, 7120, 7121, 7170,7215,7220])
rad = 4
mlab.plot3d(tails_x,tails_y,tails_z, color=(0,1,1), tube_radius=rad,
name= 'HCG91a_tail')
# --- !!! ---
# MAYAVI BUG & WORK-AROUND (#2 continued)
# Normally, the diagram would be complete at this point.
# However, for the x3d export, we still need to manually include the axes,
# axes labels, and some tick labels. This is tedious, but here we go.
#
# If you do not care about x3d export remove those lines, and set
#
#ax.visible = True
#
# --- !!! ---
# Then, add the axes labels. Include multiple occurance to be visible in the
# top - front - side views in the interactive html model.
mlab.text(200, -350, 'R.A. [arcsec] ',z=6671.3, color = (0,0,0),width=0.2)
mlab.text(350,-200,'Dec. [arcsec]', z=6671.3, color=(0,0,0),width=0.2)
mlab.text(-300,-350,'V [km/s]',z=7050, color=(0,0,0),width=0.1)
mlab.text(-300,-200,'Dec. [arcsec]',z=6620, color=(0,0,0),width=0.2)
mlab.text(-350,300,'V [km/s]',z=7050, color=(0,0,0),width=0.1)
mlab.text(-200,300,'R.A. [arcsec]',z=6620, color = (0,0,0),width=0.2)
# Add the axis tick labels ... again more than once, for the
# interactive html views.
mlab.text(340, -350, '300',z=6671.3, color = (0,0,0),width=0.05)
mlab.text(-260, -350,'-300',z=6671.3, color = (0,0,0),width=0.05)
mlab.text(420, 280, '300',z=6671.3, color = (0,0,0),width=0.05)
mlab.text(420, -320, '-300',z=6671.3, color = (0,0,0),width=0.05)
mlab.text(-300, -350,'6671.3',z=6620, color = (0,0,0),width=0.05)
mlab.text(-300, -350,'7643.7',z=7590, color = (0,0,0),width=0.05)
mlab.text(-300, 280, '300',z=6560, color = (0,0,0),width=0.05)
mlab.text(-300, -320,'-300',z=6560, color = (0,0,0),width=0.05)
mlab.text(-350, 300, '6671.3',z=6620, color = (0,0,0),width=0.05)
mlab.text(-350, 300, '7643.7',z=7590, color = (0,0,0),width=0.05)
mlab.text( 280, 300, '300', z=6560, color = (0,0,0),width=0.05)
mlab.text(-320, 300, '-300',z=6560, color = (0,0,0),width=0.05)
# Finally, add some tick lines in the middle of the top-front-side panels
mlab.plot3d([-300,300],[0,0],[np.min(vs),np.min(vs)], color=(0,0,0),
tube_radius=1)
mlab.plot3d([0,0],[-300,300],[np.min(vs),np.min(vs)], color=(0,0,0),
tube_radius=1)
mlab.plot3d([0,0],[np.max(decs),np.max(decs)],[6671.3,7643.7], color=(0,0,0),
tube_radius=1)
mlab.plot3d([-300,300],[np.max(decs),np.max(decs)],[7157.5,7157.5],color=(0,0,0),
tube_radius=1)
mlab.plot3d([np.min(ras),np.min(ras)],[0,0],[6671.3,7643.7], color=(0,0,0),
tube_radius=1)
mlab.plot3d([np.min(ras),np.min(ras)],[-300,300],[7157.5,7157.5], color=(0,0,0),
tube_radius=1)
# All done !
# Save it & export the diagram
mlab.savefig(plot_loc + 'HCG91.x3d')
mlab.savefig(plot_loc + 'HCG91.png')
# Show it !
mlab.show()
| fpavogt/x3d-pathway | fits_to_x3d/HCG91/HCG91.py | Python | gpl-3.0 | 18,943 | [
"Galaxy",
"Mayavi"
] | 7ab08880e4ec206ee334c810b4f63d2fa860d33145f3d8d22282eeaaaef7b310 |
import importlib
import click
import os
import numpy
from PIL.Image import ANTIALIAS
from pyproj import Proj
from netCDF4 import Dataset
from trefoil.utilities.color import Color
from trefoil.netcdf.utilities import collect_statistics, resolve_dataset_variable
from trefoil.render.renderers.stretched import StretchedRenderer
from trefoil.render.renderers.classified import ClassifiedRenderer
def render_image(renderer, data, filename, scale=1, flip_y=False, format='png'):
if flip_y:
data = data[::-1]
img = renderer.render_image(data)
if scale != 1:
img = img.resize((numpy.array(data.shape[::-1]) * scale).astype(numpy.uint), ANTIALIAS)
kwargs = {}
if format == 'png':
kwargs['optimize'] = True
elif format == 'jpg':
img = img.convert('RGB')
kwargs['progressive'] = True
elif format == 'webp':
img = img.convert('RGBA')
kwargs['lossless'] = True
img.save(filename, **kwargs)
def colormap_to_stretched_renderer(colormap, colorspace='hsv', filenames=None, variable=None, fill_value=None, mask=None):
statistics = None
    if 'min:' in colormap or 'max:' in colormap or 'mean:' in colormap:
if not filenames and variable:
raise ValueError('filenames and variable are required inputs to use colormap with statistics')
statistics = collect_statistics(filenames, (variable,), mask=mask)[variable]
colors = []
for entry in colormap.split(','):
value, color = entry.split(':')
# TODO: add proportions of statistics
if value in ('min', 'max', 'mean'):
value = statistics[value]
else:
value = float(value)
colors.append((value, Color.from_hex(color)))
return StretchedRenderer(colors, colorspace=colorspace, fill_value=fill_value)
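# A hedged usage sketch (colors and breakpoints are illustrative):
#
# renderer = colormap_to_stretched_renderer('0:#0000FF,50:#FFFF00,100:#FF0000')
# render_image(renderer, data, 'out.png', flip_y=True)
#
# Stops based on statistics, e.g. 'min:#0000FF,max:#FF0000', additionally
# require the filenames and variable arguments.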
def get_palette(palette_path):
index = palette_path.rindex('.')
return getattr(importlib.import_module('palettable.' + palette_path[:index]), palette_path[index+1:])
def palette_to_stretched_renderer(palette_path, values, filenames=None, variable=None, fill_value=None, mask=None):
palette = get_palette(palette_path)
values = values.split(',')
    if len(values) < 2:
raise ValueError('Must provide at least 2 values for palette-based stretched renderer')
if 'min' in values or 'max' in values:
if not filenames and variable:
raise ValueError('filenames and variable are required inputs to use palette with statistics')
statistics = collect_statistics(filenames, (variable,), mask=mask)[variable]
for statistic in ('min', 'max'):
if statistic in values:
values[values.index(statistic)] = statistics[statistic]
values = [float(v) for v in values] # in case any are still strings
hex_colors = palette.hex_colors
# TODO: this only works cleanly for min:max or 2 endpoint values. Otherwise require that the number of palette colors match the number of values
colors = [(values[0], Color.from_hex(hex_colors[0]))]
intermediate_colors = hex_colors[1:-1]
if intermediate_colors:
interval = (values[-1] - values[0]) / (len(intermediate_colors) + 1)
for i, color in enumerate(intermediate_colors):
colors.append((values[0] + (i + 1) * interval, Color.from_hex(color)))
colors.append((values[-1], Color.from_hex(hex_colors[-1])))
return StretchedRenderer(colors, colorspace='rgb', fill_value=fill_value) # I think all palettable palettes are in RGB ramps
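# A hedged usage sketch (values are illustrative; Blues_9 is one of the
# palettable colorbrewer palettes):
#
# renderer = palette_to_stretched_renderer('colorbrewer.sequential.Blues_9', '0,100')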
def palette_to_classified_renderer(palette_path, filenames, variable, method='equal', fill_value=None, mask=None):
palette = get_palette(palette_path)
num_breaks = palette.number
colors = [Color(r, g, b) for (r, g, b) in palette.colors]
if method == 'equal':
statistics = collect_statistics(filenames, (variable,), mask=mask)[variable]
step = (statistics['max'] - statistics['min']) / num_breaks
breaks = numpy.linspace(statistics['min'] + step, statistics['max'], num_breaks)
return ClassifiedRenderer(zip(breaks, colors), fill_value=fill_value)
def get_leaflet_anchors(bbox):
"""
Returns Leaflet anchor coordinates for creating an ImageOverlay layer.
"""
wgs84_bbox = bbox.project(Proj(init='EPSG:4326'))
return [[wgs84_bbox.ymin, wgs84_bbox.xmin], [wgs84_bbox.ymax, wgs84_bbox.xmax]]
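# Hedged example (hypothetical numbers): for a bbox whose WGS84 projection
# spans (-124, 42) to (-116, 46), this returns [[42, -124], [46, -116]],
# i.e. [[south, west], [north, east]] as Leaflet's ImageOverlay expects.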
def get_mask(mask_path):
"""
Returns a numpy style mask from a netCDF file.
Parameters
----------
mask_path: string, a compound path of dataset:variable
Returns
-------
boolean mask (True where mask will be applied)
"""
mask_path, mask_variable = resolve_dataset_variable(mask_path)
if not mask_variable:
mask_variable = 'mask'
with Dataset(mask_path) as mask_ds:
        if mask_variable not in mask_ds.variables:
raise click.BadParameter(
'mask variable not found: {0}'.format(mask_variable),
param='--mask', param_hint='--mask'
)
return mask_ds.variables[mask_variable][:].astype('bool') | consbio/clover | trefoil/cli/utilities.py | Python | bsd-3-clause | 5,125 | [
"NetCDF"
] | e26576b265b859b27b1b4892c42f866c7c9e705a51c9f18f1bae6f6dd8f1918f |
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
This module is for controlling PowerPoint. PPT API documentation:
`http://msdn.microsoft.com/en-us/library/aa269321(office.10).aspx`_
"""
import os
import logging
if os.name == 'nt':
from win32com.client import Dispatch
import winreg
import win32ui
import pywintypes
from openlp.core.lib import ScreenList
from .presentationcontroller import PresentationController, PresentationDocument
log = logging.getLogger(__name__)
class PowerpointController(PresentationController):
"""
    Class to control interactions with PowerPoint presentations. It creates the runtime environment, loads
    and closes the presentation, and triggers the correct activities based on the user's input.
"""
log.info('PowerpointController loaded')
def __init__(self, plugin):
"""
Initialise the class
"""
log.debug('Initialising')
super(PowerpointController, self).__init__(plugin, 'Powerpoint', PowerpointDocument)
self.supports = ['ppt', 'pps', 'pptx', 'ppsx']
self.process = None
def check_available(self):
"""
        Check whether PowerPoint is able to run on this machine.
"""
log.debug('check_available')
if os.name == 'nt':
try:
winreg.OpenKey(winreg.HKEY_CLASSES_ROOT, 'PowerPoint.Application').Close()
return True
except WindowsError:
pass
return False
if os.name == 'nt':
def start_process(self):
"""
Loads PowerPoint process.
"""
log.debug('start_process')
if not self.process:
self.process = Dispatch('PowerPoint.Application')
self.process.Visible = True
self.process.WindowState = 2
def kill(self):
"""
Called at system exit to clean up any running presentations.
"""
log.debug('Kill powerpoint')
while self.docs:
self.docs[0].close_presentation()
if self.process is None:
return
try:
if self.process.Presentations.Count > 0:
return
self.process.Quit()
except pywintypes.com_error:
pass
self.process = None
class PowerpointDocument(PresentationDocument):
"""
Class which holds information and controls a single presentation.
"""
def __init__(self, controller, presentation):
"""
Constructor, store information about the file and initialise.
"""
log.debug('Init Presentation Powerpoint')
super(PowerpointDocument, self).__init__(controller, presentation)
self.presentation = None
def load_presentation(self):
"""
Called when a presentation is added to the SlideController. Opens the PowerPoint file using the process created
earlier.
"""
log.debug('load_presentation')
if not self.controller.process or not self.controller.process.Visible:
self.controller.start_process()
try:
self.controller.process.Presentations.Open(self.filepath, False, False, True)
except pywintypes.com_error:
log.debug('PPT open failed')
return False
self.presentation = self.controller.process.Presentations(self.controller.process.Presentations.Count)
self.create_thumbnails()
return True
def create_thumbnails(self):
"""
Create the thumbnail images for the current presentation.
        Note that an alternative and quicker method would be to do::
self.presentation.Slides[n].Copy()
thumbnail = QApplication.clipboard.image()
However, for the moment, we want a physical file since it makes life easier elsewhere.
"""
log.debug('create_thumbnails')
if self.check_thumbnails():
return
for num in range(self.presentation.Slides.Count):
self.presentation.Slides(num + 1).Export(
os.path.join(self.get_thumbnail_folder(), 'slide%d.png' % (num + 1)), 'png', 320, 240)
def close_presentation(self):
"""
Close presentation and clean up objects. This is triggered by a new object being added to SlideController or
OpenLP being shut down.
"""
log.debug('ClosePresentation')
if self.presentation:
try:
self.presentation.Close()
except pywintypes.com_error:
pass
self.presentation = None
self.controller.remove_doc(self)
def is_loaded(self):
"""
Returns ``True`` if a presentation is loaded.
"""
log.debug('is_loaded')
try:
if not self.controller.process.Visible:
return False
if self.controller.process.Windows.Count == 0:
return False
if self.controller.process.Presentations.Count == 0:
return False
except (AttributeError, pywintypes.com_error):
return False
return True
def is_active(self):
"""
Returns ``True`` if a presentation is currently active.
"""
log.debug('is_active')
if not self.is_loaded():
return False
try:
if self.presentation.SlideShowWindow is None:
return False
if self.presentation.SlideShowWindow.View is None:
return False
except (AttributeError, pywintypes.com_error):
return False
return True
def unblank_screen(self):
"""
Unblanks (restores) the presentation.
"""
log.debug('unblank_screen')
self.presentation.SlideShowSettings.Run()
self.presentation.SlideShowWindow.View.State = 1
self.presentation.SlideShowWindow.Activate()
if self.presentation.Application.Version == '14.0':
# Unblanking is broken in PowerPoint 2010, need to redisplay
slide = self.presentation.SlideShowWindow.View.CurrentShowPosition
click = self.presentation.SlideShowWindow.View.GetClickIndex()
self.presentation.SlideShowWindow.View.GotoSlide(slide)
if click:
self.presentation.SlideShowWindow.View.GotoClick(click)
def blank_screen(self):
"""
Blanks the screen.
"""
log.debug('blank_screen')
self.presentation.SlideShowWindow.View.State = 3
def is_blank(self):
"""
Returns ``True`` if screen is blank.
"""
log.debug('is_blank')
if self.is_active():
return self.presentation.SlideShowWindow.View.State == 3
else:
return False
def stop_presentation(self):
"""
Stops the current presentation and hides the output.
"""
log.debug('stop_presentation')
self.presentation.SlideShowWindow.View.Exit()
if os.name == 'nt':
def start_presentation(self):
"""
Starts a presentation from the beginning.
"""
log.debug('start_presentation')
            # SlideShowWindow measures its size/position in points, not pixels
try:
dpi = win32ui.GetActiveWindow().GetDC().GetDeviceCaps(88)
except win32ui.error:
try:
dpi = win32ui.GetForegroundWindow().GetDC().GetDeviceCaps(88)
except win32ui.error:
dpi = 96
size = ScreenList().current['size']
ppt_window = self.presentation.SlideShowSettings.Run()
if not ppt_window:
return
ppt_window.Top = size.y() * 72 / dpi
ppt_window.Height = size.height() * 72 / dpi
ppt_window.Left = size.x() * 72 / dpi
ppt_window.Width = size.width() * 72 / dpi
def get_slide_number(self):
"""
Returns the current slide number.
"""
log.debug('get_slide_number')
return self.presentation.SlideShowWindow.View.CurrentShowPosition
def get_slide_count(self):
"""
Returns total number of slides.
"""
log.debug('get_slide_count')
return self.presentation.Slides.Count
def goto_slide(self, slideno):
"""
Moves to a specific slide in the presentation.
"""
log.debug('goto_slide')
self.presentation.SlideShowWindow.View.GotoSlide(slideno)
def next_step(self):
"""
        Triggers the next effect or slide on the running presentation.
"""
log.debug('next_step')
self.presentation.SlideShowWindow.View.Next()
if self.get_slide_number() > self.get_slide_count():
self.previous_step()
def previous_step(self):
"""
Triggers the previous slide on the running presentation.
"""
log.debug('previous_step')
self.presentation.SlideShowWindow.View.Previous()
def get_slide_text(self, slide_no):
"""
Returns the text on the slide.
``slide_no``
The slide the text is required for, starting at 1.
"""
return _get_text_from_shapes(self.presentation.Slides(slide_no).Shapes)
def get_slide_notes(self, slide_no):
"""
        Returns the notes for the slide.
``slide_no``
The slide the notes are required for, starting at 1.
"""
return _get_text_from_shapes(self.presentation.Slides(slide_no).NotesPage.Shapes)
def _get_text_from_shapes(shapes):
"""
Returns any text extracted from the shapes on a presentation slide.
``shapes``
A set of shapes to search for text.
"""
text = ''
for index in range(shapes.Count):
shape = shapes(index + 1)
if shape.HasTextFrame:
text += shape.TextFrame.TextRange.Text + '\n'
return text
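# --- Hedged example (not part of the original OpenLP module) ---
# PowerPoint positions the slide show window in points rather than pixels
# (72 points per inch), which is why start_presentation() above scales every
# screen coordinate by 72 / dpi. A minimal standalone sketch of that
# conversion, using made-up values:
def _example_pixels_to_points(pixels, dpi=96):
    """Convert a pixel measurement to points for the given screen DPI."""
    return pixels * 72.0 / dpi
# e.g. on a 96 DPI screen, a display 1920 pixels wide maps to 1440 points:
# _example_pixels_to_points(1920) == 1440.0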
| marmyshev/item_title | openlp/plugins/presentations/lib/powerpointcontroller.py | Python | gpl-2.0 | 12,163 | [
"Brian"
] | bff57752cbb561ceb8e5a22ee4af8bae810f56c20554d09a0e4d4b109e24f07a |
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is intended for use as a GYP_GENERATOR. It takes as input (by way of
the generator flag config_path) the path of a json file that dictates the files
and targets to search for. The following keys are supported:
files: list of paths (relative) of the files to search for.
targets: list of targets to search for. The target names are unqualified.
The following is output:
error: only supplied if there is an error.
warning: only supplied if there is a warning.
targets: the set of targets passed in via targets that either directly or
indirectly depend upon the set of paths supplied in files.
build_targets: minimal set of targets that directly depend on the changed
files and need to be built. The expectation is this set of targets is passed
into a build step.
status: outputs one of three values: none of the supplied files were found,
one of the include files changed so that it should be assumed everything
changed (in this case targets and build_targets are not output) or at
least one file was found.
If the generator flag analyzer_output_path is specified, output is written
there. Otherwise output is written to stdout.
"""
import gyp.common
import gyp.ninja_syntax as ninja_syntax
import json
import os
import posixpath
import sys
debug = False
found_dependency_string = 'Found dependency'
no_dependency_string = 'No dependencies'
# Status when it should be assumed that everything has changed.
all_changed_string = 'Found dependency (all)'
# MatchStatus is used to indicate if and how a target depends upon the supplied
# sources.
# The target's sources contain one of the supplied paths.
MATCH_STATUS_MATCHES = 1
# The target has a dependency on another target that contains one of the
# supplied paths.
MATCH_STATUS_MATCHES_BY_DEPENDENCY = 2
# The target's sources weren't in the supplied paths and none of the target's
# dependencies depend upon a target that matched.
MATCH_STATUS_DOESNT_MATCH = 3
# The target doesn't contain the source, but the dependent targets have not yet
# been visited to determine a more specific status yet.
MATCH_STATUS_TBD = 4
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
generator_default_variables[dirname] = '!!!'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def _ToGypPath(path):
"""Converts a path to the format used by gyp."""
if os.sep == '\\' and os.altsep == '/':
return path.replace('\\', '/')
return path
def _ResolveParent(path, base_path_components):
"""Resolves |path|, which starts with at least one '../'. Returns an empty
string if the path shouldn't be considered. See _AddSources() for a
description of |base_path_components|."""
depth = 0
while path.startswith('../'):
depth += 1
path = path[3:]
# Relative includes may go outside the source tree. For example, an action may
# have inputs in /usr/include, which are not in the source tree.
if depth > len(base_path_components):
return ''
if depth == len(base_path_components):
return path
return '/'.join(base_path_components[0:len(base_path_components) - depth]) + \
'/' + path
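# A hedged illustration of _ResolveParent (the paths below are made up):
def _ExampleResolveParent():
  # One '../' strips one component off the base path...
  assert _ResolveParent('../foo.cc', ['chrome', 'browser']) == 'chrome/foo.cc'
  # ...enough '../'s land exactly at the top of the tree...
  assert _ResolveParent('../../foo.cc', ['chrome', 'browser']) == 'foo.cc'
  # ...and escaping the tree entirely yields the empty string.
  assert _ResolveParent('../../../foo.cc', ['chrome', 'browser']) == ''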
def _AddSources(sources, base_path, base_path_components, result):
"""Extracts valid sources from |sources| and adds them to |result|. Each
source file is relative to |base_path|, but may contain '..'. To make
resolving '..' easier |base_path_components| contains each of the
directories in |base_path|. Additionally each source may contain variables.
Such sources are ignored as it is assumed dependencies on them are expressed
and tracked in some other means."""
# NOTE: gyp paths are always posix style.
for source in sources:
if not len(source) or source.startswith('!!!') or source.startswith('$'):
continue
# variable expansion may lead to //.
org_source = source
source = source[0] + source[1:].replace('//', '/')
if source.startswith('../'):
source = _ResolveParent(source, base_path_components)
if len(source):
result.append(source)
continue
result.append(base_path + source)
if debug:
print 'AddSource', org_source, result[len(result) - 1]
def _ExtractSourcesFromAction(action, base_path, base_path_components,
results):
if 'inputs' in action:
_AddSources(action['inputs'], base_path, base_path_components, results)
def _ExtractSources(target, target_dict, toplevel_dir):
# |target| is either absolute or relative and in the format of the OS. Gyp
# source paths are always posix. Convert |target| to a posix path relative to
# |toplevel_dir_|. This is done to make it easy to build source paths.
base_path = _ToGypPath(target)
if base_path == toplevel_dir:
base_path = ''
elif base_path.startswith(toplevel_dir + '/'):
base_path = base_path[len(toplevel_dir) + len('/'):]
base_path = posixpath.dirname(base_path)
base_path_components = base_path.split('/')
# Add a trailing '/' so that _AddSources() can easily build paths.
if len(base_path):
base_path += '/'
if debug:
print 'ExtractSources', target, base_path
results = []
if 'sources' in target_dict:
_AddSources(target_dict['sources'], base_path, base_path_components,
results)
# Include the inputs from any actions. Any changes to these affect the
# resulting output.
if 'actions' in target_dict:
for action in target_dict['actions']:
_ExtractSourcesFromAction(action, base_path, base_path_components,
results)
if 'rules' in target_dict:
for rule in target_dict['rules']:
_ExtractSourcesFromAction(rule, base_path, base_path_components, results)
return results
class Target(object):
"""Holds information about a particular target:
deps: set of Targets this Target depends upon. This is not recursive, only the
direct dependent Targets.
match_status: one of the MatchStatus values.
back_deps: set of Targets that have a dependency on this Target.
visited: used during iteration to indicate whether we've visited this target.
This is used for two iterations, once in building the set of Targets and
again in _GetBuildTargets().
name: fully qualified name of the target.
requires_build: True if the target type is such that it needs to be built.
See _DoesTargetTypeRequireBuild for details.
added_to_compile_targets: used when determining if the target was added to the
set of targets that needs to be built.
in_roots: true if this target is a descendant of one of the root nodes.
is_executable: true if the type of target is executable."""
def __init__(self, name):
self.deps = set()
self.match_status = MATCH_STATUS_TBD
self.back_deps = set()
self.name = name
# TODO(sky): I don't like hanging this off Target. This state is specific
# to certain functions and should be isolated there.
self.visited = False
self.requires_build = False
self.added_to_compile_targets = False
self.in_roots = False
self.is_executable = False
class Config(object):
"""Details what we're looking for
files: set of files to search for
targets: see file description for details."""
def __init__(self):
self.files = []
self.targets = set()
def Init(self, params):
"""Initializes Config. This is a separate method as it raises an exception
if there is a parse error."""
generator_flags = params.get('generator_flags', {})
config_path = generator_flags.get('config_path', None)
if not config_path:
return
try:
f = open(config_path, 'r')
config = json.load(f)
f.close()
except IOError:
raise Exception('Unable to open file ' + config_path)
except ValueError as e:
      raise Exception('Unable to parse config file ' + config_path + ': ' + str(e))
if not isinstance(config, dict):
raise Exception('config_path must be a JSON file containing a dictionary')
self.files = config.get('files', [])
self.targets = set(config.get('targets', []))
def _WasBuildFileModified(build_file, data, files):
"""Returns true if the build file |build_file| is either in |files| or
one of the files included by |build_file| is in |files|."""
if _ToGypPath(build_file) in files:
if debug:
print 'gyp file modified', build_file
return True
# First element of included_files is the file itself.
if len(data[build_file]['included_files']) <= 1:
return False
for include_file in data[build_file]['included_files'][1:]:
# |included_files| are relative to the directory of the |build_file|.
rel_include_file = \
_ToGypPath(gyp.common.UnrelativePath(include_file, build_file))
if rel_include_file in files:
if debug:
print 'included gyp file modified, gyp_file=', build_file, \
'included file=', rel_include_file
return True
return False
def _GetOrCreateTargetByName(targets, target_name):
"""Creates or returns the Target at targets[target_name]. If there is no
Target for |target_name| one is created. Returns a tuple of whether a new
Target was created and the Target."""
if target_name in targets:
return False, targets[target_name]
target = Target(target_name)
targets[target_name] = target
return True, target
def _DoesTargetTypeRequireBuild(target_dict):
"""Returns true if the target type is such that it needs to be built."""
# If a 'none' target has rules or actions we assume it requires a build.
return target_dict['type'] != 'none' or \
target_dict.get('actions') or target_dict.get('rules')
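# A hedged illustration (the target dicts below are made up). Note that a
# 'none' type without actions or rules evaluates falsy rather than strictly
# False:
#   _DoesTargetTypeRequireBuild({'type': 'executable'})             -> True
#   bool(_DoesTargetTypeRequireBuild({'type': 'none'}))             -> False
#   _DoesTargetTypeRequireBuild({'type': 'none', 'actions': [{}]})  -> truthy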
def _GenerateTargets(data, target_list, target_dicts, toplevel_dir, files,
build_files):
"""Returns a tuple of the following:
. A dictionary mapping from fully qualified name to Target.
. A list of the targets that have a source file in |files|.
  . Set of root Targets reachable from the files |build_files|.
This sets the |match_status| of the targets that contain any of the source
files in |files| to MATCH_STATUS_MATCHES.
|toplevel_dir| is the root of the source tree."""
# Maps from target name to Target.
targets = {}
# Targets that matched.
matching_targets = []
# Queue of targets to visit.
targets_to_visit = target_list[:]
# Maps from build file to a boolean indicating whether the build file is in
# |files|.
build_file_in_files = {}
# Root targets across all files.
roots = set()
# Set of Targets in |build_files|.
build_file_targets = set()
while len(targets_to_visit) > 0:
target_name = targets_to_visit.pop()
created_target, target = _GetOrCreateTargetByName(targets, target_name)
if created_target:
roots.add(target)
elif target.visited:
continue
target.visited = True
target.requires_build = _DoesTargetTypeRequireBuild(
target_dicts[target_name])
target.is_executable = target_dicts[target_name]['type'] == 'executable'
build_file = gyp.common.ParseQualifiedTarget(target_name)[0]
if not build_file in build_file_in_files:
build_file_in_files[build_file] = \
_WasBuildFileModified(build_file, data, files)
if build_file in build_files:
build_file_targets.add(target)
# If a build file (or any of its included files) is modified we assume all
# targets in the file are modified.
if build_file_in_files[build_file]:
print 'matching target from modified build file', target_name
target.match_status = MATCH_STATUS_MATCHES
matching_targets.append(target)
else:
sources = _ExtractSources(target_name, target_dicts[target_name],
toplevel_dir)
for source in sources:
if source in files:
print 'target', target_name, 'matches', source
target.match_status = MATCH_STATUS_MATCHES
matching_targets.append(target)
break
# Add dependencies to visit as well as updating back pointers for deps.
for dep in target_dicts[target_name].get('dependencies', []):
targets_to_visit.append(dep)
created_dep_target, dep_target = _GetOrCreateTargetByName(targets, dep)
if not created_dep_target:
roots.discard(dep_target)
target.deps.add(dep_target)
dep_target.back_deps.add(target)
return targets, matching_targets, roots & build_file_targets
def _GetUnqualifiedToTargetMapping(all_targets, to_find):
"""Returns a mapping (dictionary) from unqualified name to Target for all the
Targets in |to_find|."""
result = {}
if not to_find:
return result
to_find = set(to_find)
for target_name in all_targets.keys():
extracted = gyp.common.ParseQualifiedTarget(target_name)
if len(extracted) > 1 and extracted[1] in to_find:
to_find.remove(extracted[1])
result[extracted[1]] = all_targets[target_name]
if not to_find:
return result
return result
def _DoesTargetDependOn(target):
"""Returns true if |target| or any of its dependencies matches the supplied
  set of paths. This updates |match_status| of the Targets as it recurses.
target: the Target to look for."""
if target.match_status == MATCH_STATUS_DOESNT_MATCH:
return False
if target.match_status == MATCH_STATUS_MATCHES or \
target.match_status == MATCH_STATUS_MATCHES_BY_DEPENDENCY:
return True
for dep in target.deps:
if _DoesTargetDependOn(dep):
target.match_status = MATCH_STATUS_MATCHES_BY_DEPENDENCY
return True
target.match_status = MATCH_STATUS_DOESNT_MATCH
return False
def _GetTargetsDependingOn(possible_targets):
"""Returns the list of Targets in |possible_targets| that depend (either
  directly or indirectly) on the matched targets.
possible_targets: targets to search from."""
found = []
for target in possible_targets:
if _DoesTargetDependOn(target):
found.append(target)
return found
def _AddBuildTargets(target, roots, add_if_no_ancestor, result):
"""Recurses through all targets that depend on |target|, adding all targets
that need to be built (and are in |roots|) to |result|.
roots: set of root targets.
add_if_no_ancestor: If true and there are no ancestors of |target| then add
|target| to |result|. |target| must still be in |roots|.
result: targets that need to be built are added here."""
if target.visited:
return
target.visited = True
target.in_roots = not target.back_deps and target in roots
for back_dep_target in target.back_deps:
_AddBuildTargets(back_dep_target, roots, False, result)
target.added_to_compile_targets |= back_dep_target.added_to_compile_targets
target.in_roots |= back_dep_target.in_roots
# Always add 'executable' targets. Even though they may be built by other
# targets that depend upon them it makes detection of what is going to be
# built easier.
if target.in_roots and \
(target.is_executable or
(not target.added_to_compile_targets and
(add_if_no_ancestor or target.requires_build))):
result.add(target)
target.added_to_compile_targets = True
def _GetBuildTargets(matching_targets, roots):
"""Returns the set of Targets that require a build.
matching_targets: targets that changed and need to be built.
roots: set of root targets in the build files to search from."""
result = set()
for target in matching_targets:
_AddBuildTargets(target, roots, True, result)
return result
def _WriteOutput(params, **values):
"""Writes the output, either to stdout or a file is specified."""
if 'error' in values:
print 'Error:', values['error']
if 'status' in values:
print values['status']
if 'targets' in values:
values['targets'].sort()
print 'Supplied targets that depend on changed files:'
for target in values['targets']:
print '\t', target
if 'build_targets' in values:
values['build_targets'].sort()
print 'Targets that require a build:'
for target in values['build_targets']:
print '\t', target
output_path = params.get('generator_flags', {}).get(
'analyzer_output_path', None)
if not output_path:
print json.dumps(values)
return
try:
f = open(output_path, 'w')
f.write(json.dumps(values) + '\n')
f.close()
except IOError as e:
print 'Error writing to output file', output_path, str(e)
def _WasGypIncludeFileModified(params, files):
"""Returns true if one of the files in |files| is in the set of included
files."""
if params['options'].includes:
for include in params['options'].includes:
if _ToGypPath(include) in files:
print 'Include file modified, assuming all changed', include
return True
return False
def _NamesNotIn(names, mapping):
"""Returns a list of the values in |names| that are not in |mapping|."""
return [name for name in names if name not in mapping]
def _LookupTargets(names, mapping):
"""Returns a list of the mapping[name] for each value in |names| that is in
|mapping|."""
return [mapping[name] for name in names if name in mapping]
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
elif flavor == 'win':
default_variables.setdefault('OS', 'win')
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
def GenerateOutput(target_list, target_dicts, data, params):
"""Called by gyp as the final stage. Outputs results."""
config = Config()
try:
config.Init(params)
if not config.files:
raise Exception('Must specify files to analyze via config_path generator '
'flag')
toplevel_dir = _ToGypPath(os.path.abspath(params['options'].toplevel_dir))
if debug:
print 'toplevel_dir', toplevel_dir
if _WasGypIncludeFileModified(params, config.files):
result_dict = { 'status': all_changed_string,
'targets': list(config.targets) }
_WriteOutput(params, **result_dict)
return
all_targets, matching_targets, roots = _GenerateTargets(
data, target_list, target_dicts, toplevel_dir, frozenset(config.files),
params['build_files'])
warning = None
unqualified_mapping = _GetUnqualifiedToTargetMapping(all_targets,
config.targets)
if len(unqualified_mapping) != len(config.targets):
not_found = _NamesNotIn(config.targets, unqualified_mapping)
warning = 'Unable to find all targets: ' + str(not_found)
if matching_targets:
search_targets = _LookupTargets(config.targets, unqualified_mapping)
matched_search_targets = _GetTargetsDependingOn(search_targets)
# Reset the visited status for _GetBuildTargets.
for target in all_targets.itervalues():
target.visited = False
build_targets = _GetBuildTargets(matching_targets, roots)
matched_search_targets = [gyp.common.ParseQualifiedTarget(target.name)[1]
for target in matched_search_targets]
build_targets = [gyp.common.ParseQualifiedTarget(target.name)[1]
for target in build_targets]
else:
matched_search_targets = []
build_targets = []
result_dict = { 'targets': matched_search_targets,
'status': found_dependency_string if matching_targets else
no_dependency_string,
'build_targets': build_targets}
if warning:
result_dict['warning'] = warning
_WriteOutput(params, **result_dict)
except Exception as e:
_WriteOutput(params, error=str(e))
| kishikawakatsumi/Mozc-for-iOS | src/third_party/gyp/pylib/gyp/generator/analyzer.py | Python | apache-2.0 | 20,997 | [
"VisIt"
] | c7dcce8fcaf7bdcc79355a5ab4b586706afeb38ff6aa10eb64b31661f41d6bb4 |
# -*- coding: utf-8
# pylint: disable=line-too-long
"""Lonely, helper functions that are broadly used and don't fit anywhere"""
import os
import sys
import ssl
import yaml
import gzip
import time
import copy
import socket
import shutil
import smtplib
import tarfile
import hashlib
import textwrap
import linecache
import webbrowser
import subprocess
import tracemalloc
import configparser
import urllib.request, urllib.error, urllib.parse
import numpy as np
import pandas as pd
import Bio.PDB as PDB
import itertools as it
from numba import jit
from collections import Counter
from email.mime.text import MIMEText
import anvio
import anvio.db as db
import anvio.tables as t
import anvio.fastalib as u
import anvio.constants as constants
import anvio.filesnpaths as filesnpaths
from anvio.dbinfo import DBInfo as dbi
from anvio.errors import ConfigError, FilesNPathsError
from anvio.sequence import Composition
from anvio.terminal import Run, Progress, SuppressAllOutput, get_date, TimeCode, pluralize
with SuppressAllOutput():
from ete3 import Tree
# psutil is causing lots of problems for lots of people :/
with SuppressAllOutput():
try:
import psutil
PSUTIL_OK=True
except:
PSUTIL_OK=False
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2018, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "A. Murat Eren"
__email__ = "a.murat.eren@gmail.com"
__status__ = "Development"
# for full output
pd.options.display.max_columns=100
pd.options.display.max_rows=100
# Mock progress object that will not report anything, for general clarity.
progress = Progress()
progress.verbose = False
run = Run()
run.verbose = False
def get_total_memory_usage(keep_raw=False):
"""Get the total memory, including children
Parameters
==========
keep_raw : bool, False
A human readable format is returned, e.g. "1.41 GB". If keep_raw, the raw number is
returned, e.g. 1515601920
"""
if not PSUTIL_OK:
return None
current_process = psutil.Process(os.getpid())
mem = current_process.memory_info().rss
for child in current_process.children(recursive=True):
try:
mem += child.memory_info().rss
except:
pass
return mem if keep_raw else human_readable_file_size(mem)
def display_top_memory_usage(snapshot, key_type='lineno', limit=10):
"""A pretty-print for the tracemalloc memory usage module
Modified from https://docs.python.org/3/library/tracemalloc.html
Examples
========
>>> import tracemalloc
>>> import anvio.utils as utils
>>> tracemalloc.start()
    >>> snap = tracemalloc.take_snapshot()
>>> utils.display_top_memory_usage(snap)
Top 10 lines
#1: anvio/bamops.py:160: 4671.3 KiB
constants.cigar_consumption,
#2: anvio/bamops.py:96: 2571.6 KiB
self.cigartuples = np.array(read.cigartuples)
#3: python3.6/linecache.py:137: 1100.0 KiB
lines = fp.readlines()
#4: <frozen importlib._bootstrap_external>:487: 961.4 KiB
#5: typing/templates.py:627: 334.3 KiB
return type(base)(name, (base,), dct)
#6: typing/templates.py:923: 315.7 KiB
class Template(cls):
#7: python3.6/_weakrefset.py:84: 225.2 KiB
self.data.add(ref(item, self._remove))
#8: targets/npyimpl.py:411: 143.2 KiB
class _KernelImpl(_Kernel):
#9: _vendor/pyparsing.py:3349: 139.7 KiB
self.errmsg = "Expected " + _ustr(self)
#10: typing/context.py:456: 105.1 KiB
def on_disposal(wr, pop=self._globals.pop):
3212 other: 4611.9 KiB
Total allocated size: 15179.4 KiB
"""
snapshot = snapshot.filter_traces((
tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
tracemalloc.Filter(False, "<unknown>"),
))
top_stats = snapshot.statistics(key_type)
print("Top %s lines" % limit)
for index, stat in enumerate(top_stats[:limit], 1):
frame = stat.traceback[0]
# replace "/path/to/module/file.py" with "module/file.py"
filename = os.sep.join(frame.filename.split(os.sep)[-2:])
print("#%s: %s:%s: %.1f KiB"
% (index, filename, frame.lineno, stat.size / 1024))
line = linecache.getline(frame.filename, frame.lineno).strip()
if line:
print(' %s' % line)
other = top_stats[limit:]
if other:
size = sum(stat.size for stat in other)
print("%s other: %.1f KiB" % (len(other), size / 1024))
total = sum(stat.size for stat in top_stats)
print("Total allocated size: %.1f KiB" % (total / 1024))
def rev_comp(seq):
return seq.translate(constants.complements)[::-1]
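# A self-contained sketch of what `rev_comp` does; the translation table below
# is an assumption standing in for `constants.complements`:
def _example_rev_comp(seq):
    complements = str.maketrans('ACGTacgt', 'TGCAtgca')
    return seq.translate(complements)[::-1]
# e.g. _example_rev_comp('ATCG') == 'CGAT'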
def rev_comp_gene_calls_dict(gene_calls_dict, contig_sequence):
contig_length = len(contig_sequence)
gene_caller_ids = list(gene_calls_dict.keys())
gene_caller_id_conversion_dict = dict([(gene_caller_ids[-i - 1], i) for i in range(0, len(gene_caller_ids))])
G = lambda g: gene_caller_id_conversion_dict[g]
reverse_complemented_gene_calls = {}
for gene_callers_id in gene_calls_dict:
g = copy.deepcopy(gene_calls_dict[gene_callers_id])
g['start'], g['stop'] = contig_length - g['stop'], contig_length - g['start']
g['direction'] = 'f' if g['direction'] == 'r' else 'r'
reverse_complemented_gene_calls[G(gene_callers_id)] = g
return reverse_complemented_gene_calls, gene_caller_id_conversion_dict
def serialize_args(args, single_dash=False, use_underscore=False, skip_keys=None, translate=None):
cmdline = []
for param, value in args.__dict__.items():
if isinstance(skip_keys, list):
if param in skip_keys:
continue
if translate and param in translate:
param = translate[param]
dash = '-' if single_dash else '--'
if not use_underscore:
param = param.replace('_', '-')
if value is True:
cmdline.append('%s%s' % (dash, param))
elif value is not False and value is not None:
cmdline.append('%s%s' % (dash, param))
cmdline.append(str(value))
return cmdline
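# A hedged usage sketch for `serialize_args` (the namespace below is made up;
# output order follows the namespace's attribute order):
def _example_serialize_args():
    import argparse
    args = argparse.Namespace(num_threads=4, just_do_it=True, output_file=None)
    # True values become bare flags, None/False values are dropped, and
    # everything else becomes a flag followed by its stringified value:
    assert serialize_args(args) == ['--num-threads', '4', '--just-do-it']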
def get_predicted_type_of_items_in_a_dict(d, key):
"""Gets a dictionary `d` and a `key` in it, and returns a type function.
    It is a bit counterintuitive. The dictionary should look like this:
d = {'x': {'key': item, (...)},
'y': {'key': item, (...)},
(...),
}
This is a shitty function, but there was a real need for it, so here we are :/
"""
items = [x[key] for x in d.values()]
if not items:
# there is nothing to see here
return None
try:
if(set(items) == set([None])):
# all items is of type None.
return None
except TypeError:
# this means we are working with an unhashable type.
# it is either list or dict. we will go through items
# and return the type of first item that is not None:
for item in items:
if item == None:
continue
else:
return type(item)
# the code should never come to this line since if everything
# was None that would have been captured by the try block and the
# exception would have never been thrown, but here is a final line
# just to be sure we are not moving on with the rest of the code
# if we entered into this block:
return None
# if we are here, it means not all items are None, and they are not of
# unhashable types (so they must be atomic types such as int, float, or str)
not_float = False
for item in items:
try:
float(item or 0)
except ValueError:
not_float = True
break
if not_float:
return str
else:
for item in items:
try:
if int(item or 0) == float(item or 0):
continue
else:
return float
except ValueError:
return float
return int
def human_readable_file_size(nbytes):
suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
if nbytes == 0: return '0 B'
i = 0
while nbytes >= 1024 and i < len(suffixes)-1:
nbytes /= 1024.
i += 1
f = ('%.2f' % nbytes).rstrip('0').rstrip('.')
return '%s %s' % (f, suffixes[i])
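# A quick illustration of `human_readable_file_size` (byte counts are arbitrary):
def _example_human_readable_file_size():
    assert human_readable_file_size(0) == '0 B'
    assert human_readable_file_size(1024) == '1 KB'           # trailing zeros are stripped
    assert human_readable_file_size(1515601920) == '1.41 GB'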
def get_port_num(port_num = 0, ip='0.0.0.0', run=run):
"""Get a port number for the `ip` address."""
try:
port_num = int(port_num) if port_num else 0
except Exception as e:
raise ConfigError("Not a happy port number :/ %s." % e)
if not port_num:
port_num = get_next_available_port_num(constants.default_port_number)
if not port_num:
raise ConfigError("Anvi'o searched a bunch of port numbers starting from %d, but failed "
"to find an available one for you. Maybe you should specify one :/")
else:
if is_port_in_use(port_num):
raise ConfigError("The port number %d seems to be in use :/" % port_num)
if os.getuid() and port_num < 1024:
run.warning("Using the port number %d requires superuser priviliges, which your user does not "
"seem to have. Since anvi'o does not know anything about your system configuraiton, "
"you are free to go for now. But be prepared for a failed attempt to use this port "
"number to serve stuff." % port_num)
return port_num
def get_next_available_port_num(start=constants.default_port_number, look_upto_next_num_ports=100, ip='0.0.0.0'):
"""Starts from 'start' and incrementally looks for an available port
until 'start + look_upto_next_num_ports', and returns the first
available one."""
for p in range(start, start + look_upto_next_num_ports):
if not is_port_in_use(p, ip):
return p
return None
def is_port_in_use(port, ip='0.0.0.0'):
in_use = False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((ip, port))
if result == 0:
in_use = True
sock.close()
return in_use
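# A hedged sketch combining the two helpers above (the port number is arbitrary):
def _example_find_free_port(preferred=8080):
    """Use `preferred` if it is free, otherwise scan upwards from it."""
    if not is_port_in_use(preferred):
        return preferred
    return get_next_available_port_num(start=preferred)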
def is_program_exists(program, dont_raise=False):
IsExe = lambda p: os.path.isfile(p) and os.access(p, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if IsExe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = os.path.expanduser(path).strip('"')
exe_file = os.path.join(path, program)
if IsExe(exe_file):
return exe_file
if dont_raise:
return False
raise ConfigError("An anvi'o function needs '%s' to be installed on your system, but it doesn't seem to appear "
"in your path :/ If you are certain you have it on your system (for instance you can run it "
"by typing '%s' in your terminal window), you may want to send a detailed bug report. Sorry!"\
% (program, program))
def format_cmdline(cmdline):
"""Takes a cmdline for `run_command` or `run_command_STDIN`, and makes it beautiful."""
if not cmdline or (not isinstance(cmdline, str) and not isinstance(cmdline, list)):
raise ConfigError("You made utils::format_cmdline upset. The parameter you sent to run kinda sucks. It should be string "
"or list type. Note that the parameter `shell` for subprocess.call in this `run_command` function "
"is always False, therefore if you send a string type, it will be split into a list prior to being "
"sent to subprocess.")
if isinstance(cmdline, str):
cmdline = [str(x) for x in cmdline.split(' ')]
else:
cmdline = [str(x) for x in cmdline]
return cmdline
def gzip_compress_file(input_file_path, output_file_path=None, keep_original=False):
filesnpaths.is_file_exists(input_file_path)
if not output_file_path:
output_file_path = input_file_path + '.gz'
filesnpaths.is_output_file_writable(output_file_path)
import gzip
with open(input_file_path, 'rb') as f_in, gzip.open(output_file_path, 'wb') as f_out:
f_out.writelines(f_in)
if not keep_original:
os.remove(input_file_path)
def gzip_decompress_file(input_file_path, output_file_path=None, keep_original=True):
filesnpaths.is_file_exists(input_file_path)
if not input_file_path.endswith('.gz'):
raise ConfigError("gzip_decompress_file function is upset because your input file ('%s') does not "
"end with a '.gz' extension :(")
if not output_file_path:
output_file_path = input_file_path[:-3]
filesnpaths.is_output_file_writable(output_file_path)
import gzip
with gzip.open(input_file_path, 'rb') as f_in, open(output_file_path, 'wb') as f_out:
f_out.writelines(f_in)
if not keep_original:
os.remove(input_file_path)
return output_file_path
def tar_extract_file(input_file_path, output_file_path=None, keep_original=True):
filesnpaths.is_file_tar_file(input_file_path)
if not output_file_path:
raise ConfigError("The tar_extract_file function is displeased because an output file path has not been specified. "
"If you are seeing this message, you are probably a developer, so go fix your code please, and "
"everyone will be happy then.")
tf = tarfile.open(input_file_path)
tf.extractall(path = output_file_path)
if not keep_original:
os.remove(input_file_path)
class CoverageStats:
"""A class to return coverage stats for an array of nucleotide level coverages.
FIXME: This class should replace `coverage_c` function in bamops to avoid redundancy.
"""
def __init__(self, coverage, skip_outliers=False):
self.min = np.amin(coverage)
self.max = np.amax(coverage)
self.median = np.median(coverage)
self.mean = np.mean(coverage)
self.std = np.std(coverage)
self.detection = np.sum(coverage > 0) / len(coverage)
if coverage.size < 4:
self.mean_Q2Q3 = self.mean
else:
sorted_c = sorted(coverage)
Q = int(coverage.size * 0.25)
Q2Q3 = sorted_c[Q:-Q]
self.mean_Q2Q3 = np.mean(Q2Q3)
if skip_outliers:
self.is_outlier = None
else:
self.is_outlier = get_list_of_outliers(coverage, median=self.median) # this is an array not a list
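# A hedged usage sketch for CoverageStats (the coverage array is made up).
# `mean_Q2Q3` is the mean of the middle 50% of sorted values, which damps the
# influence of outliers relative to the plain mean:
def _example_coverage_stats():
    coverage = np.array([0, 5, 5, 5, 5, 5, 5, 500])
    stats = CoverageStats(coverage, skip_outliers=True)
    assert stats.mean == 66.25       # dragged up by the single 500x position
    assert stats.mean_Q2Q3 == 5.0    # the interquartile mean ignores it
    assert stats.detection == 0.875  # 7 of 8 positions have coverage > 0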
class RunInDirectory(object):
""" Run any block of code in a specified directory. Return to original directory
Parameters
==========
run_dir : str or Path-like
The directory the block of code should be run in
"""
def __init__(self, run_dir):
self.run_dir = run_dir
self.cur_dir = os.getcwd()
if not os.path.isdir(self.run_dir):
raise ConfigError("RunInDirectory :: %s is not a directory." % str(self.run_dir))
def __enter__(self):
os.chdir(self.run_dir)
def __exit__(self, exc_type, exc_value, traceback):
os.chdir(self.cur_dir)
def run_command(cmdline, log_file_path, first_line_of_log_is_cmdline=True, remove_log_file_if_exists=True):
""" Uses subprocess.call to run your `cmdline`
Parameters
==========
cmdline : str or list
The command to be run, e.g. "echo hello" or ["echo", "hello"]
log_file_path : str or Path-like
All stdout from the command is sent to this filepath
Raises ConfigError if ret_val < 0, or on OSError. Does NOT raise if program terminated with exit code > 0.
"""
cmdline = format_cmdline(cmdline)
if anvio.DEBUG:
Progress().reset()
Run().info("[DEBUG] `run_command` is running", \
' '.join(['%s' % (('"%s"' % str(x)) if ' ' in str(x) else ('%s' % str(x))) for x in cmdline]), \
nl_before=1, nl_after=1, mc='red', lc='yellow')
filesnpaths.is_output_file_writable(log_file_path)
if remove_log_file_if_exists and os.path.exists(log_file_path):
os.remove(log_file_path)
try:
if first_line_of_log_is_cmdline:
with open(log_file_path, "a") as log_file: log_file.write('# DATE: %s\n# CMD LINE: %s\n' % (get_date(), ' '.join(cmdline)))
log_file = open(log_file_path, 'a')
ret_val = subprocess.call(cmdline, shell=False, stdout=log_file, stderr=subprocess.STDOUT)
log_file.close()
# This can happen in POSIX due to signal termination (e.g., SIGKILL).
if ret_val < 0:
raise ConfigError("Command failed to run. What command, you say? This: '%s'" % ' '.join(cmdline))
else:
return ret_val
except OSError as e:
raise ConfigError("command was failed for the following reason: '%s' ('%s')" % (e, cmdline))
def start_command(cmdline, log_file_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE, first_line_of_log_is_cmdline=True, remove_log_file_if_exists=True):
"""Start a command using subprocess.Popen, returning an object that can be monitored."""
cmdline = format_cmdline(cmdline)
if anvio.DEBUG:
Progress().reset()
Run().info("[DEBUG] `start_command`",
' '.join(['%s' % (('"%s"' % str(x)) if ' ' in str(x) else ('%s' % str(x))) for x in cmdline]),
nl_before=1, nl_after=1, mc='red', lc='yellow')
filesnpaths.is_output_file_writable(log_file_path)
if remove_log_file_if_exists and os.path.exists(log_file_path):
os.remove(log_file_path)
try:
if first_line_of_log_is_cmdline:
with open(log_file_path, 'a') as log_file:
log_file.write(f"# DATE: {get_date()}\n# CMD LINE: {' '.join(cmdline)}\n")
p = subprocess.Popen(cmdline, stdout=stdout, stderr=stderr)
return p
except OSError as e:
raise ConfigError("The command failed for the following reason: '%s' ('%s')" % (e, cmdline))
def run_command_STDIN(cmdline, log_file_path, input_data, first_line_of_log_is_cmdline=True, remove_log_file_if_exists=True):
"""Uses subprocess.Popen and sends data to your `cmdline` through STDIN"""
cmdline = format_cmdline(cmdline)
filesnpaths.is_output_file_writable(log_file_path)
if remove_log_file_if_exists and os.path.exists(log_file_path):
os.remove(log_file_path)
try:
if first_line_of_log_is_cmdline:
with open(log_file_path, "a") as log_file: log_file.write('# DATE: %s\n# CMD LINE: %s\n' % (get_date(), ' '.join(cmdline)))
p = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
ret_val = p.communicate(input=input_data.encode('utf-8'))[0]
return ret_val.decode()
except OSError as e:
raise ConfigError("command was failed for the following reason: '%s' ('%s')" % (e, cmdline))
def get_command_output_from_shell(cmd_line):
ret_code = 0
try:
out_bytes = subprocess.check_output(cmd_line.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
out_bytes = e.output.decode("utf-8")
ret_code = e.returncode
return out_bytes, ret_code
def store_array_as_TAB_delimited_file(a, output_path, header, exclude_columns=[]):
filesnpaths.is_output_file_writable(output_path)
num_fields = len(a[0])
if len(header) != num_fields:
raise ConfigError("store array: header length (%d) differs from data (%d)..." % (len(header), num_fields))
for col in exclude_columns:
if not col in header:
raise ConfigError("store array: column %s is not in the header array...")
exclude_indices = set([header.index(c) for c in exclude_columns])
header = [header[i] for i in range(0, len(header)) if i not in exclude_indices]
f = open(output_path, 'w')
f.write('%s\n' % '\t'.join(header))
for row in a:
f.write('\t'.join([str(row[i]) for i in range(0, num_fields) if i not in exclude_indices]) + '\n')
f.close()
return output_path
def multi_index_pivot(df, index = None, columns = None, values = None):
# https://github.com/pandas-dev/pandas/issues/23955
output_df = df.copy(deep = True)
if index is None:
names = list(output_df.index.names)
output_df = output_df.reset_index()
else:
names = index
output_df = output_df.assign(tuples_index = [tuple(i) for i in output_df[names].values])
if isinstance(columns, list):
output_df = output_df.assign(tuples_columns = [tuple(i) for i in output_df[columns].values]) # hashable
output_df = output_df.pivot(index = 'tuples_index', columns = 'tuples_columns', values = values)
output_df.columns = pd.MultiIndex.from_tuples(output_df.columns, names = columns) # reduced
else:
output_df = output_df.pivot(index = 'tuples_index', columns = columns, values = values)
output_df.index = pd.MultiIndex.from_tuples(output_df.index, names = names)
return output_df
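# A hedged usage sketch for `multi_index_pivot` (the dataframe is made up):
def _example_multi_index_pivot():
    df = pd.DataFrame({'genome': ['g1', 'g1', 'g2', 'g2'],
                       'bin':    ['b1', 'b1', 'b2', 'b2'],
                       'sample': ['s1', 's2', 's1', 's2'],
                       'value':  [1.0, 2.0, 3.0, 4.0]})
    wide = multi_index_pivot(df, index=['genome', 'bin'], columns='sample', values='value')
    # `wide` is now indexed by (genome, bin) tuples with one column per sample:
    assert wide.loc[('g2', 'b2'), 's1'] == 3.0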
def store_dataframe_as_TAB_delimited_file(d, output_path, columns=None, include_index=False, index_label="index", naughty_characters=[-np.inf, np.inf], rep_str=""):
""" Stores a pandas DataFrame as a tab-delimited file.
Parameters
==========
d: pandas DataFrame
DataFrame you want to save.
output_path: string
Output_path for the file. Checks if file is writable.
columns: list, pandas.Index, tuple (default = d.columns)
Columns in DataFrame to write. Default is all, in the order they appear.
include_index: Boolean (default = False)
Should the index be included as the first column? Default is no.
index_label: String (default = "index")
If include_index is True, this is the header for the index.
    naughty_characters: list (default = [-np.inf, np.inf])
A list of elements that are replaced with rep_str. Note that all np.nan's (aka NaN's) are also replaced with
rep_str.
rep_str: String (default = "")
The string that elements belonging to naughty_characters are replaced by.
Returns
=======
output_path
"""
filesnpaths.is_output_file_writable(output_path)
if not columns:
columns = d.columns
d.replace(naughty_characters, np.nan, inplace=True)
d.to_csv(output_path, sep="\t", columns=columns, index=include_index, index_label=index_label, na_rep=rep_str)
return output_path
def store_dict_as_TAB_delimited_file(d, output_path, headers=None, file_obj=None, key_header=None, keys_order=None, header_item_conversion_dict=None, do_not_close_file_obj=False):
"""Store a dictionary of dictionaries as a TAB-delimited file.
Parameters
==========
d: dictionary
A dictionary of dictionaries where each first order key represents a row,
and each key in the subdictionary represents a column.
output_path: string
        Output path for the TAB-delimited file.
headers: list
Headers of the subdictionary to include (by default include all)
these are the columns that will be included in the output file (this
doesn't include the first column which is the keys of the major dictionary)
file_obj: file_object
        A file object to write to (instead of the output file path)
key_header: string
The header for the first column ('key' if None)
header_item_conversion_dict: dictionary
To replace the column names at the time of writing.
do_not_close_file_obj: boolean
If True, file object will not be closed after writing the dictionary to the file
Returns
=======
output_path
"""
if not file_obj:
filesnpaths.is_output_file_writable(output_path)
if not file_obj:
f = open(output_path, 'w')
else:
f = file_obj
key_header = key_header if key_header else 'key'
if not headers:
headers = [key_header] + sorted(list(d.values())[0].keys())
# write header after converting column names (if necessary)
if header_item_conversion_dict:
missing_headers = [h for h in headers[1:] if h not in header_item_conversion_dict]
if len(missing_headers):
raise ConfigError("Your header item conversion dict is missing keys for one or "
"more headers :/ Here is a list of those that do not have any "
"entry in the dictionary you sent: '%s'." % (', '.join(missing_headers)))
header_text = '\t'.join([headers[0]] + [header_item_conversion_dict[h] for h in headers[1:]])
else:
header_text = '\t'.join(headers)
if anvio.AS_MARKDOWN:
tab = '\t'
f.write(f"|{header_text.replace(tab, '|')}|\n")
f.write(f"|{':--|' + '|'.join([':--:'] * (len(headers[1:])))}|\n")
else:
f.write(f"{header_text}\n")
if not keys_order:
keys_order = sorted(d.keys())
else:
missing_keys = [k for k in keys_order if k not in d]
if len(missing_keys):
if anvio.DEBUG:
if len(missing_keys) > 10:
raise ConfigError("Some keys (n=%d) are not in your dictionary :/ Here is the first ten "
" of them: %s" % (len(missing_keys), missing_keys[:10].__str__()))
else:
raise ConfigError("Some keys are not in your dictionary :/ Here they are: %s" % missing_keys.__str__())
else:
raise ConfigError("Some keys are not in your dictionary :/ Use `--debug` to see where this "
"error is coming from the codebase with a list of example keys that are "
"missing.")
for k in keys_order:
line = [str(k)]
for header in headers[1:]:
try:
val = d[k][header]
except KeyError:
raise ConfigError("Header ('%s') is not found in the dict :/" % (header))
except TypeError:
raise ConfigError("Your dictionary is not properly formatted to be exported "
"as a TAB-delimited file :/ You ask for '%s', but it is not "
"even a key in the dictionary" % (header))
line.append(str(val) if not isinstance(val, type(None)) else '')
if anvio.AS_MARKDOWN:
f.write(f"|{'|'.join(map(str, line))}|\n")
else:
f.write('%s\n' % '\t'.join(line))
if not do_not_close_file_obj:
f.close()
return output_path
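# A hedged usage sketch for `store_dict_as_TAB_delimited_file` (the dictionary
# and the output file name are made up):
def _example_store_dict_as_TAB_delimited_file():
    d = {'gene_1': {'length': 900, 'partial': 0},
         'gene_2': {'length': 1200, 'partial': 1}}
    # writes a header line 'key<TAB>length<TAB>partial', then one row per key:
    store_dict_as_TAB_delimited_file(d, 'genes.txt')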
def convert_numpy_array_to_binary_blob(array, compress=True):
if compress:
return gzip.compress(memoryview(array), compresslevel=1)
else:
return memoryview(array)
def convert_binary_blob_to_numpy_array(blob, dtype, decompress=True):
if decompress:
return np.frombuffer(gzip.decompress(blob), dtype=dtype)
else:
return np.frombuffer(blob, dtype=dtype)
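# A hedged round-trip sketch for the two blob converters above:
def _example_blob_roundtrip():
    a = np.arange(10, dtype=np.int64)
    blob = convert_numpy_array_to_binary_blob(a)            # gzip-compressed bytes
    b = convert_binary_blob_to_numpy_array(blob, np.int64)  # back to an array
    assert np.array_equal(a, b)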
@jit(nopython=True)
def add_to_2D_numeric_array(x, y, a, count=1):
"""just-in-time compiled function
Parameters
==========
x : array
array of row indices
y : array
array of corresponding y indices
count : int, 1
How much to add to each coordinate
Examples
========
Make a 5x20000 array (a) and define 95 coordinate positions to update (i and p)
>>> a = np.zeros((5, 20000))
>>> i = np.random.choice(range(5), size=95, replace=True)
>>> p = np.random.choice(range(100), size=95, replace=False) + 1000
For comparison, define the slow method
>>> def add_to_2D_numeric_array_slow(x, y, a, count=1):
>>> for idx, pos in zip(x, y):
>>> a[idx, pos] += count
>>> return a
Compare the speeds
>>> %timeit add_to_2D_numeric_array_slow(i, p, a)
74.5 µs ± 4.42 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)
>>> %timeit _add_to_2D_numeric_array(i, p, a)
798 ns ± 12.7 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
"""
for idx, pos in zip(x, y):
a[idx, pos] += count
return a
def is_all_columns_present_in_TAB_delim_file(columns, file_path):
    columns_in_file = get_columns_of_TAB_delim_file(file_path)
    return False if len([False for c in columns if c not in columns_in_file]) else True
def HTMLColorToRGB(colorstring, scaled=True):
""" convert #RRGGBB to an (R, G, B) tuple """
colorstring = colorstring.strip()
if colorstring[0] == '#': colorstring = colorstring[1:]
if len(colorstring) != 6:
raise ValueError("input #%s is not in #RRGGBB format" % colorstring)
r, g, b = colorstring[:2], colorstring[2:4], colorstring[4:]
r, g, b = [int(n, 16) for n in (r, g, b)]
if scaled:
return (r / 255.0, g / 255.0, b / 255.0)
else:
return (r, g, b)
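# A hedged illustration of `HTMLColorToRGB` (the color is arbitrary):
def _example_html_color_to_rgb():
    assert HTMLColorToRGB('#ff8000', scaled=False) == (255, 128, 0)
    r, g, b = HTMLColorToRGB('#ff8000')  # scaled to the 0..1 range by default
    assert (r, b) == (1.0, 0.0) and abs(g - 128 / 255.0) < 1e-9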
def transpose_tab_delimited_file(input_file_path, output_file_path, remove_after=False):
filesnpaths.is_file_tab_delimited(input_file_path)
filesnpaths.is_output_file_writable(output_file_path)
file_content = [line.strip('\n').split('\t') for line in open(input_file_path, 'rU').readlines()]
output_file = open(output_file_path, 'w')
for entry in zip(*file_content):
output_file.write('\t'.join(entry) + '\n')
output_file.close()
if remove_after:
os.remove(input_file_path)
return output_file_path
def split_fasta(input_file_path, parts=1, file_name_prefix=None, shuffle=False, output_dir=None):
"""Splits a given FASTA file into multiple parts.
Please note that this function will not clean after itself. You need to take care of the
output files in context.
Parameters
==========
input_file_path : str
FASTA-formatted flat text file to be split
parts : int
Number of parts the input file to be split into
file_name_prefix : str
Preferably a single-word prefix for the output files
shuffle : bool
Whether input sequences should be randomly shuffled (so the input sequences
randomly distribute across output files)
output_dir : str, path
Output directory. By default, anvi'o will store things in a new directory under
the system location for temporary files
Returns
=======
output_file_paths : list
Array with `parts` number of elements where each item is an output file path
"""
if not file_name_prefix:
file_name_prefix = os.path.basename(input_file_path)
else:
if '/' in file_name_prefix:
raise ConfigError("File name prefix for split fasta can't contain slash characters. It is not "
"supposed to be a path after all :/")
# check input
filesnpaths.is_file_fasta_formatted(input_file_path)
# check output
if not output_dir:
output_dir = filesnpaths.get_temp_directory_path()
else:
filesnpaths.gen_output_directory(output_dir)
filesnpaths.is_output_dir_writable(output_dir)
source = u.ReadFasta(input_file_path, quiet=True)
length = len(source.ids)
if length < parts:
parts = length
chunk_size = length // parts
output_file_paths = []
GET_OUTPUT_FILE_PATH = lambda p: os.path.join(output_dir, ".".join([file_name_prefix, str(p)]))
if shuffle:
output_file_paths = [f'{GET_OUTPUT_FILE_PATH(part_no)}' for part_no in range(parts)]
output_fastas = [u.FastaOutput(file_name) for file_name in output_file_paths]
# The first sequence goes to the first outfile, the second seq to the second outfile, and so on.
for seq_idx, (seq_id, seq) in enumerate(zip(source.ids, source.sequences)):
which = seq_idx % parts
output_fastas[which].write_id(seq_id)
output_fastas[which].write_seq(seq)
for output_fasta in output_fastas:
output_fasta.close()
else:
for part_no in range(parts):
output_file = GET_OUTPUT_FILE_PATH(part_no)
output_fasta = u.FastaOutput(output_file)
chunk_start = chunk_size * part_no
chunk_end = chunk_start + chunk_size
if (part_no + 1 == parts):
# if this is the last chunk make sure it contains everything till end.
chunk_end = length
for i in range(chunk_start, chunk_end):
output_fasta.write_id(source.ids[i])
output_fasta.write_seq(source.sequences[i])
output_fasta.close()
output_file_paths.append(output_file)
source.close()
return output_file_paths
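# A hedged note on the shuffle mode above: sequences are dealt out round-robin,
# so with parts=3 the destination file of sequence i is simply i % 3:
#   seq 0 -> part 0, seq 1 -> part 1, seq 2 -> part 2, seq 3 -> part 0, ...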
def get_random_colors_dict(keys):
# FIXME: someone's gotta implement this
# keys : set(1, 2, 3, ..)
# returns: {1: '#ffffff', 2: '#888888', 3: '#222222', ...}
return dict([(k, None) for k in keys])
def summarize_alignment(sequence):
"""Takes an alignment, and returns its summary.
>>> alignment = '----AA---TTTT-----CC-GGGGGGGG----------------ATCG--'
    >>> sequence = alignment.replace('-', '')
    >>> summarize_alignment(alignment)
'-|4|2|3|4|5|2|1|8|16|4|2'
>>> summary = summarize_alignment(alignment)
>>> restore_alignment(sequence, summary)
'----AA---TTTT-----CC-GGGGGGGG----------------ATCG--'
"""
alignment_summary = []
starts_with_gap = sequence[0] == '-'
in_gap, in_nt = (True, False) if starts_with_gap else (False, True)
gap, nt = 0, 0
for i in range(0, len(sequence)):
if sequence[i] == '-':
if in_nt:
alignment_summary.append(nt) if nt else None
in_gap, in_nt = True, False
nt = 0
gap = 1
else:
gap += 1
else:
if in_gap:
alignment_summary.append(gap) if gap else None
in_gap, in_nt = False, True
gap = 0
nt = 1
else:
nt += 1
alignment_summary.append(gap or nt)
return '|'.join(['-' if starts_with_gap else '.'] + [str(s) for s in alignment_summary])
def restore_alignment(sequence, alignment_summary, from_aa_alignment_summary_to_dna=False):
"""Restores an alignment from its sequence and alignment summary.
See `summarize_alignment` for the `alignment_summary` compression.
"""
if not alignment_summary:
return sequence
if isinstance(sequence, bytes):
sequence = list(sequence.decode('utf-8'))
elif isinstance(sequence, str):
sequence = list(sequence)
else:
raise ConfigError("Sequence must be of type str or bytes. What you sent is of %s :/" % type(sequence))
in_gap = alignment_summary[0] == '-'
alignment = ''
for part in [(int(p) * 3) if from_aa_alignment_summary_to_dna else int(p) for p in alignment_summary.split('|')[1:]]:
if in_gap:
alignment += '-' * part
in_gap = False
else:
for i in range(0, part):
alignment += sequence.pop(0)
in_gap = True
if from_aa_alignment_summary_to_dna:
return alignment + ''.join(sequence)
else:
return alignment
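# A hedged round-trip sketch for the two alignment helpers above, reusing the
# example alignment from the `summarize_alignment` docstring:
def _example_alignment_summary_roundtrip():
    alignment = '----AA---TTTT-----CC-GGGGGGGG----------------ATCG--'
    sequence = alignment.replace('-', '')
    summary = summarize_alignment(alignment)
    assert summary == '-|4|2|3|4|5|2|1|8|16|4|2'
    assert restore_alignment(sequence, summary) == alignment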
def get_column_data_from_TAB_delim_file(input_file_path, column_indices=[], expected_number_of_fields=None, separator='\t'):
"""Returns a dictionary where keys are the column indices, and items are the list of entries
    found in that column"""
filesnpaths.is_file_exists(input_file_path)
filesnpaths.is_file_tab_delimited(input_file_path, expected_number_of_fields=expected_number_of_fields)
d = {}
for index in column_indices:
d[index] = []
with open(input_file_path, "rU") as input_file:
for line in input_file.readlines():
fields = line.strip('\n').split(separator)
for index in column_indices:
try:
d[index].append(fields[index])
except:
raise ConfigError("get_column_data_from_TAB_delim_file is speaking: The file you sent "
"does not have data for the column index %d. Something is wrong :/" % (index))
return d
def get_columns_of_TAB_delim_file(file_path, include_first_column=False):
filesnpaths.is_file_exists(file_path)
if include_first_column:
return open(file_path, 'rU').readline().strip('\n').split('\t')
else:
return open(file_path, 'rU').readline().strip('\n').split('\t')[1:]
def get_names_order_from_newick_tree(newick_tree, newick_format=1, reverse=False, names_with_only_digits_ok=False):
filesnpaths.is_proper_newick(newick_tree, names_with_only_digits_ok=names_with_only_digits_ok)
tree = Tree(newick_tree, format=newick_format)
names = [n.name for n in tree.get_leaves()]
return list(reversed(names)) if reverse else names
def get_vectors_from_TAB_delim_matrix(file_path, cols_to_return=None, rows_to_return=[], transpose=False):
filesnpaths.is_file_exists(file_path)
filesnpaths.is_file_tab_delimited(file_path)
if transpose:
transposed_file_path = filesnpaths.get_temp_file_path()
transpose_tab_delimited_file(file_path, transposed_file_path)
file_path = transposed_file_path
rows_to_return = set(rows_to_return)
vectors = []
id_to_sample_dict = {}
sample_to_id_dict = {}
input_matrix = open(file_path, 'rU')
columns = input_matrix.readline().strip('\n').split('\t')[1:]
fields_of_interest = []
if cols_to_return:
fields_of_interest = [columns.index(col) for col in cols_to_return]
else:
fields_of_interest = [f for f in range(0, len(columns)) if constants.IS_ESSENTIAL_FIELD(columns[f])]
# update columns:
columns = [columns[i] for i in fields_of_interest]
if not len(columns):
raise ConfigError("Only a subset (%d) of fields were requested by the caller, but none of them was found "
"in the matrix (%s) :/" % (len(cols_to_return), file_path))
id_counter = 0
for line in input_matrix.readlines():
row_name = line.strip().split('\t')[0]
if rows_to_return and row_name not in rows_to_return:
continue
id_to_sample_dict[id_counter] = row_name
fields = line.strip().split('\t')[1:]
try:
if fields_of_interest:
vector = [float(fields[i]) for i in fields_of_interest]
else:
vector = [float(f) for f in fields]
except ValueError:
raise ConfigError("Matrix should contain only numerical values.")
vectors.append(vector)
id_counter += 1
input_matrix.close()
if transpose:
# remove clutter
os.remove(file_path)
sample_to_id_dict = dict([(v, k) for k, v in id_to_sample_dict.items()])
return id_to_sample_dict, sample_to_id_dict, columns, vectors
def apply_and_concat(df, fields, func, column_names, func_args=tuple([])):
""" This function has been taken from https://tinyurl.com/y9ylqy4l
and has been modified for speed considerations using this blog post:
https://tinyurl.com/ya4e5tz3. Its utility is to append multiple columns to an existing
dataframe row by row. This is usually a bad idea because operations can typically be
vectorized. However, when they cannot, looping through each row becomes a necessary evil.
df: pandas DataFrame object
An existing dataframe to loop through and append columns to.
fields: list
A list of columns in the existing dataframe used to calculate the new columns
func: function
A function that takes as its first argument a row of `df` (i.e. a pd.Series
object) and potentially additional positional arguments `func_args`. It should return a
tuple of values with the same length as `column_names`.
func_args: tuple
A tuple of arguments passed to `func` besides the assumed first argument (a pd.Series
object). For example, if `def func(row, a)`, then `func_args = (a,)`. If func_args is an
empty tuple, `func` should take no other args.
column_names: list
A list of column headers for the newly appended columns
"""
d = {column_name: [] for column_name in column_names}
for _, row in df[fields].iterrows():
out_values = func(row, *func_args)
for ind, column_name in enumerate(column_names):
d[column_name].append(out_values[ind])
df2 = pd.DataFrame(d, index=df.index)
return pd.concat((df, df2), axis=1, sort=True)
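# A minimal usage sketch for `apply_and_concat` (toy values, not part of the
# original module): appending per-row sum and product columns, the kind of
# row-wise operation the docstring above describes.
#
#   >>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
#   >>> apply_and_concat(df, ['a', 'b'], lambda row: (row['a'] + row['b'], row['a'] * row['b']), ['sum', 'product'])
#      a  b  sum  product
#   0  1  3    4        3
#   1  2  4    6        8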
def run_functional_enrichment_stats(functional_occurrence_stats_input_file_path, enrichment_output_file_path=None, run=run, progress=progress):
"""This function runs the enrichment analysis implemented by Amy Willis.
Since the enrichment analysis is an R script, we interface with that program by
producing a compatible input file first, and then calling this function from various
places in the anvi'o code.
Parameters
==========
functional_occurrence_stats_input_file_path, str file path
This is the primary input file for the R script, `anvi-script-enrichment-stats`.
For the most up-to-date file header, please see the header section of the R
script.
enrichment_output_file_path, str file path
An optional output file path for the enrichment analysis.
Returns
=======
enrichment_output: dict
The enrichment analysis results
"""
run.warning("This program will compute enrichment scores using an R script developed by Amy Willis. "
"You can find more information about it in the following paper: Shaiber, Willis et al "
"(https://doi.org/10.1186/s13059-020-02195-w). When you publish your findings, please "
"do not forget to properly credit this work. :)", lc='green', header="CITATION")
# sanity check for R packages
package_dict = get_required_packages_for_enrichment_test()
check_R_packages_are_installed(package_dict)
# make sure the input file path is a TAB delimited file that exists.
filesnpaths.is_file_tab_delimited(functional_occurrence_stats_input_file_path)
if not enrichment_output_file_path:
enrichment_output_file_path = filesnpaths.get_temp_file_path()
elif filesnpaths.is_file_exists(enrichment_output_file_path, dont_raise=True):
raise ConfigError(f"The file {enrichment_output_file_path} already exists and anvi'o doesn't like to overwrite it :/ "
f"Please either delete the existing file, or provide another file path before re-running this "
f"program again.")
log_file_path = filesnpaths.get_temp_file_path()
run.warning(None, header="AMY's ENRICHMENT ANALYSIS 🚀", lc="green")
run.info("Functional occurrence stats input file path: ", functional_occurrence_stats_input_file_path)
run.info("Functional enrichment output file path: ", enrichment_output_file_path)
run.info("Temporary log file (use `--debug` to keep): ", log_file_path, nl_after=2)
# run enrichment script
progress.new('Functional enrichment analysis')
progress.update("Running Amy's enrichment")
run_command(['anvi-script-enrichment-stats',
'--input', f'{functional_occurrence_stats_input_file_path}',
'--output', f'{enrichment_output_file_path}'], log_file_path)
progress.end()
if not filesnpaths.is_file_exists(enrichment_output_file_path, dont_raise=True):
raise ConfigError(f"Something went wrong during the functional enrichment analysis :( We don't "
f"know what happened, but this log file could contain some clues: {log_file_path}")
if filesnpaths.is_file_empty(enrichment_output_file_path):
raise ConfigError(f"Something went wrong during the functional enrichment analysis :( "
f"An output file was created, but it was empty... We hope that this "
f"log file offers some clues: {log_file_path}")
# if everything went okay, we remove the log file
if anvio.DEBUG:
run.warning(f"Due to the `--debug` flag, anvi'o keeps the log file at '{log_file_path}'.", lc='green', header="JUST FYI")
else:
os.remove(log_file_path)
enrichment_stats = get_TAB_delimited_file_as_dictionary(enrichment_output_file_path)
# here we will naively try to cast the known numerical columns to float, along with every
# column that matches `p_*` to float, and every column that matches `N_*` to int.
column_names = list(enrichment_stats.values())[0].keys()
column_names_to_cast = [(c, float) for c in ['unadjusted_p_value', 'adjusted_q_value', 'enrichment_score']] + \
[(c, float) for c in column_names if c.startswith('p_')] + \
[(c, int) for c in column_names if c.startswith('N_')]
for entry in enrichment_stats:
for column_name, to_cast in column_names_to_cast:
try:
enrichment_stats[entry][column_name] = to_cast(enrichment_stats[entry][column_name])
except:
raise ConfigError(f"Something sad happened :( Anvi'o expects the functional enrichment output to contain "
f"values for the column name `{column_name}` that can be represented as `{to_cast}`. Yet, the "
f"entry `{entry}` in your output file contained a value of `{enrichment_stats[entry][column_name]}`. "
f"We have no idea how this happened, but it is not good :/ If you would like to mention this "
f"to someone, please attach to your inquiry the following file: '{enrichment_output_file_path}'.")
return enrichment_stats
def get_required_packages_for_enrichment_test():
"""Return a dict with the packages as keys and installation instructions as values"""
packages = ["tidyverse", "stringi", "magrittr", "qvalue", "optparse"]
installation_instructions = ["conda install -c r r-tidyverse",
"conda install -c r r-stringi",
"conda install -c bioconda r-magrittr",
"conda install -c bioconda bioconductor-qvalue",
"conda install -c conda-forge r-optparse"]
return dict(zip(packages, installation_instructions))
def check_R_packages_are_installed(required_package_dict):
"""Checks if R and the provided R packages are installed on the user's system.
If not, raises an error with installation instructions for any missing packages.
Credits to Ryan Moore (https://github.com/mooreryan) for this solution!
(https://github.com/merenlab/anvio/commit/91f9cf1531febdbf96feb74c3a68747b91e868de#r35353982)
Parameters
==========
required_package_dict, dictionary
keys should be R package names, values should be the corresponding installation instruction for the package
See get_required_packages_for_enrichment_test() for an example
"""
is_program_exists('Rscript')
missing_packages = []
log_file = filesnpaths.get_temp_file_path()
for lib in required_package_dict:
ret_val = run_command(["Rscript", "-e", "library('%s')" % lib], log_file)
if ret_val != 0:
missing_packages.append(lib)
if missing_packages:
raise ConfigError("The following R packages are required in order to run this, but seem to be missing or broken: '%(missing)s'. "
"If you have installed anvi'o through conda, BEFORE ANYTHING ELSE we would suggest you to run the command "
"Rscript -e \"update.packages(repos='https://cran.rstudio.com')\" in your terminal. This will try to update "
"all R libraries on your conda environment and will likely solve this problem. If it doesn't work, then you "
"will need to try a bit harder, so here are some pointers: if you are using conda, in an ideal world you"
"should be able to install these packages by running the following commands: %(conda)s. But if this option "
"doesn't seem to be working for you, then you can also try to install the problem libraries directly through R, "
"for instance by typing in your terminal, Rscript -e 'install.packages(\"%(example)s\", "
"repos=\"https://cran.rstudio.com\")' and see if it will address the installation issue. UNFORTUNATELY, in "
"some cases you may continue to see this error despite the fact that you have these packages installed :/ It "
"would most likely mean that some other issues interfere with their proper usage during run-time. If you have "
"these packages installed but you continue seeing this error, please run in your terminal Rscript -e "
"\"library(%(example)s)\" to see what is wrong with %(example)s on your system. Running this on your "
"terminal will test whether the package is properly loading or not and the resulting error messages will likely "
"be much more helpful solving the issue. Apologies for the frustration. R frustrates everyone." % \
{'missing': ', '.join(missing_packages),
'conda': ', '.join(['"%s"' % required_package_dict[i] for i in missing_packages]),
'example': missing_packages[0]})
else:
os.remove(log_file)
def get_values_of_gene_level_coverage_stats_as_dict(gene_level_coverage_stats_dict, key, genes_of_interest=None, samples_of_interest=None, as_pandas=False):
"""
This function takes the gene_level_coverage_stats_dict and return one of the values
as a matrix-like dict of dicts.
THIS FUNCTION IS IN utils AND NOT IN summarizer, or dbops, because it used to be in summarizer
and why should it be in summarizer?!? that makes no sense. And also mcg-classifier doesn't want
to initialize summarizer, it wants to be able to just get the gene_level_coverage_stats_dict as
input and then deal with it.
There is also an option to get the data back as a pandas dataframe.
"""
legal_keys = {'mean_coverage', 'detection', 'non_outlier_mean_coverage', 'non_outlier_coverage_std'}
if key not in legal_keys and as_pandas:
raise ConfigError("%s is not a valid key for creating a pandas dataframe of values of gene_level_coverage_stats_dict. "
"Here is a list of the valid keys: %s" % (key, list(legal_keys)))
gene_callers_ids = set(gene_level_coverage_stats_dict.keys())
samples = set(next(iter(gene_level_coverage_stats_dict.values())).keys())
if genes_of_interest is not None:
missing_genes = [g for g in genes_of_interest if g not in gene_callers_ids]
if len(missing_genes):
raise ConfigError("The following genes are not in the gene_level_coverage_stats_dict, and yet you are asking for them: %s" % missing_genes)
else:
genes_of_interest = gene_callers_ids
if samples_of_interest is not None:
missing_samples = [s for s in samples_of_interest if s not in samples]
if len(missing_samples):
raise ConfigError("The following samples are not in the gene_level_coverage_stats_dict, and yet you are asking for them: %s" % missing_samples)
else:
samples_of_interest = samples
d = {}
for gene_callers_id in genes_of_interest:
d[gene_callers_id] = {}
for sample_name in samples_of_interest:
d[gene_callers_id][sample_name] = gene_level_coverage_stats_dict[gene_callers_id][sample_name][key]
if as_pandas:
# This option is used by the mcg-classifier.
import pandas as pd
return pd.DataFrame.from_dict(d, orient='index')
else:
return d
def get_indices_for_outlier_values(c):
is_outlier = get_list_of_outliers(c)
return set([p for p in range(0, c.size) if is_outlier[p]])
def get_list_of_outliers(values, threshold=None, zeros_are_outliers=False, median=None):
"""Return boolean array of whether values are outliers (True means outlier)
Modified from Joe Kington's (https://stackoverflow.com/users/325565/joe-kington)
implementation computing absolute deviation around the median.
Parameters
==========
values : array-like
A num_observations by num_dimensions array of observations.
threshold : number, None
The modified z-score to use as a threshold. Observations with
a modified z-score (based on the median absolute deviation) greater
than this value will be classified as outliers.
median : array-like, None
Pass median of values if you already calculated it to save time.
Returns
=======
mask : numpy array (dtype=bool)
A num_observations-length boolean array. True means outlier
Examples
========
Create an array with 5 manually created outliers:
>>> import numpy as np
>>> import anvio.utils as utils
>>> array = 10*np.ones(30) + np.random.rand(30)
>>> array[5] = -10
>>> array[9] = -10
>>> array[12] = -10
>>> array[15] = -10
>>> array[23] = -10
>>> mask = utils.get_list_of_outliers(array, threshold=5)
>>> print(mask)
[False False False False False True False False False True False False
True False False True False False False False False False False True
False False False False False False]
As can be seen, mask returns a numpy array of True/False values, where True corresponds to
outlier values.
>>> outlier_indices = np.where(mask == True)[0]
>>> print(outlier_indices)
[ 5  9 12 15 23]
The `True` values indeed occur at the indices where the values had been manually changed to
represent outliers.
References
==========
Boris Iglewicz and David Hoaglin (1993), "Volume 16: How to Detect and
Handle Outliers", The ASQC Basic References in Quality Control:
Statistical Techniques, Edward F. Mykytka, Ph.D., Editor.
http://www.sciencedirect.com/science/article/pii/S0022103113000668
"""
if threshold is None:
threshold = 1.5
if len(values.shape) == 1:
values = values[:, None]
if median is None: median = np.median(values, axis=0)
diff = np.sum((values - median) ** 2, axis=-1)
diff = np.sqrt(diff)
median_absolute_deviation = np.median(diff)
if not median_absolute_deviation:
if values[0] == 0:
# A vector of all zeros is considered "all outliers"
return np.array([True] * values.size)
else:
# A vector of uniform non-zero values is "all non-outliers"
# This could be important for silly cases (like in megahit) in which there is a maximum value for coverage
return np.array([False] * values.size)
modified_z_score = 0.6745 * diff / median_absolute_deviation
outliers = modified_z_score > threshold  # True marks an outlier, as the docstring promises
if not zeros_are_outliers:
return outliers
else:
zero_positions = [x for x in range(len(values)) if values[x] == 0]
for i in zero_positions:
outliers[i] = True
return outliers
def get_gene_caller_ids_from_args(gene_caller_ids, delimiter=','):
gene_caller_ids_set = set([])
if gene_caller_ids:
if os.path.exists(gene_caller_ids):
gene_caller_ids_set = set([g.strip() for g in open(gene_caller_ids, 'r').readlines()])
else:
gene_caller_ids_set = set([g.strip() for g in gene_caller_ids.split(delimiter)])
try:
gene_caller_ids_set = set([int(g) for g in gene_caller_ids_set])
except ValueError:
g = gene_caller_ids_set.pop()
raise ConfigError("The gene calls you provided do not look like gene caller ids anvi'o is used to working with :/ Here is "
"one of them: '%s' (%s)." % (g, type(g)))
return gene_caller_ids_set
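# A minimal sketch of the two input styles this function accepts (assuming no
# file named '1,2,3' exists in the working directory):
#
#   >>> get_gene_caller_ids_from_args('1,2,3')
#   {1, 2, 3}
#   >>> get_gene_caller_ids_from_args('1|2|3', delimiter='|')
#   {1, 2, 3}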
def remove_sequences_with_only_gaps_from_fasta(input_file_path, output_file_path, inplace=True):
filesnpaths.is_file_fasta_formatted(input_file_path)
filesnpaths.is_output_file_writable(output_file_path)
total_num_sequences = 0
num_sequences_removed = 0
input_fasta = u.SequenceSource(input_file_path)
clean_fasta = u.FastaOutput(output_file_path)
while next(input_fasta):
total_num_sequences += 1
if input_fasta.seq.count('-') == len(input_fasta.seq):
num_sequences_removed += 1
else:
clean_fasta.store(input_fasta, split=False)
if inplace:
if num_sequences_removed:
shutil.move(output_file_path, input_file_path)
else:
os.remove(output_file_path)
return total_num_sequences, num_sequences_removed
def get_num_sequences_in_fasta(input_file):
fasta = u.SequenceSource(input_file)
num_sequences = 0
while next(fasta):
num_sequences += 1
return num_sequences
def get_all_ids_from_fasta(input_file):
fasta = u.SequenceSource(input_file)
ids = []
while next(fasta):
ids.append(fasta.id)
return ids
def check_fasta_id_formatting(fasta_path):
fasta = u.SequenceSource(fasta_path)
while next(fasta):
characters_anvio_doesnt_like = [
c for c in set(fasta.id) if c not in constants.allowed_chars]
if len(characters_anvio_doesnt_like):
raise ConfigError(
"At least one of the deflines in your FASTA file "
"does not comply with the 'simple deflines' requirement of Anvi'o. "
"You can either use the script, `anvi-script-reformat-fasta`, "
"to take care of this issue, or read this section in the tutorial "
"to understand the reason behind this requirement "
"(Anvi'o is very upset for making you do this): %s"
% "http://merenlab.org/2016/06/22/anvio-tutorial-v2/#take-a-look-at-your-fasta-file")
try:
int(fasta.id)
is_int = True
except ValueError:
is_int = False
if is_int:
raise ConfigError(
"At least one of the deflines in your FASTA file "
"(well, this one to be precise: '%s') looks like a number. "
"For reasons we can't really justify, "
"Anvi'o does not like those numeric names, "
"and hereby asks you to make sure every tRNA-seq name "
"contains at least one alphanumeric character :/ "
"Meanwhile we, the Anvi'o developers, are both surprised by and thankful for "
"your endless patience with such eccentric requests. "
"You the real MVP." % fasta.id)
fasta.close()
def check_fasta_id_uniqueness(fasta_path):
all_ids_in_FASTA = get_all_ids_from_fasta(fasta_path)
total_num_seqs = len(all_ids_in_FASTA)
if total_num_seqs != len(set(all_ids_in_FASTA)):
raise ConfigError(
"Every sequence in the input FASTA file must have a unique ID. You know...")
def get_ordinal_from_integer(num):
"""append 'st', 'nd', or 'th' to integer to make categorical. num must be integer"""
return'%d%s' % (num, {11:'th', 12:'th', 13:'th'}.get(num%100, {1:'st', 2:'nd', 3:'rd'}.get(num%10,'th')))
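# A quick sanity sketch of the teens vs. non-teens behavior above:
#
#   >>> [get_ordinal_from_integer(n) for n in (1, 2, 3, 11, 12, 13, 21, 22, 23)]
#   ['1st', '2nd', '3rd', '11th', '12th', '13th', '21st', '22nd', '23rd']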
def get_read_lengths_from_fasta(input_file):
contig_lengths = {}
fasta = u.SequenceSource(input_file)
while next(fasta):
contig_lengths[fasta.id] = len(fasta.seq)
fasta.close()
return contig_lengths
def get_GC_content_for_FASTA_entries(file_path):
filesnpaths.is_file_exists(file_path)
filesnpaths.is_file_fasta_formatted(file_path)
GC_content_dict = {}
fasta = u.SequenceSource(file_path)
while next(fasta):
GC_content_dict[fasta.id] = get_GC_content_for_sequence(fasta.seq)
return GC_content_dict
def get_GC_content_for_sequence(sequence):
return Composition(sequence).GC_content
def get_synonymous_and_non_synonymous_potential(list_of_codons_in_gene, just_do_it=False):
"""
When calculating pN/pS or dN/dS, the number of variants classified as synonymous or non
synonymous need to be normalized by the sequence's potential for synonymous and
non-synonymous changes. That is calculated by mutating each position to the other 3
nucleotides and calculating whether the mutation is synonymous or non synonymous. Each
mutation gets a score of 1/3, since there are 3 possible mutations for each site. If the
sequence is of length L, the nonsynonymous and synonymous potentials sum to L.
list_of_codons_in_gene is a list of the codons as they appear in the gene sequence, e.g.
['ATG', ..., 'TAG'], which can be generated from utils.get_list_of_codons_for_gene_call
"""
if not any([list_of_codons_in_gene[-1] == x for x in ['TAG', 'TAA', 'TGA']]) and not just_do_it:
raise ConfigError("The sequence `get_synonymous_and_non_synonymous_potential` received does "
"end with a stop codon and may be irrelevant for this analysis. If you "
"want to continue anyways, include the flag `--just-do-it` in your call "
"(if you are a programmer see the function header).")
synonymous_potential = 0
num_ambiguous_codons = 0 # these are codons with Ns or other characters than ATCG
for codon in list_of_codons_in_gene:
# first test if it is proper codon
if not codon:
num_ambiguous_codons += 1
continue
# if we are here, this is a proper codon
for i, nt in enumerate(codon):
for mutant_nt in [m for m in 'ACGT' if m != nt]:
mutant_codon = list(codon)
mutant_codon[i] = mutant_nt
mutant_codon = ''.join(mutant_codon)
if constants.codon_to_AA[mutant_codon] == constants.codon_to_AA[codon]:
synonymous_potential += 1/3
non_synonymous_potential = 3 * (len(list_of_codons_in_gene) - num_ambiguous_codons) - synonymous_potential
return synonymous_potential, non_synonymous_potential, num_ambiguous_codons
def get_N50(contig_lengths):
h, S = sum(contig_lengths) / 2.0, 0
for l in sorted(contig_lengths, reverse=True):
S += l
if h < S:
return l
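# A worked sketch of the N50 computation above: the total length is 100, and
# the cumulative sum of lengths (sorted longest first) crosses the halfway
# mark (50) at the 30 bp contig.
#
#   >>> get_N50([10, 20, 30, 40])
#   30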
def get_cmd_line():
c_argv = []
for i in sys.argv:
if ' ' in i:
c_argv.append('"%s"' % i)
else:
c_argv.append(i)
return ' '.join(c_argv)
def get_time_to_date(local_time, fmt='%Y-%m-%d %H:%M:%S'):
try:
local_time = float(local_time)
except ValueError:
raise ConfigError("utils::get_time_to_date is called with bad local_time.")
return time.strftime(fmt, time.localtime(local_time))
def compare_times(calls, as_matrix=False, iterations_per_call=1):
"""Compare times between function calls
Parameters
==========
calls : list of tuples
Each element should be a (name, function, args, kwargs) tuples. If there are no args or
kwargs, the element should look like (name, function, [], {})
as_matrix : bool, False
If True, results are output as a pandas matrix, where each element is a time difference between
calls. Otherwise, a dictionary is returned
iterations_per_call : int, 1
How many times should each function call be run? Times will be averaged
Returns
=======
times : pd.DataFrame or dict
If as_matrix, pd.DataFrame is returned, where times[i, j] is how much faster i is than j.
Otherwise, dictionary of {name: time} is returned
"""
call_times = np.zeros((len(calls), iterations_per_call))
names, *_ = zip(*calls)
for i, call in enumerate(calls):
name, function, args, kwargs = call
for j in range(iterations_per_call):
try:
with TimeCode(quiet=True) as t:
function(*args, **kwargs)
except Exception as e:
raise ConfigError("compare_times :: function call with name '%s' failed: %s" % (name, e))
call_times[i, j] = t.time.total_seconds()
averaged_call_times = np.mean(call_times, axis=1)
if not as_matrix:
return dict(zip(names, averaged_call_times))
matrix = []
for i, _time in enumerate(call_times):
row = []
for j, _time in enumerate(call_times):
row.append(averaged_call_times[j] - averaged_call_times[i] if i > j else 'NA')
matrix.append(row)
return pd.DataFrame(matrix, columns=names, index=names)
def concatenate_files(dest_file, file_list, remove_concatenated_files=False):
if not dest_file:
raise ConfigError("Destination cannot be empty.")
filesnpaths.is_output_file_writable(dest_file)
if not len(file_list):
raise ConfigError("File list cannot be empty.")
for f in file_list:
filesnpaths.is_file_exists(f)
dest_file_obj = open(dest_file, 'w')
for chunk_path in file_list:
for line in open(chunk_path, 'r'):
dest_file_obj.write(line)
dest_file_obj.close()
if remove_concatenated_files:
for f in file_list:
os.remove(f)
return dest_file
def merge_stretches(stretches, min_distance_between_independent_stretches):
"""A function to merge stretches of indices in an array.
It takes an array, `stretches`, that looks like this:
>>> [(3, 9), (14, 27), (32, 36), (38, 42)]
And returns an array like this, if `min_distance_between_independent_stretches`, say, 3:
>>> [(3, 9), (14, 27), (32, 42)]
"""
stretches_to_merge = []
# The following state machine determines which entries in a given array
# should be merged
CURRENT = 0
START, END = 0, 1
while 1:
if not len(stretches):
break
NEXT = CURRENT + 1
if NEXT == len(stretches):
stretches_to_merge.append([stretches[CURRENT]])
break
while 1:
if NEXT > len(stretches):
break
if stretches[NEXT][START] - stretches[CURRENT][END] < min_distance_between_independent_stretches:
NEXT = NEXT + 1
if NEXT == len(stretches):
break
else:
break
if NEXT > len(stretches):
break
elif NEXT - CURRENT == 1:
stretches_to_merge.append([stretches[CURRENT]])
CURRENT += 1
else:
stretches_to_merge.append(stretches[CURRENT:NEXT])
# the stretch at index NEXT was not merged (it is independent of the ones
# before it), so processing must continue from it; jumping past it would
# silently drop it from the output
CURRENT = NEXT
# here the array `stretches_to_merge` contains all the lists of
# stretches that need to be merged.
return [(s[0][0], s[-1][1]) for s in stretches_to_merge]
def get_chunk(stream, separator, read_size=4096):
"""Read from a file chunk by chunk based on a separator substring
The utility of this function is to avoid reading in the entire contents of a file all at once.
Instead, you can read in a chunk, process it, then read in the next chunk, and repeat until
the EOF.
Parameters
==========
stream : _io.TextIOWrapper
A file handle, e.g. stream = open('<path_to_file>', 'r')
separator : str
Each value returned will be the string from the last `separator` to the next `separator`
read_size : int, 4096
How many characters should each read grab? Bigger means faster reading, but higher memory usage. This
has no effect on what is returned, but can greatly influence speed. Default is 4096 (4 KiB).
References
==========
https://stackoverflow.com/questions/47927039/reading-a-file-until-a-specific-character-in-python
"""
contents_buffer = ''
while True:
chunk = stream.read(read_size)
if not chunk:
yield contents_buffer
break
contents_buffer += chunk
while True:
try:
part, contents_buffer = contents_buffer.split(separator, 1)
except ValueError:
break
else:
yield part
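# A minimal sketch of chunked reading, with io.StringIO standing in for a real
# file handle (the empty first element is the text before the first separator):
#
#   >>> import io
#   >>> list(get_chunk(io.StringIO('>rec1\nAAA\n>rec2\nCCC\n'), separator='>'))
#   ['', 'rec1\nAAA\n', 'rec2\nCCC\n']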
def get_split_start_stops(contig_length, split_length, gene_start_stops=None):
"""Wrapper function for get_split_start_stops_with_gene_calls and get_split_start_stops_without_gene_calls"""
if gene_start_stops:
return get_split_start_stops_with_gene_calls(contig_length, split_length, gene_start_stops)
else:
return get_split_start_stops_without_gene_calls(contig_length, split_length)
def get_split_start_stops_with_gene_calls(contig_length, split_length, gene_start_stops):
"""Here we have a contig of `contig_length`, and a desired split length of `split_length`. also
we know where genes start and stop in this contigs. we would like to split this contig into
smaller pieces, i.e. sizes of `splits_length`, but in such a way that that splits do not
break contigs in the middle of a gene."""
# if the contig is too short, return it back.
if contig_length < 2 * split_length:
return [(0, contig_length)]
coding_positions_in_contig = []
# Pretend the beginning and end are coding (even if they aren't) so that we prevent very short pieces.
for position in it.chain(range(int(split_length / 2)), range(contig_length - int(split_length / 2), contig_length)):
coding_positions_in_contig.append(position)
# Track positions that code for genes.
for gene_unique_id, start, stop in gene_start_stops:
start = start - 5
stop = stop + 5
for position in range(start, stop):
coding_positions_in_contig.append(position)
non_coding_positions_in_contig = set(range(contig_length)) - set(coding_positions_in_contig)
# what would be our break points in an ideal world? compute an initial list of break
# points based on the length of the contig and desired split size:
optimal_number_of_splits = int(contig_length / split_length)
optimal_split_length = int(contig_length / optimal_number_of_splits)
optimal_break_points = list(range(optimal_split_length, contig_length - optimal_split_length + 1, optimal_split_length))
# now we will identify the very bad break points that we can't find a way to split around
bad_break_points = set([])
for i in range(0, len(optimal_break_points)):
break_point = optimal_break_points[i]
if break_point not in non_coding_positions_in_contig:
# optimal break point hits a gene. we shall search towards both directions
# to find a better break point:
new_break_point = None
for s in range(0, int(split_length / 2)):
if break_point + s in non_coding_positions_in_contig:
new_break_point = break_point + s
break
if break_point - s in non_coding_positions_in_contig:
new_break_point = break_point - s
break
if not new_break_point:
# nope. we failed. this is a bad bad break point.
bad_break_points.add(break_point)
else:
# we are satisfied with the new one we found for now. it may be a shitty one,
# but we will learn about that later. for now, let's replace the previous
# optimal break point with this one.
optimal_break_points[i] = new_break_point
# remove all the bad breakpoints from our 'optimal' break points:
optimal_break_points = [p for p in optimal_break_points if p not in bad_break_points]
if not len(optimal_break_points):
# we have nothing left to work with after removal of the crappy break points. we will
# keep this bad boy the way it is.
return [(0, contig_length)]
# create start/stop positions from these break points
chunks = list(zip([0] + optimal_break_points[:-1], optimal_break_points)) + [(optimal_break_points[-1], contig_length)]
return chunks
def get_split_start_stops_without_gene_calls(contig_length, split_length):
"""Returns split start stop locations for a given contig length."""
num_chunks = int(contig_length / split_length)
if num_chunks < 2:
return [(0, contig_length)]
chunks = []
for i in range(0, num_chunks):
chunks.append((i * split_length, (i + 1) * split_length),)
chunks.append(((i + 1) * split_length, contig_length),)
if (chunks[-1][1] - chunks[-1][0]) < (split_length / 2):
# last chunk is too small :/ merge it to the previous one.
last_tuple = (chunks[-2][0], contig_length)
chunks.pop()
chunks.pop()
chunks.append(last_tuple)
return chunks
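# A worked sketch of the trailing-chunk merge above: with a 240 bp contig and
# a split length of 100, the 40 bp leftover is smaller than split_length / 2
# and gets merged into the previous chunk; with 250 bp, the 50 bp leftover
# survives as its own split.
#
#   >>> get_split_start_stops_without_gene_calls(240, 100)
#   [(0, 100), (100, 240)]
#   >>> get_split_start_stops_without_gene_calls(250, 100)
#   [(0, 100), (100, 200), (200, 250)]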
def get_split_and_contig_names_of_interest(contigs_db_path, gene_caller_ids):
"""Takes a set of gene caller ids, returns all split and contig names in a
contigs database that are affiliated with them.
"""
if not isinstance(gene_caller_ids, set):
raise ConfigError("`gene_caller_ids` must be of type `set`.")
is_contigs_db(contigs_db_path)
contigs_db = db.DB(contigs_db_path, anvio.__contigs__version__)
where_clause_genes = "gene_callers_id in (%s)" % ', '.join(['%d' % g for g in gene_caller_ids])
genes_in_contigs = contigs_db.get_some_rows_from_table_as_dict(t.genes_in_contigs_table_name, where_clause=where_clause_genes)
contig_names_of_interest = set([e['contig'] for e in genes_in_contigs.values()])
where_clause_contigs = "parent in (%s)" % ', '.join(['"%s"' % c for c in contig_names_of_interest])
splits_info = contigs_db.get_some_rows_from_table_as_dict(t.splits_info_table_name, where_clause=where_clause_contigs)
split_names_of_interest = set(splits_info.keys())
contigs_db.disconnect()
return (split_names_of_interest, contig_names_of_interest)
def get_contigs_splits_dict(split_ids, splits_basic_info):
"""
For a given list of split ids, create a dictionary of contig names
that represents all parents as keys, and ordered splits as items.
split_ids is a set of split IDs, splits_basic_info comes from the contigs database:
>>> contigs_db = dbops.ContigsDatabase(contigs_db_path)
>>> splits_basic_info = contigs_db.db.get_table_as_dict(t.splits_info_table_name)
>>> contigs_db.disconnect()
>>> x = get_contigs_splits_dict(set(['contig_A_split_00001', 'contig_A_split_00002', 'contig_A_split_00004',
'contig_C_split_00003', 'contig_C_split_00004', 'contig_C_split_00005']),
splits_basic_info)
>>> print(x)
{
'contig_A': {
0: 'contig_A_split_00001',
1: 'contig_A_split_00002',
4: 'contig_A_split_00004'
},
'contig_C': {
3: 'contig_C_split_00003',
4: 'contig_C_split_00004',
5: 'contig_C_split_00005'
}
}
"""
contigs_splits_dict = {}
for split_id in split_ids:
s = splits_basic_info[split_id]
if s['parent'] in contigs_splits_dict:
contigs_splits_dict[s['parent']][s['order_in_parent']] = split_id
else:
contigs_splits_dict[s['parent']] = {s['order_in_parent']: split_id}
return contigs_splits_dict
def get_variabile_item_frequencies(e, engine='NT'):
"""
e is a row from variable_nucleotide_positions table defined in tables.
this function extends dictionary with consensus and departure from consensus.
"""
items = constants.nucleotides if engine=='NT' else constants.amino_acids
frequency_dict = Counter(dict([(item, e[item]) for item in items]))
return frequency_dict.most_common()
def get_consensus_and_departure_data(variable_item_frequencies):
"""Make sense of `variable_item_frequencies`.
The format of `variable_item_frequencies` follows this:
>>> [('A', 45), ('T', 5), ('G', 0), ('N', 0), ('C', 0)]
For a given entry of the variable_XX_frequencies table, the `variable_item_frequencies`
tuple can be obtained via `get_variabile_item_frequencies`.
"""
frequency_of_consensus = variable_item_frequencies[0][1]
total_frequency_of_all_but_the_consensus = sum([tpl[1] for tpl in variable_item_frequencies[1:]])
coverage = total_frequency_of_all_but_the_consensus + frequency_of_consensus
n2n1ratio = variable_item_frequencies[1][1] / frequency_of_consensus if frequency_of_consensus else -1
consensus = variable_item_frequencies[0][0]
departure_from_consensus = total_frequency_of_all_but_the_consensus / coverage if coverage else -1
return (n2n1ratio, consensus, departure_from_consensus)
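# A worked sketch using the docstring's example frequencies: coverage is 50,
# the consensus is 'A', departure from consensus is 5/50, and n2n1ratio is 5/45.
#
#   >>> get_consensus_and_departure_data([('A', 45), ('T', 5), ('G', 0), ('N', 0), ('C', 0)])
#   (0.1111111111111111, 'A', 0.1)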
def convert_sequence_indexing(index, source="M0", destination="M1"):
"""
Anvi'o zero-indexes sequences. For example, the methionine that every ORF starts with has the
index 0 (M0). This is in contrast to most conventions, in which the methionine is indexed by
1 (M1). This function converts between the two.
Parameters
==========
index : integer, numpy array, pandas series, list
The sequence index/indices you are converting.
source : string
The convention you are converting from. Must be either "M0" (anvio) or
"M1" (not anvio)
destination : string
The convention you are converting to. Must be either "M0" (anvio) or
"M1" (not anvio)
"""
convert = lambda x, a: [i + a for i in x] if isinstance(x, list) else x + a
if source not in ["M0", "M1"] or destination not in ["M0", "M1"]:
raise ValueError("Must be 'M0' or 'M1'.")
if source == "M0" and destination == "M1":
return convert(index, 1)
if source == "M1" and destination == "M0":
return convert(index, -1)
return index
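# A minimal sketch of both directions (integers and lists are handled by the
# lambda above):
#
#   >>> convert_sequence_indexing(0)
#   1
#   >>> convert_sequence_indexing([1, 5], source="M1", destination="M0")
#   [0, 4]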
@jit(nopython=True)
def get_constant_value_blocks(array, value):
"""Generator that returns blocks of consecutive numbers
Parameters
==========
array : array
a numerical numpy array. If a list is passed, this function is very slow
value : number
The number you want to get constant blocks for.
Examples
========
>>> a = np.array([47, 47, 47, 49, 50, 47, 47, 99])
>>> for i in get_constant_value_blocks(a, 47): print(i)
(0, 3)
(5, 7)
"""
ans = []
matching = False
for i in range(len(array)):
if array[i] == value:
if not matching:
start = i
matching = True
else:
if matching:
matching = False
ans.append((start, i))
if matching:
ans.append((start, i + 1))
return ans
@jit(nopython=True)
def find_value_index(x, val, reverse_search=False):
"""Returns first instance of indices where a value is found
Created this because unlike np.where, this stops after the first instance is found. If you only
want the first instance, this algorithm is therefore preferrable for array sizes < 1000 (see
examples)
Parameters
==========
x : 1D array
val : number
return index of x where the value == val.
reverse_search : bool, False
Search in reverse order
Examples
========
>>> import numpy as np
>>> import anvio.utils as utils
>>> x = np.arange(1000)
>>> %timeit utils.find_value_index(x, 999, reverse_search=True)
574 ns ± 15.8 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
>>> %timeit utils.find_value_index(x, 999)
2.21 µs ± 36.7 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
>>> %timeit np.where(x == 999)[0][0]
2.91 µs ± 563 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
"""
for i in range(len(x)) if not reverse_search else range(len(x)-1, -1, -1):
if x[i] == val:
return i
def convert_SSM_to_single_accession(matrix_data):
"""
The substitution scores from the SSM dictionaries created in anvio.data.SSMs are accessed via a dictionary of
dictionaries, e.g. data["Ala"]["Trp"]. This returns a new dictionary accessed via the concatenated sequence element
pair, e.g. data["AlaTrp"], data["AT"], etc. where they are ordered alphabetically.
"""
items = matrix_data.keys()
new_data = {}
for row in items:
for column in items:
if row > column:
continue
new_data[''.join([row, column])] = matrix_data[row][column]
return new_data
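# A minimal sketch with a hypothetical 2x2 substitution matrix (the real SSM
# dictionaries in anvio.data.SSMs are much larger):
#
#   >>> convert_SSM_to_single_accession({'A': {'A': 4, 'T': -2}, 'T': {'A': -2, 'T': 4}})
#   {'AA': 4, 'AT': -2, 'TT': 4}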
def is_gene_sequence_clean(seq, amino_acid=False, can_end_with_stop=False, must_start_with_met=True):
"""Returns True if gene sequence is clean (amino acid or nucleotide), otherwise raises ConfigError
Parameters
==========
seq : str
A string of amino acid or nucleotide sequence
amino_acid : bool, False
If True, the sequence is assumed to be an amino acid sequence
can_end_with_stop : bool, False
If True, the sequence can, but does not have to, end with * if amino_acid=True, or one of
<TAG, TGA, TAA> if amino_acid=False.
must_start_with_met : bool, True
If True, the sequence must start with ATG if amino_acid=False or Met if amino_acid=True
Returns
=======
value : bool
Notes
=====
- A 'clean gene' depends on `amino_acid`. If amino_acid=True, must contain only the 20 1-letter
codes (case insensitive) and start with M. If amino_acid=False, must contain only A,C,T,G
(case insensitive), start with ATG, and have length divisible by 3. If can_end_with_stop=True,
`seq` can end with a stop. If any intermediate and in-frame stop codons are found, the gene
is not clean
"""
error_msg_template = "The gene sequence is not clean. Reason: %s"
seq = seq.upper()
start_char = 'M' if amino_acid else 'ATG'
end_chars = ['*'] if amino_acid else ['TAG', 'TGA', 'TAA']
permissible_chars = (set(constants.AA_to_single_letter_code.values())
if amino_acid
else set(constants.codons)) - set(end_chars)
if not amino_acid:
if len(seq) % 3:
raise ConfigError(error_msg_template % "The number of nucleotides is not divisible by 3")
new_seq = [] # list of length-3 strings
for i in range(0, len(seq), 3):
new_seq.append(seq[i:i+3])
seq = new_seq
if must_start_with_met and seq[0] != start_char:
raise ConfigError(error_msg_template % "Should start with methionine but instead starts with %s" % seq[0])
for i, element in enumerate(seq[:-1]):
if element in end_chars:
l, r = min([i, 3]), min([len(seq[:-1])-i, 3])
error_msg = error_msg_template % "Premature stop codon at %dth codon position (counting from 0).\
Here is the position in the context of the sequence: ...%s[%s]%s..." \
% (i, ''.join(seq[:-1][i-l:i]), element, ''.join(seq[:-1][i+1:i+r+1]))
raise ConfigError(error_msg)
if element not in permissible_chars:
l, r = min([i, 3]), min([len(seq[:-1])-i, 3])
error_msg = error_msg_template % "%s at %dth codon position (counting from zero) isn't a valid sequence\
element. Here is the position in the context of the sequence: ...%s[%s]%s..." \
% (element, i, ''.join(seq[:-1][i-l:i]), element, ''.join(seq[:-1][i+1:i+r+1]))
raise ConfigError(error_msg)
if seq[-1] in end_chars:
if not can_end_with_stop:
raise ConfigError(error_msg_template % "Sequence should not contain an explicit stop codon")
elif seq[-1] not in permissible_chars:
raise ConfigError(error_msg_template % "Last codon is not a valid character: %s" % seq[-1])
return True
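# A minimal sketch with toy sequences (relies on the usual constants tables
# for codons and single-letter amino acid codes):
#
#   >>> is_gene_sequence_clean('ATGAAATAA', can_end_with_stop=True)
#   True
#   >>> is_gene_sequence_clean('MKL', amino_acid=True)
#   True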
def get_list_of_AAs_for_gene_call(gene_call, contig_sequences_dict):
list_of_codons = get_list_of_codons_for_gene_call(gene_call, contig_sequences_dict)
list_of_AAs = []
for codon in list_of_codons:
# if the consensus sequence contains shitty characters, we will not continue
if codon not in constants.codon_to_AA:
continue
# genes in the reverse direction are already handled in get_list_of_codons_for_gene_call so
# all we do is transform codons to AAs
list_of_AAs.append(constants.codon_to_AA[codon])
return list_of_AAs
def get_list_of_codons_for_gene_call(gene_call, contig_sequences_dict, **kwargs):
"""Get a list of the codons for a gene call
Parameters
==========
contig_sequences_dict : dict
An object that looks like that ContigsSuperclass.contig_sequences (initialized with
ContigsSuperclass.init_contig_sequences)
"""
codon_order_to_nt_positions = get_codon_order_to_nt_positions_dict(gene_call, **kwargs)
if gene_call['contig'] not in contig_sequences_dict:
raise ConfigError("get_list_of_AAs_for_gene_call: The contig sequences dict sent to "
"this function does contain the contig name that appears in the gene call. "
"Something is wrong here...")
try:
contig_sequence = contig_sequences_dict[gene_call['contig']]['sequence']
except:
raise ConfigError("get_list_of_AAs_for_gene_call: The contig sequences dict sent to "
"this function does not seem to be an anvi'o contig sequences dict :/ It "
"doesn't have the item 'sequence' in it.")
list_of_codons = []
for codon_order in codon_order_to_nt_positions:
nt_positions = codon_order_to_nt_positions[codon_order]
# here we cut it from the contig sequence
reference_codon_sequence = contig_sequence[nt_positions[0]:nt_positions[2] + 1]
# NOTE: here we make sure the codon sequence is composed of unambiguous nucleotides,
# and we will not include those that contain anything other than proper
# nucleotides in the resulting list of codons.
if set(reference_codon_sequence).issubset(constants.unambiguous_nucleotides):
list_of_codons.append(constants.codon_to_codon_RC[reference_codon_sequence] if gene_call['direction'] == 'r' else reference_codon_sequence)
else:
list_of_codons.append(None)
return list_of_codons
def get_translated_sequence_for_gene_call(sequence, gene_callers_id, return_with_stops=False):
try:
translated_sequence = translate(sequence)
except ConfigError:
raise ConfigError("The sequence corresponding to the gene callers id '%s' has %d nucleotides, "
"which is indivisible by 3. This is bad because it is now ambiguous which codon "
"frame should be used for translation into an amino acid sequence. Here is "
"the culprit sequence: %s" % (gene_callers_id, len(sequence), sequence))
if translated_sequence.endswith('*') and not return_with_stops:
translated_sequence = translated_sequence[:-1]
return translated_sequence
def translate(sequence):
"""Translate a sequence. As stupid as possible.
Returns
=======
amino_acid_sequence : str
Amino acid sequence of sequence. If translation of codon is unknown, X is used. All stop
codons are included and represented as *.
Notes
=====
- Raises error if indivisible by 3
- Consider smarter functions: utils.get_translated_sequence_for_gene_call,
utils.get_most_likely_translation_frame
"""
N = len(sequence)
sequence = sequence.upper()
translated_sequence = []
if N % 3:
raise ConfigError("utils.translate :: sequence is not divisible by 3: %s" % sequence)
for i in range(0, N, 3):
aa = constants.AA_to_single_letter_code[constants.codon_to_AA[sequence[i:i + 3]]] or 'X'
translated_sequence.append(aa)
return ''.join(translated_sequence)
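# A minimal sketch (assumes the standard genetic code tables in anvio.constants):
#
#   >>> translate('ATGTGGTAA')
#   'MW*'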
def get_most_likely_translation_frame(sequence, model=None, null_prob=None, stop_prob=None, log_likelihood_cutoff=2):
"""Predict the translation frame with a markov model of amino acid sequences
Parameters
==========
sequence : str
A DNA sequence
model : numpy array, None
A numpy array of transition probabilities. For an example, see
anvio/data/seq_transition_models/AA/3rd_order.npy
null_prob : float, None
When a markov state contains an unspecified amino acid (X), what probability transition should
be applied to it? To be as uninformative as possible, if None, null_prob is set to the median
transition probability of the model.
stop_prob : float, None
When a markov state contains a stop codon, what transition probability should
be applied to it? Since internal stop codons are exceedingly rare, if None, stop_prob is set
to be 1/1,000,000th the probability of the minimum transition probability of the model.
log_likelihood_cutoff : float, 2
If the best frame has a log likelihood with respect to the second best frame that is less
than this value, the frame is set to None, which is to say, anvi'o is not confident enough
to tell you the frame. The amino acid sequence is still returned. The default is 2, which
means the probability of the first should be at least 10^2 times the probability of the
competing frame
Returns
=======
frame, amino_acid_sequence : int, str
frame is the number of shifted nucleotides that produced the most likely frame and is either
0, 1, or 2. amino_acid_sequence is the translated sequence. If less than log_likelihood_cutoff,
None is returned as the frame
"""
N = len(sequence)
if N == 3:
# Save ourselves the effort
return 0, translate(sequence)
elif N < 3:
raise ConfigError("utils.get_most_likely_translation_frame :: sequence has a length less than 3 "
"so there is nothing to translate.")
if model is None:
default_model_path = os.path.join(os.path.dirname(anvio.__file__), 'data/seq_transition_models/AA/fourth_order.npy')
model = np.load(default_model_path)
order = len(model.shape)
null_prob = null_prob if null_prob is not None else np.median(model)
stop_prob = stop_prob if stop_prob is not None else model.min()/1e6
aas = [constants.AA_to_single_letter_code[aa] for aa in constants.amino_acids if aa != 'STP']
aa_to_array_index = {aa: i for i, aa in enumerate(aas)}
# Collect all of the candidate sequences
candidates = {}
for frame in range(3):
frame_seq = sequence[frame:]
remainder = len(frame_seq) % 3
if remainder:
frame_seq = frame_seq[:-remainder]
if not frame_seq:
continue
candidates[frame] = {
'sequence': translate(frame_seq),
'log_prob': 0,
}
# Calculate the log probability of each candidate
smallest_seq_length = min([len(candidate['sequence']) for candidate in candidates.values()])
for frame in candidates:
# Some of the candidates will be one AA smaller. To not skew values, we truncate each
# candidate to the length of the smallest candidate
seq = candidates[frame]['sequence'][:smallest_seq_length]
trans_probs = np.zeros(smallest_seq_length - order)
for codon_order in range(smallest_seq_length - order):
state = seq[codon_order:codon_order+order]
if '*' in state:
trans_probs[codon_order] = stop_prob
elif 'X' in state:
trans_probs[codon_order] = null_prob
else:
state_as_indices = tuple([aa_to_array_index[aa] for aa in state])
trans_probs[codon_order] = model[state_as_indices]
candidates[frame]['log_prob'] = np.sum(np.log10(trans_probs))
frame_second, frame_best = sorted(candidates, key=lambda frame: candidates[frame]['log_prob'])[-2:]
log_prob_best = candidates[frame_best]['log_prob']
log_prob_second = candidates[frame_second]['log_prob']
if (log_prob_best - log_prob_second) < log_likelihood_cutoff:
# The best frame is not leagues better than the competing frame, which it should be if we are to
# have any confidence in it. The sequence is still returned, but the frame is None
return None, candidates[frame_best]['sequence']
amino_acid_sequence = candidates[frame_best]['sequence']
# if the best amino acid sequence ends with a stop codon, remove it.
amino_acid_sequence = amino_acid_sequence[:-1] if amino_acid_sequence.endswith('*') else amino_acid_sequence
return frame_best, amino_acid_sequence
def get_codon_order_to_nt_positions_dict(gene_call, subtract_by=0):
"""Returns a dictionary to translate codons in a gene to nucleotide positions
Parameters
==========
subtract_by : int, 0
Subtract the start and stop of the gene call by this amount. This could be useful if the
gene call start/stop are defined in terms of the contig, but you want the start/stop in
terms of the split. Then you could supply subtract_by=split_start, where split_start is the
start of the split
"""
if gene_call['call_type'] != constants.gene_call_types['CODING']:
raise ConfigError("utils.get_codon_order_to_nt_positions_dict :: this simply will not work "
"for noncoding gene calls, and gene caller id %d is noncoding." % gene_call['gene_callers_id'])
start = gene_call['start'] - subtract_by
stop = gene_call['stop'] - subtract_by
codon_order_to_nt_positions = {}
codon_order = 0
if gene_call['direction'] == 'r':
for nt_pos in range(stop - 1, start - 1, -3):
codon_order_to_nt_positions[codon_order] = [nt_pos - 2, nt_pos - 1, nt_pos]
codon_order += 1
else:
for nt_pos in range(start, stop, 3):
codon_order_to_nt_positions[codon_order] = [nt_pos, nt_pos + 1, nt_pos + 2]
codon_order += 1
return codon_order_to_nt_positions
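# A minimal sketch with a hypothetical forward gene call spanning positions
# 100-108 (inclusive); 'call_type' must match constants.gene_call_types['CODING']:
#
#   >>> gene_call = {'call_type': constants.gene_call_types['CODING'], 'start': 100,
#   ...              'stop': 109, 'direction': 'f', 'gene_callers_id': 0}
#   >>> get_codon_order_to_nt_positions_dict(gene_call)
#   {0: [100, 101, 102], 1: [103, 104, 105], 2: [106, 107, 108]}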
def nt_seq_to_nt_num_array(seq, is_ord=False):
"""Convert a string of sequence into an array of numbers
Performance compared to {list comprehension with dictionary lookup} depends on sequence length.
See Examples
Parameters
==========
seq : str
string with A, C, T, G, N as its characters, e.g. 'AATGCN'
is_ord : bool, False
set True if seq is already a numpy array, where each element is the ord of the sequence. E.g.
if `seq` is passed as array([65, 65, 67]), then it is already the ordinal representation of
'AAC'
Returns
=======
output : numpy array
E.g. if seq = 'AATGCN', output = array([0, 0, 2, 3, 1, 4])
Examples
========
Init an environment
>>> import anvio.constants as constants
>>> import anvio.utils as utils
>>> seq_short = ''.join(list(np.random.choice(constants.nucleotides, size=100)))
>>> seq_long = ''.join(list(np.random.choice(constants.nucleotides, size=100_000_000)))
>>> nt_to_num = {'A': 0, 'C': 1, 'T': 2, 'G': 3, 'N': 4}
Time short sequence:
>>> %timeit utils.nt_seq_to_nt_num_array(seq_short)
2.36 µs ± 20.9 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
>>> %timeit [nt_to_num[s] for s in seq_short]
5.83 µs ± 20.7 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
Time long sequence:
>>> %timeit utils.nt_seq_to_nt_num_array(seq_long)
653 ms ± 1.02 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
>>> %timeit [nt_to_num[s] for s in seq_long]
5.27 s ± 13.4 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
"""
return constants.nt_to_num_lookup[seq if is_ord else np.frombuffer(seq.encode('ascii'), np.uint8)]
def nt_seq_to_RC_nt_num_array(seq, is_ord=False):
"""Convert a string of sequence into an array of numbers, reverse-complemented
Performance compared to {list comprehension with dictionary lookup} depends on sequence length.
See Examples
Parameters
==========
seq : str
string with A, C, T, G, N as its characters, e.g. 'AATGCN'
is_ord : bool, False
set True if seq is already a numpy array, where each element is the ord of the sequence. E.g.
if `seq` is passed as array([65, 65, 67]), then it is already the ordinal representation of
'AAC'
Returns
=======
output : numpy array
E.g. if seq = 'AATGCN', output = array([4, 2, 0, 1, 3, 3])
Examples
========
See `nt_seq_to_nt_num_array` docstring for examples
"""
return constants.nt_to_RC_num_lookup[seq if is_ord else np.frombuffer(seq.encode('ascii'), np.uint8)][::-1]
def nt_seq_to_codon_num_array(seq, is_ord=False):
"""Convert a sequence into an array of numbers corresponding to codons
Parameters
==========
seq : str
string with A, C, T, G as its characters, e.g. 'AATGCT'. seq must be divisible by 3
is_ord : bool, False
set True if seq is already a numpy array, where each element is the ord of the sequence. E.g.
if `seq` is passed as array([65, 65, 67]), then it is already the ordinal representation of
'AAC'
Notes
=====
- Delegates to just-in-time compiled function
"""
return _nt_seq_to_codon_num_array(
seq if is_ord else np.frombuffer(seq.encode('ascii'), np.uint8),
constants.codon_to_num_lookup,
)
def nt_seq_to_RC_codon_num_array(seq, is_ord=False):
"""Convert a sequence into an array of numbers corresponding to codons, reverse-complemented
Parameters
==========
seq : str
string with A, C, T, G as its characters, e.g. 'AATGCT'. seq must be divisible by 3
is_ord : bool, False
set True if seq is already a numpy array, where each element is the ord of the sequence. E.g.
if `seq` is passed as array([65, 65, 67]), then it is already the ordinal representation of
'AAC'
Notes
=====
- Delegates to just-in-time compiled function
"""
return _nt_seq_to_codon_num_array(
seq if is_ord else np.frombuffer(seq.encode('ascii'), np.uint8),
constants.codon_to_RC_num_lookup,
)[::-1]
@jit(nopython=True)
def _nt_seq_to_codon_num_array(seq_as_ascii_ints, lookup_codon):
"""Should be called through its parent functions `nt_seq_to_codon_num_array` and `nt_seq_to_RC_codon_num_array`"""
output = np.zeros(len(seq_as_ascii_ints)//3, dtype=np.uint8)
for i in range(0, seq_as_ascii_ints.shape[0], 3):
output[i//3] = lookup_codon[seq_as_ascii_ints[i], seq_as_ascii_ints[i+1], seq_as_ascii_ints[i+2]]
return output
def is_amino_acid_functionally_conserved(amino_acid_residue_1, amino_acid_residue_2):
"""Checks if two amino acid residues are part of the same biochemical property group"""
group = constants.amino_acid_property_group[amino_acid_residue_1]
conserved_group = constants.conserved_amino_acid_groups[group]
if amino_acid_residue_2 in conserved_group:
return True
if group == 'Polar and Nonpolar':
# they fall in more than one group, so multiple tests are needed
if amino_acid_residue_1 == 'H' and (amino_acid_residue_2 in constants.conserved_amino_acid_groups['Nonpolar'] \
or amino_acid_residue_2 in constants.conserved_amino_acid_groups['Bases']):
return True
if amino_acid_residue_1 == 'Y' and (amino_acid_residue_2 in constants.conserved_amino_acid_groups['Aromatic']):
return True
return False
def get_bin_name_from_item_name(anvio_db_path, item_name, collection_name=None):
is_pan_or_profile_db(anvio_db_path, genes_db_is_also_accepted=True)
database = db.DB(anvio_db_path, None, ignore_version=True)
if t.collections_splits_table_name not in database.get_table_names():
raise ConfigError("The database %s does not contain a collections table :/" % anvio_db_path)
if collection_name:
where_clause = 'split = "%s" and collection_name = "%s"' % (item_name, collection_name)
else:
where_clause = 'split = "%s"' % (item_name)
rows = database.get_some_rows_from_table(t.collections_splits_table_name, where_clause=where_clause)
database.disconnect()
return rows
def get_contig_name_to_splits_dict(splits_basic_info_dict, contigs_basic_info_dict):
"""
Returns a dict for contig name to split name conversion.
Here are the proper source of the input params:
contigs_basic_info_dict = database.get_table_as_dict(t.contigs_info_table_name, string_the_key = True)
splits_basic_info_dict = database.get_table_as_dict(t.splits_info_table_name)
"""
contig_name_to_splits_dict = {}
for split_name in splits_basic_info_dict:
parent = splits_basic_info_dict[split_name]['parent']
if parent in contig_name_to_splits_dict:
contig_name_to_splits_dict[parent].append(split_name)
else:
contig_name_to_splits_dict[parent] = [split_name]
return contig_name_to_splits_dict
def check_sample_id(sample_id):
if sample_id:
if sample_id[0] in constants.digits:
raise ConfigError("The sample name ('%s') is not a valid one. Sample names can't start with digits. "
"Long story. Please specify a sample name that starts with an ASCII letter (if "
"there are no parameters available to you to set the sample name, it may be the "
"case that sample name is determined automatically from the input files you have "
"provided to whatever anvi'o workflow you were using, in which case you may need "
"to change your input file names or something :/)." % sample_id)
allowed_chars_for_samples = constants.allowed_chars.replace('-', '').replace('.', '')
if len([c for c in sample_id if c not in allowed_chars_for_samples]):
raise ConfigError("The sample name ('%s') contains characters anvi'o does not like. Please "
"limit the characters that make up the project name to ASCII letters, "
"digits, and the underscore character ('_')." % sample_id)
def check_collection_name(collection_name):
try:
check_sample_id(collection_name)
except:
raise ConfigError('"%s" is not a proper collection name. A proper one should be a single word and not contain '
'ANY characters but digits, ASCII letters and underscore character(s). There should not be '
'any space characters, and the collection name should not start with a digit.' % collection_name)
def is_this_name_OK_for_database(variable_name, content, stringent=True, additional_chars_allowed=''):
if not content:
raise ConfigError("But the %s is empty? Come on :(" % variable_name)
if content[0] in constants.digits:
raise ConfigError("Sorry, %s can't start with a digit. Long story. Please specify a name "
"that starts with an ASCII letter." % variable_name)
if stringent:
allowed_chars = constants.allowed_chars.replace('.', '').replace('-', '')
else:
allowed_chars = constants.allowed_chars.replace('.', '')
if len(additional_chars_allowed):
allowed_chars += additional_chars_allowed
if len([c for c in content if c not in allowed_chars]):
raise ConfigError("Well, the %s contains characters that anvi'o does not like :/ Please limit the characters "
"to ASCII letters, digits, and the underscore ('_') character." % variable_name)
def check_contig_names(contig_names, dont_raise=False):
all_characters_in_contig_names = set(''.join(contig_names))
characters_anvio_doesnt_like = [c for c in all_characters_in_contig_names if c not in constants.allowed_chars]
if len(characters_anvio_doesnt_like):
if dont_raise:
return False
raise ConfigError("The name of at least one contig in your BAM file %s anvio does not "
"like (%s). Please go back to your original files and make sure that "
"the characters in contig names are limited to to ASCII letters, "
"digits. Names can also contain underscore ('_'), dash ('-') and dot ('.') "
"characters. anvio knows how much work this may require for you to go back and "
"re-generate your BAM files and is very sorry for asking you to do that, however, "
"it is critical for later steps in the analysis." \
% ("contains multiple characters" if len(characters_anvio_doesnt_like) > 1 else "contains a character",
", ".join(['"%s"' % c for c in characters_anvio_doesnt_like])))
return True
def create_fasta_dir_from_sequence_sources(genome_desc, fasta_txt=None):
"""genome_desc is an instance of GenomeDescriptions"""
from anvio.summarizer import ArgsTemplateForSummarizerClass, ProfileSummarizer, Bin
if genome_desc is None and fasta_txt is None:
raise ConfigError("Anvi'o was given no internal genomes, no external genomes, and no fasta "
"files. Although anvi'o can technically go ahead and create a temporary "
"FASTA directory, what's the point if there's nothing to do?")
temp_dir = filesnpaths.get_temp_directory_path()
hash_to_name = {}
name_to_path = {}
genome_names = set([])
file_paths = set([])
if genome_desc is not None:
# first figure out internal genomes that are bound by the same collection name and
# profile db path
genome_subsets = {}
for entry in genome_desc.genomes.values():
if 'bin_id' in entry:
# if we are here, this entry represents an internal genome. we will add this genome
# to genome_subsets data structure to be processed later.
profile_and_collection_descriptor = '_'.join([entry['profile_db_path'], entry['collection_id']])
if profile_and_collection_descriptor in genome_subsets:
genome_subsets[profile_and_collection_descriptor]['genome_name_bin_name_tpl'].add((entry['name'], entry['bin_id']),)
else:
genome_subsets[profile_and_collection_descriptor] = {'genome_name_bin_name_tpl': set([(entry['name'], entry['bin_id'])]),
'profile_db_path': entry['profile_db_path'],
'contigs_db_path': entry['contigs_db_path'],
'collection_id': entry['collection_id']}
else:
# If we are here, this means this is an external genome, so we can basically take care of it here immediately.
genome_name = entry['name']
genome_names.add(genome_name)
contigs_db_path = genome_desc.genomes[genome_name]['contigs_db_path']
hash_for_output_file = hashlib.sha256(genome_name.encode('utf-8')).hexdigest()
hash_to_name[hash_for_output_file] = genome_name
path = os.path.join(temp_dir, hash_for_output_file + '.fa')
file_paths.add(path)
name_to_path[genome_name] = path
export_sequences_from_contigs_db(contigs_db_path, path)
# when we are here, all that is left to process are internal genomes collected as genome subsets.
for genome_subset in genome_subsets.values():
args = ArgsTemplateForSummarizerClass()
args.contigs_db = genome_subset['contigs_db_path']
args.profile_db = genome_subset['profile_db_path']
args.collection_name = genome_subset['collection_id']
args.output_dir = filesnpaths.get_temp_directory_path(just_the_path=True)
args.quick_summary = True
# note that we're initializing the summary class only for once for a given
# genome subset
summary = ProfileSummarizer(args, r=Run(verbose=False))
summary.init()
for genome_name, bin_name in genome_subset['genome_name_bin_name_tpl']:
genome_names.add(genome_name)
hash_for_output_file = hashlib.sha256(genome_name.encode('utf-8')).hexdigest()
hash_to_name[hash_for_output_file] = genome_name
path = os.path.join(temp_dir, hash_for_output_file + '.fa')
file_paths.add(path)
name_to_path[genome_name] = path
bin_summary = Bin(summary, bin_name)
with open(path, 'w') as fasta:
fasta.write(bin_summary.get_bin_sequence())
if fasta_txt is not None:
fastas = get_TAB_delimited_file_as_dictionary(fasta_txt, expected_fields=['name', 'path'], only_expected_fields=True)
for name in fastas.keys():
genome_names.add(name)
hash_for_output_file = hashlib.sha256(name.encode('utf-8')).hexdigest()
hash_to_name[hash_for_output_file] = name
source = fastas[name]['path']
path = os.path.join(temp_dir, hash_for_output_file + '.fa')
file_paths.add(path)
name_to_path[name] = path
with open(path, 'w') as dest:
with open(source, 'r') as src:
dest.write(src.read())
return temp_dir, hash_to_name, genome_names, name_to_path
def gen_NEXUS_format_partition_file_for_phylogenomics(partition_file_path, sequence_lengths, separator='', run=run, progress=progress):
""" Generates a NEXUS-formatted partition file for phylogenomics. See
https://github.com/merenlab/anvio/issues/1333 for details
Parameters
==========
partition_file_path: `str`
File path to be generated.
sequence_lengths: `list` of `tuples`
A list that contains sequence names and lengths as tuples, e.g.,
[('seq_1', 100), ('seq_2', 42), ...]
separator: `str`
Characters used to separate sequences from each other in a multi-alignment
file.
run: `object`
Anvi'o run object
progress: `object`
Anvi'o progress object
Returns
=======
None
"""
filesnpaths.is_output_file_writable(partition_file_path)
if not isinstance(sequence_lengths, list):
raise ConfigError("Sequence lengths must be passed as a list of tuples.")
if not isinstance(sequence_lengths[0], tuple):
raise ConfigError("Sequence lengths must be passed as a list of tuples.")
with open(partition_file_path, 'w') as partition_file:
partition_file.write("#nexus\nbegin sets;\n")
index = 1
for sequence_name, sequence_length in sequence_lengths:
partition_file.write(" charset %s = %d-%d;\n" % (sequence_name, index, index + sequence_length - 1))
index += (sequence_length + len(separator))
partition_file.write("end;\n")
progress.reset()
run.info("Partition file", partition_file_path, mc='yellow')
run.info_single("Your partition file is ready. Please do not forget to replace placeholders for model names ('[MODEL]') "
"in this file with appropriate model names prior to your phylogenomic analysis.", nl_before=1, nl_after=1)
def get_FASTA_file_as_dictionary(file_path):
filesnpaths.is_file_exists(file_path)
filesnpaths.is_file_fasta_formatted(file_path)
d = {}
fasta = u.SequenceSource(file_path)
while next(fasta):
d[fasta.id] = fasta.seq
return d
def unique_FASTA_file(input_file_path, output_fasta_path=None, names_file_path=None, store_frequencies_in_deflines=True):
filesnpaths.is_file_exists(input_file_path)
if not output_fasta_path:
output_fasta_path = input_file_path + '.unique'
if not names_file_path:
names_file_path = output_fasta_path + '.names'
if output_fasta_path == names_file_path:
raise ConfigError("I can't unique this. Output FASTA file path can't be identical to "
"the names file path...")
if output_fasta_path == input_file_path or names_file_path == input_file_path:
raise ConfigError("Anvi'o will not unique this. Output FASTA path and names file path should "
"be different from the the input file path...")
filesnpaths.is_output_file_writable(output_fasta_path)
filesnpaths.is_output_file_writable(names_file_path)
input_fasta = u.SequenceSource(input_file_path, unique=True)
output_fasta = u.FastaOutput(output_fasta_path)
names_file = open(names_file_path, 'w')
names_dict = {}
while next(input_fasta):
output_fasta.store(input_fasta, split=False, store_frequencies=store_frequencies_in_deflines)
names_file.write('%s\t%s\n' % (input_fasta.id, ','.join(input_fasta.ids)))
names_dict[input_fasta.id] = input_fasta.ids
output_fasta.close()
names_file.close()
return output_fasta_path, names_file_path, names_dict
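# A hedged usage sketch (file paths are hypothetical). Given an input FASTA
# with redundant sequences, this produces a de-replicated FASTA plus a names
# file mapping each representative id to the ids it subsumes:
#
#   fasta, names, names_dict = unique_FASTA_file('reads.fa')
#   # fasta      -> 'reads.fa.unique'
#   # names      -> 'reads.fa.unique.names'
#   # names_dict -> {'read_1': ['read_1', 'read_7', 'read_9'], ...}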
def ununique_BLAST_tabular_output(tabular_output_path, names_dict):
"""FIXME <A one line descriptor here>
Notes
=====
- Assumes outfmt has `qseqid` and `sseqid` as 1st and 2nd columns, respectively
"""
new_search_output_path = tabular_output_path + '.ununiqued'
new_tabular_output = open(new_search_output_path, 'w')
for line in open(tabular_output_path):
fields = line.strip().split('\t')
for query_id in names_dict[fields[0]]:
for subject_id in names_dict[fields[1]]:
new_tabular_output.write('%s\t%s\t%s\n' % (query_id, subject_id, '\t'.join(fields[2:])))
new_tabular_output.close()
shutil.move(tabular_output_path, tabular_output_path + '.unique')
shutil.move(new_search_output_path, tabular_output_path)
return tabular_output_path
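# To illustrate the expansion above with a hypothetical names_dict: a single
# unique-vs-unique hit is replicated for every original query/subject pair,
# so one line can become many:
#
#   names_dict = {'q_rep': ['q_1', 'q_2'], 's_rep': ['s_1']}
#   # input line : q_rep  s_rep  99.1  ...
#   # output     : q_1    s_1    99.1  ...
#   #              q_2    s_1    99.1  ...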
def get_BLAST_tabular_output_as_dict(tabular_output_path, target_id_parser_func=None, query_id_parser_func=None):
"""Takes a BLAST output, returns a dict where each query appears only once!!
If there are multiple hits for a given query, the one with lower e-value.
remains in the dict.
Notes
=====
- Works only for the default "-outfmt 6"
"""
results_dict = {}
for line in open(tabular_output_path):
fields = line.strip().split('\t')
query_id = fields[0] if not query_id_parser_func else query_id_parser_func(fields[0])
target_id = fields[1] if not target_id_parser_func else target_id_parser_func(fields[1])
e_value = float(fields[10])
if query_id in results_dict:
if e_value > results_dict[query_id]['evalue']:
continue
results_dict[query_id] = {'hit': target_id, 'evalue': e_value}
return results_dict
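# A small sketch of the e-value filter above, using made-up `-outfmt 6` rows
# (12 columns; the e-value is the 11th):
#
#   # query_A  target_X  ...  1e-50  100
#   # query_A  target_Y  ...  1e-10  80
#   get_BLAST_tabular_output_as_dict('hits.txt')
#   # => {'query_A': {'hit': 'target_X', 'evalue': 1e-50}}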
def store_dict_as_FASTA_file(d, output_file_path, wrap_from=200):
filesnpaths.is_output_file_writable(output_file_path)
output = open(output_file_path, 'w')
for key in d:
output.write('>%s\n' % key)
if wrap_from:
output.write('%s\n' % textwrap.fill(d[key], wrap_from, break_on_hyphens=False))
else:
output.write('%s\n' % (d[key]))
output.close()
return True
def export_sequences_from_contigs_db(contigs_db_path, output_file_path, seq_names_to_export=None, splits_mode=False, rna_alphabet=False, truncate=True, just_do_it=False, run=run):
"""Export sequences from a contigs database."""
filesnpaths.is_output_file_writable(output_file_path)
contigs_db = db.DB(contigs_db_path, anvio.__contigs__version__)
contig_sequences_dict = contigs_db.get_table_as_dict(t.contig_sequences_table_name, string_the_key = True)
splits_info_dict = contigs_db.get_table_as_dict(t.splits_info_table_name)
contigs_db.disconnect()
output_fasta = u.FastaOutput(output_file_path)
FORMAT = lambda seq: seq.replace('T', 'U') if rna_alphabet else seq
if not seq_names_to_export:
if splits_mode:
seq_names_to_export = sorted(splits_info_dict.keys())
else:
seq_names_to_export = sorted(contig_sequences_dict.keys())
else:
contig_names = [contig_name for contig_name in seq_names_to_export if contig_name in contig_sequences_dict]
split_names = [split_name for split_name in seq_names_to_export if split_name in splits_info_dict]
missing_names = [name for name in seq_names_to_export if name not in contig_names and name not in split_names]
if splits_mode:
mode = "splits"
appropriate_seq_names = split_names
else:
mode = "contigs"
appropriate_seq_names = contig_names
if len(appropriate_seq_names) < len(seq_names_to_export):
if just_do_it:
run.warning("Not all the sequences you requested are %s in this CONTIGS.db. %d names are contigs, "
"%d are splits, and %d are neither. BUT you're in just-do-it mode and we know you're in charge, so we'll "
"proceed using any appropriate names." % \
(mode, len(contig_names), len(split_names), len(missing_names),))
seq_names_to_export = appropriate_seq_names
else:
raise ConfigError("Not all the sequences you requested are %s in this CONTIGS.db. %d names are contigs, "
"%d are splits, and %d are neither. If you want to live on the edge and try to "
"proceed using any appropriate names, try out the `--just-do-it` flag." % \
(mode, len(contig_names), len(split_names), len(missing_names)))
for seq_name in seq_names_to_export:
if splits_mode:
s = splits_info_dict[seq_name]
sequence = FORMAT(contig_sequences_dict[s['parent']]['sequence'][s['start']:s['end']])
else:
sequence = FORMAT(contig_sequences_dict[seq_name]['sequence'])
output_fasta.write_id(seq_name)
output_fasta.write_seq(sequence, split=truncate)
return True
def gen_gexf_network_file(units, samples_dict, output_file, sample_mapping_dict=None,
unit_mapping_dict=None, project=None, sample_size=8, unit_size=2,
skip_sample_labels=False, skip_unit_labels=False):
"""A function that generates an XML network description file for Gephi.
The two minimum required inputs are `units` and `samples_dict`.
Simply, `samples_dict` is a dictionary that shows the distribution of `units` and their
frequencies across samples. Here is an example `units` variable (which is a type of `list`):
units = ['unit_1', 'unit_2', ... 'unit_n']
and a corresponding `samples_dict` would look like this:
samples_dict = {'sample_1': {'unit_1': 0.5,
'unit_2': 0.2,
...,
'unit_n': 0.1
},
'sample_2': { (...)
},
(...),
'sample_n': { (...)
}
}
"""
filesnpaths.is_output_file_writable(output_file)
output = open(output_file, 'w')
samples = sorted(samples_dict.keys())
sample_mapping_categories = sorted([k for k in list(sample_mapping_dict.values())[0].keys() if k != 'colors']) if sample_mapping_dict else None
unit_mapping_categories = sorted([k for k in list(unit_mapping_dict.keys()) if k not in ['colors', 'labels']]) if unit_mapping_dict else None
sample_mapping_category_types = []
if sample_mapping_dict:
for category in sample_mapping_categories:
if RepresentsFloat(list(sample_mapping_dict.values())[0][category]):
sample_mapping_category_types.append('double')
else:
sample_mapping_category_types.append('string')
output.write('''<?xml version="1.0" encoding="UTF-8"?>\n''')
output.write('''<gexf xmlns:viz="http:///www.gexf.net/1.1draft/viz" xmlns="http://www.gexf.net/1.2draft" version="1.2">\n''')
output.write('''<meta lastmodifieddate="2010-01-01+23:42">\n''')
output.write(''' <creator>Oligotyping pipeline</creator>\n''')
if project:
output.write(''' <creator>Network description for %s</creator>\n''' % (project))
output.write('''</meta>\n''')
output.write('''<graph type="static" defaultedgetype="undirected">\n\n''')
if sample_mapping_dict:
output.write('''<attributes class="node" type="static">\n''')
for i in range(0, len(sample_mapping_categories)):
category = sample_mapping_categories[i]
category_type = sample_mapping_category_types[i]
output.write(''' <attribute id="%d" title="%s" type="%s" />\n''' % (i, category, category_type))
output.write('''</attributes>\n\n''')
# FIXME: IDK what the hell is this one about:
if unit_mapping_dict:
output.write('''<attributes class="edge">\n''')
for i in range(0, len(unit_mapping_categories)):
category = unit_mapping_categories[i]
output.write(''' <attribute id="%d" title="%s" type="string" />\n''' % (i, category))
output.write('''</attributes>\n\n''')
output.write('''<nodes>\n''')
for sample in samples:
if skip_sample_labels:
output.write(''' <node id="%s">\n''' % (sample))
else:
output.write(''' <node id="%s" label="%s">\n''' % (sample, sample))
output.write(''' <viz:size value="%d"/>\n''' % sample_size)
if sample_mapping_dict and 'colors' in sample_mapping_dict[sample]:
output.write(''' <viz:color r="%d" g="%d" b="%d" a="1"/>\n''' %\
HTMLColorToRGB(sample_mapping_dict[sample]['colors'], scaled=False))
if sample_mapping_categories:
output.write(''' <attvalues>\n''')
for i in range(0, len(sample_mapping_categories)):
category = sample_mapping_categories[i]
output.write(''' <attvalue id="%d" value="%s"/>\n''' % (i, sample_mapping_dict[sample][category]))
output.write(''' </attvalues>\n''')
output.write(''' </node>\n''')
for unit in units:
if skip_unit_labels:
output.write(''' <node id="%s">\n''' % (unit))
else:
if unit_mapping_dict and 'labels' in unit_mapping_dict:
output.write(''' <node id="%s" label="%s">\n''' % (unit, unit_mapping_dict['labels'][unit]))
else:
output.write(''' <node id="%s">\n''' % (unit))
output.write(''' <viz:size value="%d" />\n''' % unit_size)
if unit_mapping_categories:
output.write(''' <attvalues>\n''')
for i in range(0, len(unit_mapping_categories)):
category = unit_mapping_categories[i]
output.write(''' <attvalue id="%d" value="%s"/>\n''' % (i, unit_mapping_dict[category][unit]))
output.write(''' </attvalues>\n''')
output.write(''' </node>\n''')
output.write('''</nodes>\n''')
edge_id = 0
output.write('''<edges>\n''')
for sample in samples:
for i in range(0, len(units)):
unit = units[i]
if samples_dict[sample][unit] > 0.0:
if unit_mapping_dict:
output.write(''' <edge id="%d" source="%s" target="%s" weight="%f">\n''' % (edge_id, unit, sample, samples_dict[sample][unit]))
if unit_mapping_categories:
output.write(''' <attvalues>\n''')
for i in range(0, len(unit_mapping_categories)):
category = unit_mapping_categories[i]
output.write(''' <attvalue id="%d" value="%s"/>\n''' % (i, unit_mapping_dict[category][unit]))
output.write(''' </attvalues>\n''')
output.write(''' </edge>\n''')
else:
output.write(''' <edge id="%d" source="%s" target="%s" weight="%f" />\n''' % (edge_id, unit, sample, samples_dict[sample][unit]))
edge_id += 1
output.write('''</edges>\n''')
output.write('''</graph>\n''')
output.write('''</gexf>\n''')
output.close()
def is_ascii_only(text):
"""test whether 'text' is composed of ASCII characters only"""
return all(ord(c) < 128 for c in text)
def get_bams_and_profiles_txt_as_data(file_path):
"""bams-and-profiles.txt is an anvi'o artifact with four columns.
This function will sanity check one, process it, and return data.
"""
COLUMN_DATA = lambda x: get_column_data_from_TAB_delim_file(file_path, [columns_found.index(x)])[columns_found.index(x)][1:]
if not filesnpaths.is_file_tab_delimited(file_path, dont_raise=True):
raise ConfigError(f"The bams and profiles txt file must be a TAB-delimited flat text file :/ "
f"The file you have at '{file_path}' is nothing of that sorts.")
expected_columns = ['name', 'contigs_db_path', 'profile_db_path', 'bam_file_path']
columns_found = get_columns_of_TAB_delim_file(file_path, include_first_column=True)
if not set(expected_columns).issubset(set(columns_found)):
raise ConfigError(f"A bams and profiles txt file is supposed to have at least the columns {', '.join(expected_columns)}.")
names = COLUMN_DATA('name')
if len(set(names)) != len(names):
raise ConfigError("Every name listed in the `names` column in a bams and profiles txt must be unique :/ "
"You have some redundant names in yours.")
contigs_db_paths = COLUMN_DATA('contigs_db_path')
if len(set(contigs_db_paths)) != 1:
raise ConfigError("All single profiles in bams and profiles file must be associated with the same "
"contigs database. Meaning, you have to use the same contigs database path for "
"every entry. Confusing? Yes. Still a rule? Yes.")
profile_db_paths = COLUMN_DATA('profile_db_path')
if len(set(profile_db_paths)) != len(profile_db_paths):
raise ConfigError("You listed the same profile database more than once in your bams and profiles txt file :/")
bam_file_paths = COLUMN_DATA('bam_file_path')
if len(set(bam_file_paths)) != len(bam_file_paths):
raise ConfigError("You listed the same BAM file more than once in your bams and profiles txt file :/")
contigs_db_path = contigs_db_paths[0]
profiles_and_bams = get_TAB_delimited_file_as_dictionary(file_path)
for sample_name in profiles_and_bams:
profiles_and_bams[sample_name].pop('contigs_db_path')
filesnpaths.is_file_bam_file(profiles_and_bams[sample_name]['bam_file_path'])
is_profile_db_and_contigs_db_compatible(profiles_and_bams[sample_name]['profile_db_path'], contigs_db_path)
return contigs_db_path, profiles_and_bams
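# For orientation, a hypothetical bams-and-profiles.txt that would pass the
# sanity checks above (every row shares the same contigs db; names, profile
# db paths, and BAM paths are all unique):
#
#   name       contigs_db_path  profile_db_path  bam_file_path
#   sample_01  CONTIGS.db       S01/PROFILE.db   S01.bam
#   sample_02  CONTIGS.db       S02/PROFILE.db   S02.bam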
def get_samples_txt_file_as_dict(file_path, run=run, progress=progress):
"Samples txt file is a commonly-used anvi'o artifact to describe FASTQ file paths for input samples"
filesnpaths.is_file_tab_delimited(file_path)
expected_columns = ['sample', 'r1', 'r2']
possible_columns = expected_columns + ['group']
columns_found = get_columns_of_TAB_delim_file(file_path, include_first_column=True)
extra_columns = set(columns_found).difference(set(possible_columns))
if not set(expected_columns).issubset(set(columns_found)):
raise ConfigError(f"A samples txt file is supposed to have at least the columns {', '.join(expected_columns)}.")
if len(extra_columns):
run.warning(f"Your samples txt file contains {pluralize('extra column', len(extra_columns))}: "
f"{', '.join(extra_columns)}. It is not a deal breaker, so anvi'o will continue with "
f"business, but we wanted you to be aware of the fact that your input file does not "
f"fully match anvi'o expectations from this file type.")
samples_txt = get_TAB_delimited_file_as_dictionary(file_path)
samples_with_missing_files = []
samples_with_identical_r1_r2_files = []
for sample_name in samples_txt:
check_sample_id(sample_name)
if not os.path.exists(samples_txt[sample_name]['r1']) or not os.path.exists(samples_txt[sample_name]['r2']):
samples_with_missing_files.append(sample_name)
if samples_txt[sample_name]['r1'] == samples_txt[sample_name]['r2']:
samples_with_identical_r1_r2_files.append(sample_name)
if len(samples_with_missing_files):
raise ConfigError(f"Bad news. Your samples txt contains {pluralize('sample', len(samples_with_missing_files))} "
f"({', '.join(samples_with_missing_files)}) with missing files (by which we mean that the "
f"r1/r2 paths are there, but the files they point to are not).")
if len(samples_with_identical_r1_r2_files):
raise ConfigError(f"Interesting. Your samples txt contains {pluralize('sample', len(samples_with_missing_files))} "
f"({', '.join(samples_with_identical_r1_r2_files)}) where r1 and r2 file paths are identical. Not OK.")
return samples_txt
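# A hypothetical samples.txt that satisfies the checks above (r1/r2 paths
# must exist and differ for each sample; the `group` column is optional):
#
#   sample     group  r1                  r2
#   sample_01  WT     reads/S01_R1.fastq  reads/S01_R2.fastq
#   sample_02  KO     reads/S02_R1.fastq  reads/S02_R2.fastq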
def get_primers_txt_file_as_dict(file_path, run=run, progress=progress):
"""Primers-txt is an anvi'o artifact for primer sequencs."""
filesnpaths.is_file_tab_delimited(file_path)
columns_found = get_columns_of_TAB_delim_file(file_path, include_first_column=True)
if 'name' not in columns_found:
progress.reset()
raise ConfigError("A primers-txt file should have a column that is called `name` for the primer name.")
if 'primer_sequence' not in columns_found:
progress.reset()
raise ConfigError("A primers-txt file should have a column that is called `primer_sequence` for the primer sequence.")
if len(columns_found) < 2:
progress.reset()
raise ConfigError("A primers-txt file should have at least two columns - one for primer names, and one for primer sequences.")
item_column = columns_found[0]
if item_column != 'name':
progress.reset()
raise ConfigError("The first column in your primers-txt file does not seem to be `name`. Anvi'o expects the first "
"column to have sequence names.")
primers_txt = get_TAB_delimited_file_as_dictionary(file_path)
return primers_txt
def get_groups_txt_file_as_dict(file_path, run=run, progress=progress):
"""Groups-txt is an anvi'o artifact associating items with groups. This function extracts this file into a set of dictionaries.
Note that it only extracts the first column of the file (which will contain the 'item' or 'sample' information and can have any
header - let's call these the items) and the 'group' column of the file. Then it will return the following:
Returns
=======
item_to_group_dict : dict
Dictionary in which keys are items and values are groups
group_to_item_dict : dict
Dictionary in which keys are groups and values are lists of items in that group
"""
filesnpaths.is_file_tab_delimited(file_path)
columns_found = get_columns_of_TAB_delim_file(file_path, include_first_column=True)
if 'group' not in columns_found:
raise ConfigError("A groups-txt file should have a single column that is called `group`.")
if len(columns_found) < 2:
raise ConfigError("A groups-txt file should have at least two columns - one for item names, and one for groups names.")
item_column = columns_found[0]
if item_column == 'group':
raise ConfigError("The first column in your groups-txt file appears to be called 'group'. Sadly, anvi'o rather rigidly "
"expects the first column to have item names, not group names, so you will have to re-format it. Sorry "
"for any inconvenience.")
groups_txt = get_TAB_delimited_file_as_dictionary(file_path)
group_to_item_dict = {}
item_to_group_dict = {}
for item in groups_txt:
group_name = groups_txt[item]['group']
if item in item_to_group_dict:
raise ConfigError(f"Uh oh. The item {item} occurs more than once in your groups-txt file. This could explode things "
f"downstream, so we will stop you right there. Please remove all duplicate items from this file. :)")
item_to_group_dict[item] = group_name
if not group_name in group_to_item_dict:
group_to_item_dict[group_name] = []
group_to_item_dict[group_name].append(item)
if len(group_to_item_dict.keys()) < 2:
raise ConfigError("We notice that there is only one group in your groups-txt file. In the current applications that require "
"a groups-txt, we expect to have at least two groups, so we think this is an error. If the context you are "
"working in should allow for only one group in this file, please feel free to let us know.")
return item_to_group_dict, group_to_item_dict
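# A sketch of the two dictionaries returned above, for a hypothetical
# groups-txt with items i1..i3 in groups g1/g2:
#
#   # item    group
#   # i1      g1
#   # i2      g1
#   # i3      g2
#   item_to_group_dict  # => {'i1': 'g1', 'i2': 'g1', 'i3': 'g2'}
#   group_to_item_dict  # => {'g1': ['i1', 'i2'], 'g2': ['i3']}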
def get_TAB_delimited_file_as_dictionary(file_path, expected_fields=None, dict_to_append=None, column_names=None,\
column_mapping=None, indexing_field=0, separator='\t', no_header=False,\
ascii_only=False, only_expected_fields=False, assign_none_for_missing=False,\
none_value=None, empty_header_columns_are_OK=False, return_failed_lines=False):
"""Takes a file path, returns a dictionary.
- If `return_failed_lines` is True, the function will not throw an exception, but instead
return a list of `failed_lines` along with a dictionary of final results.
"""
if expected_fields and (not isinstance(expected_fields, list) and not isinstance(expected_fields, set)):
raise ConfigError("'expected_fields' variable must be a list (or a set).")
if only_expected_fields and not expected_fields:
raise ConfigError("'only_expected_fields' variable guarantees that there are no more fields present "
"in the input file but the ones requested with 'expected_fields' variable. If you "
"need to use this flag, you must also be explicit about what fields you expect to "
"find in the file.")
filesnpaths.is_file_plain_text(file_path)
filesnpaths.is_file_tab_delimited(file_path, separator=separator)
failed_lines = []
column_mapping_for_line_failed = None
f = open(file_path, 'r')  # universal newlines are on by default in Python 3; the old 'rU' mode is gone
# learn the number of fields and reset the file:
num_fields = len(f.readline().strip('\n').split(separator))
f.seek(0)
# if there is no file header, make up a columns list:
if no_header and not column_names:
column_names = ['column_%05d' % i for i in range(0, num_fields)]
if column_names:
columns = column_names
if num_fields != len(columns):
raise ConfigError("Number of column names declared (%d) differs from the number of columns "
"found (%d) in the matrix ('%s') :/" % (len(columns), num_fields, file_path))
# now we set the column names. if the file had its header, we must discard
# the first line. so here we go:
if not no_header:
f.readline()
else:
columns = f.readline().strip('\n').split(separator)
if not empty_header_columns_are_OK and min(map(len, columns)) == 0:
raise ConfigError("At least one of the column headers in your tab delimited file '%s' "
"is empty." % file_path)
if expected_fields:
for field in expected_fields:
if field not in columns:
raise ConfigError("The file '%s' does not contain the right type of header. It was expected "
"to have these: '%s', however it had these: '%s'" % (file_path,
', '.join(expected_fields),
', '.join(columns[1:])))
if only_expected_fields:
for field in columns:
if field not in expected_fields:
raise ConfigError("There are more fields in the file '%s' than the expected fields :/ "
"Anvi'o is telling you about this because get_TAB_delimited_file_as_dictionary "
"function is called with `only_expected_fields` flag turned on." % (file_path))
d = {}
line_counter = 0
for line in f.readlines():
if ascii_only:
if not is_ascii_only(line):
raise ConfigError("The input file conitans non-ascii characters at line number %d. Those lines "
"either should be removed, or edited." % (line_counter + 2))
line_fields = [f if f else None for f in line.strip('\n').split(separator)]
if line_fields and line_fields[0] == None:
raise ConfigError("The line number %d in '%s' has no data in its first column, and this doesn't "
"seem right at all :/" % (line_counter + 1, file_path))
if column_mapping:
column_mapping_for_line_failed = False
updated_line_fields = []
for i in range(0, len(line_fields)):
try:
if line_fields[i] == None and column_mapping[i] in [float, int]:
updated_line_fields.append(column_mapping[i](0))
else:
updated_line_fields.append(column_mapping[i](line_fields[i]))
except NameError:
if return_failed_lines:
failed_lines.append(line_counter + 1)
column_mapping_for_line_failed = True
else:
raise ConfigError("Mapping function '%s' did not work on value '%s'. These functions can be native "
"Python functions, such as 'str', 'int', or 'float', or anonymous functions "
"defined using lambda notation." % (column_mapping[i], line_fields[i]))
except TypeError:
if return_failed_lines:
failed_lines.append(line_counter + 1)
column_mapping_for_line_failed = True
else:
raise ConfigError("Mapping function '%s' does not seem to be a proper Python function :/" % column_mapping[i])
except ValueError:
if return_failed_lines:
failed_lines.append(line_counter + 1)
column_mapping_for_line_failed = True
else:
raise ConfigError("Mapping function '%s' did not like the value '%s' in column number %d "
"of the input matrix '%s' :/" % (column_mapping[i], line_fields[i], i + 1, file_path))
line_fields = updated_line_fields
if column_mapping_for_line_failed:
continue
if indexing_field == -1:
entry_name = 'line__%09d__' % line_counter
else:
entry_name = line_fields[indexing_field]
if entry_name in d:
raise ConfigError("The entry name %s appears more than once in the TAB-delimited file '%s'. We assume that you "
"did not do it that purposefully, but if you need this file in this form, then feel free to "
"contact us so we can try to find a solution for you. But if you have gotten this error while "
"working with HMMs, do not contact us since helping you in that case is beyond us (see the issue "
"#1206 for details))." % (entry_name, file_path))
d[entry_name] = {}
for i in range(0, len(columns)):
if i == indexing_field:
continue
d[entry_name][columns[i]] = line_fields[i]
line_counter += 1
# we have the dict, but we will not return it the way it is if it's supposed to be appended to an
# already existing dictionary.
if dict_to_append:
# we don't want to go through keys in d each time we want to add stuff to 'dict_to_append', so we keep the keys we
# find in the first item of the dict in another variable. this is potentially very dangerous if not every
# item in 'd' has an identical set of keys.
keys = list(d.values())[0].keys()
for entry in dict_to_append:
if entry not in d:
# so dict to append is missing a key that is in the dict to be appended. if the user did not
# ask us to add None for these entries via none_for_missing, we are going to make a noise,
# otherwise we will tolerate it.
if not assign_none_for_missing:
raise ConfigError("Appending entries to the already existing dictionary from file '%s' failed "
"as the entry %s does not appear to be in the file." % (file_path, entry))
else:
for key in keys:
dict_to_append[entry][key] = none_value
else:
for key in keys:
dict_to_append[entry][key] = d[entry][key]
return dict_to_append
# this is here for backward compatibility.
failed_lines = list(set(failed_lines)) # confirming we are not returning multiple instances of the same line
if return_failed_lines:
return d, failed_lines
return d
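# A hedged usage sketch for the column_mapping machinery above: with a
# hypothetical three-column file and per-column casting functions, the first
# column becomes the key and the remaining values are cast on the fly:
#
#   # contig    length  gc
#   # c_000001  15000   0.42
#   d = get_TAB_delimited_file_as_dictionary('contigs.txt',
#                                            column_mapping=[str, int, float])
#   # => {'c_000001': {'length': 15000, 'gc': 0.42}}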
def get_filtered_dict(input_dict, item, accepted_values_set):
# removes any entry from d, where the value of the 'item' of items in d does not match
# with 'accepted_values'
if not isinstance(accepted_values_set, type(set([]))):
raise ConfigError("get_filtered_dict: values must be type of set([]).")
filtered_dict = {}
for entry_id in input_dict:
if input_dict[entry_id][item] not in accepted_values_set:
continue
else:
filtered_dict[entry_id] = input_dict[entry_id]
return filtered_dict
def anvio_hmm_target_term_to_alphabet_and_context(target):
"""Alphabet and context recovery from the target term in anvi'o HMM source directories."""
alphabet = None
context = None
fields = target.split(':')
if len(fields) == 2:
alphabet, context = fields
elif len(fields) == 1:
alphabet = fields[0]
else:
raise ConfigError("HMM stuff is upset with you. There are unexpected number of fields in the target "
"file.")
if alphabet not in ['AA', 'DNA', 'RNA']:
raise ConfigError("The alphabet in the target file (%s) isnot one of the alphabets anvi'o knows how to "
"work with. Here is a list for you to choose from: 'DNA', 'RNA', or 'AA'" % alphabet)
if context not in ['GENE', 'CONTIG', None]:
raise ConfigError("The context you defined in the target file (%s) does not make any sense to anvi'o. "
"It would have, if you had chosen one of these: 'GENE', 'CONTIG'." % context)
if alphabet == 'AA' and context == 'CONTIG':
raise ConfigError("You can't use the AA alphabet with the CONTIGS context :/ You need to set your target "
"again. 'AA' or 'AA:GENE' would have worked much better.")
if not context:
context = 'GENE'
return alphabet, context
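# The target term grammar above in a nutshell (all inputs hypothetical):
#
#   anvio_hmm_target_term_to_alphabet_and_context('AA:GENE')     # => ('AA', 'GENE')
#   anvio_hmm_target_term_to_alphabet_and_context('DNA')         # => ('DNA', 'GENE')
#   anvio_hmm_target_term_to_alphabet_and_context('RNA:CONTIG')  # => ('RNA', 'CONTIG')
#   anvio_hmm_target_term_to_alphabet_and_context('AA:CONTIG')   # raises ConfigError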
def get_pruned_HMM_hits_dict(hmm_hits_dict):
"""This function will identify HMM hits that are almost identical and keep only the most significant hit.
This is an example situation where this problem occurs:
http://i.imgur.com/2ZxDchp.png
And this is how that context looks like after this function does its magic:
http://i.imgur.com/cAPKR0E.png
The data shown in the first screenshot resolves to an input dictionary like this one:
{
1: {'entry_id': 0, 'gene_name': 'Bacterial_23S_rRNA','contig_name': 'c_split_00001', 'start': 3175, 'stop': 267, 'e_value': 0.0},
2: {'entry_id': 1, 'gene_name': 'Bacterial_16S_rRNA','contig_name': 'c_split_00001', 'start': 4996, 'stop': 3439, 'e_value': 0.0},
3: {'entry_id': 2, 'gene_name': 'Archaeal_23S_rRNA', 'contig_name': 'c_split_00001', 'start': 3162, 'stop': 275, 'e_value': 0.0},
4: {'entry_id': 3, 'gene_name': 'Archaeal_16S_rRNA', 'contig_name': 'c_split_00001', 'start': 4988, 'stop': 3441, 'e_value': 7.7e-240}
}
where entry 1 and entry 2 should be removed (because they overlap with 3 and 4, respectively, and they are shorter).
"""
# first create a simpler data structure where all hits in a single contig are accessible directly.
hits_per_contig = {}
for entry in hmm_hits_dict:
e = hmm_hits_dict[entry]
contig_name = e['contig_name']
start = e['start'] if e['start'] < e['stop'] else e['stop']
stop = e['stop'] if e['start'] < e['stop'] else e['start']
length = stop - start
if contig_name not in hits_per_contig:
hits_per_contig[contig_name] = []
hits_per_contig[contig_name].append((length, entry, start, stop), )
# go through hits in each contig to find overlapping hits
entry_ids_to_remove = set([])
for hits in hits_per_contig.values():
indices_with_matches = set([])
for i in range(0, len(hits)):
if i in indices_with_matches:
# this one is already processed and is matching
# with something else. no need to waste time
continue
overlapping_hits_indices = set([])
for j in range(i + 1, len(hits)):
alignment_start = max(hits[i][2], hits[j][2])
alignment_end = min(hits[i][3], hits[j][3])
shortest_of_the_two = min(hits[i][0], hits[j][0])
if alignment_end - alignment_start > shortest_of_the_two / 2:
# the overlap between these two is more than half of the length of the
# shorter one, so these two hits count as overlapping
overlapping_hits_indices.add(i)
overlapping_hits_indices.add(j)
indices_with_matches.add(j)
if overlapping_hits_indices:
# here we have a set of overlapping indices. we will sort them based on length,
# and add the entry id of every match except the longest one into the shitkeeping
# variable
[entry_ids_to_remove.add(r) for r in sorted([hits[ind][1] for ind in overlapping_hits_indices], reverse=True)[1:]]
# time to remove all the entry ids from the actual dictionary
for entry_id in entry_ids_to_remove:
hmm_hits_dict.pop(entry_id)
return hmm_hits_dict
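# To make the overlap test above concrete with two hypothetical hits on the
# same contig: hit A spans 100..400 (length 300) and hit B spans 250..500
# (length 250). Their alignment overlap is min(400, 500) - max(100, 250) = 150,
# which exceeds 250 / 2, so the two count as overlapping and only the longer
# hit (A) is kept.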
def get_HMM_sources_dictionary(source_dirs=[]):
"""An anvi'o HMM source directory importer.
The directory must have six files:
- genes.hmm.gz: compressed HMMs, one for each gene.
- genes.txt: a three-column file that lists every gene name that appears in genes.hmm.gz, its accession
number (if there is one), and the HMM source it comes from.
- kind.txt: the kind of genes in this source. i.e., 'antibiotic_genes', or 'transporters'. the
term 'singlecopy' is a special one, and should be used with a domain term: 'singlecopy:bacteria',
'singlecopy:archaea', etc. Anvi'o utilizes single-copy sources to assess the completion of MAGs
later.
- reference.txt: where this collection is coming from.
- target.txt: the target term. see `anvio_hmm_target_term_to_alphabet_and_context` for details.
- noise_cutoff_terms.txt: how noisy hits should be dealt with; see this for details: https://github.com/merenlab/anvio/issues/498
For an example HMM source directory, take a look at an example in the codebase:
https://github.com/meren/anvio/tree/master/anvio/data/hmm/Campbell_et_al
"""
if not isinstance(source_dirs, type([])):
raise ConfigError("source_dirs parameter must be a list (get_HMM_sources_dictionary).")
sources = {}
allowed_chars_for_proper_sources = constants.allowed_chars.replace('.', '').replace('-', '')
PROPER = lambda w: not len([c for c in w if c not in allowed_chars_for_proper_sources]) \
and len(w) >= 3 \
and w[0] not in '_0123456789'
R = lambda f: open(os.path.join(source, f), 'r').readlines()[0].strip()
for source in source_dirs:
if source.endswith('/'):
source = source[:-1]
if not PROPER(os.path.basename(source)):
raise ConfigError(f"One of the search database directories ({os.path.basename(source)}) contains characters "
"in its name anvio does not like. Directory names should be at least three characters long "
"and must not contain any characters but ASCII letters, digits and underscore")
expected_files = ['reference.txt', 'kind.txt', 'genes.txt', 'genes.hmm.gz', 'target.txt', 'noise_cutoff_terms.txt']
missing_files = [f for f in expected_files if not os.path.exists(os.path.join(source, f))]
if missing_files:
raise ConfigError(f"The HMM source '{os.path.basename(source)}' makes anvi'o unhappy. Each HMM source directory "
f"must contain a specific set of {len(expected_files)} files, and nothing more. See this URL "
f"for detailes: http://merenlab.org/software/anvio/help/artifacts/hmm-source/")
empty_files = [f for f in expected_files if os.stat(os.path.join(source, f)).st_size == 0]
if empty_files:
raise ConfigError("One or more files for the HMM source '%s' seems to be empty. Which creates lots of "
"counfusion around these parts of the code. Anvi'o could set some defualts for you, "
"but it would be much better if you set your own defaults explicitly. You're not "
"sure what would make a good default for your HMM collection? Reach out to "
"a developer, and they will help you! Here are the files that are empty: %s." % \
(os.path.basename(source), ', '.join(empty_files)))
ref = R('reference.txt')
kind = R('kind.txt')
target = R('target.txt')
noise_cutoff_terms = R('noise_cutoff_terms.txt')
anvio_hmm_target_term_to_alphabet_and_context(target)
domain = None
if kind == 'singlecopy' and kind.count(':') == 0:
raise ConfigError("This HMM profile seems to be a collection of single-copy core genes. Great. But for "
"this kind, you must also declare a 'domain' in your 'kind.txt' file. It is simple. "
"For instance, you could use 'singlecopy:bacteria', or 'singlecopy:archaea', or "
"'singlecopy:myspecificbranch'.")
if kind.count(':') == 1:
kind, domain = kind.split(':')
if not PROPER(kind):
raise ConfigError("'kind.txt' defines the kind of search this database offers. The kind term must be a single "
"word that is at least three characters long, and must not contain any characters but "
"ASCII letters, digits, and underscore. Here are some nice examples: 'singlecopy', "
"or 'pathogenicity', or 'noras_selection'. But yours is '%s'." % (kind))
if domain and not PROPER(domain):
raise ConfigError("That's lovely that you decided to specify a domain extension for your HMM collection in the "
"'kind.txt'. Although, your domain term is not a good one, as it must be a single "
"word that is at least three characters long, and without any characters but "
"ASCII letters, digits, and underscore. Confused? That's fine. Send an e-mail to the anvi'o "
"developers, and they will help you!")
genes = get_TAB_delimited_file_as_dictionary(os.path.join(source, 'genes.txt'), column_names=['gene', 'accession', 'hmmsource'])
sanity_check_hmm_model(os.path.join(source, 'genes.hmm.gz'), genes)
sources[os.path.basename(source)] = {'ref': ref,
'kind': kind,
'domain': domain,
'genes': list(genes.keys()),
'target': target,
'noise_cutoff_terms': noise_cutoff_terms,
'model': os.path.join(source, 'genes.hmm.gz')}
return sources
def check_misc_data_keys_for_format(data_keys_list):
"""Ensure user-provided misc data keys are compatible with the current version of anvi'o"""
if not data_keys_list:
return
# find out whether the user data contains the older implementation of stacked
# bar data type
obsolete_stackedbar_keys = [k for k in data_keys_list if k.find('!') > -1 and k.find(';') > -1]
if len(obsolete_stackedbar_keys):
key_violates_new_rule = obsolete_stackedbar_keys[0]
main_key, data_items = key_violates_new_rule.split('!')
new_rule_compatible_data_keys = ['%s!%s' % (main_key, d) for d in data_items.split(';')]
raise ConfigError("Oh no :( We recently changed the description of the stacked bar data type, and your input data "
"file still has the older version. Here is the list of those that are violating the new format: "
"%s. To avoid this issue and to turn them into the new format, you could take '%s', and present "
"it as %d separate TAB-delimited entries that look like this: %s. Sorry!" % \
(', '.join(['"%s"' % k for k in obsolete_stackedbar_keys]),
key_violates_new_rule,
len(new_rule_compatible_data_keys),
', '.join(['"%s"' % k for k in new_rule_compatible_data_keys])))
def sanity_check_hmm_model(model_path, genes):
genes = set(genes)
genes_in_model = set([])
accession_ids_in_model = []
with gzip.open(model_path, 'rt', encoding='utf-8') as f:
for line in f:
if line.startswith('NAME'):
genes_in_model.add(line.split()[1])
if line.startswith('ACC'):
accession_ids_in_model.append(line.split()[1])
if len(accession_ids_in_model) != len(set(accession_ids_in_model)):
raise ConfigError("Accession IDs in your HMM model should be unique, however, the `genes.hmm.gz` "
"file for `%s` seems to have the same accession ID (the line that starts with `ACC`) "
"more than once :(" % (os.path.abspath(model_path).split('/')[-2]))
if len(genes.difference(genes_in_model)):
raise ConfigError("Some gene names in genes.txt file does not seem to be appear in genes.hmm.gz. "
"Here is a list of missing gene names: %s" % ', '.join(list(genes.difference(genes_in_model))))
if len(genes_in_model.difference(genes)):
raise ConfigError("Some gene names in genes.hmm.gz file does not seem to be appear in genes.txt. "
"Here is a list of missing gene names: %s" % ', '.join(list(genes_in_model.difference(genes))))
def get_missing_programs_for_hmm_analysis():
missing_programs = []
for p in ['prodigal', 'hmmscan']:
try:
is_program_exists(p)
except ConfigError:
missing_programs.append(p)
return missing_programs
def get_genes_database_path_for_bin(profile_db_path, collection_name, bin_name):
if not collection_name or not bin_name:
raise ConfigError("Genes database must be associated with a collection name and a bin name :/")
return os.path.join(os.path.dirname(profile_db_path), 'GENES', '%s-%s.db' % (collection_name, bin_name))
def get_db_type_and_variant(db_path, dont_raise=False):
database = dbi(db_path, dont_raise=dont_raise)
return (database.db_type, database.variant)
def get_db_type(db_path):
return get_db_type_and_variant(db_path)[0]
def get_db_variant(db_path):
return get_db_type_and_variant(db_path)[1]
def get_required_version_for_db(db_path):
db_type = get_db_type(db_path)
if db_type not in t.versions_for_db_types:
raise ConfigError("Anvi'o was trying to get the version of the -alleged- anvi'o database '%s', but it failed "
"because it turns out it doesn't know anything about this '%s' type." % (db_path, db_type))
return t.versions_for_db_types[db_type]
def get_all_sample_names_from_the_database(db_path):
"""Returns all 'sample' names from a given database. At least it tries."""
db_type = get_db_type(db_path)
database = db.DB(db_path, get_required_version_for_db(db_path))
if db_type == 'profile':
samples = []
try:
samples = [s.strip() for s in database.get_meta_value('samples').split(',')]
except:
pass
return set(samples)
elif db_type == 'genes':
return set([str(i) for i in database.get_single_column_from_table(t.gene_level_coverage_stats_table_name, 'sample_name')])
elif db_type == 'pan':
internal_genome_names, external_genome_names = [], []
try:
internal_genome_names = [g.strip() for g in database.get_meta_value('internal_genome_names').split(',')]
except:
pass
try:
external_genome_names = [g.strip() for g in database.get_meta_value('external_genome_names').split(',')]
except:
pass
return set([s for s in internal_genome_names + external_genome_names if s])
else:
raise ConfigError("`get_all_sample_names_from_the_database` function does not know how to deal "
"with %s databases." % db_type)
def get_all_item_names_from_the_database(db_path, run=run):
"""Return all split names or gene cluster names in a given database"""
all_items = set([])
database = db.DB(db_path, get_required_version_for_db(db_path))
db_type = database.get_meta_value('db_type')
if db_type == 'profile':
if is_blank_profile(db_path):
run.warning("Someone asked for the split names in a blank profile database. Sadly, anvi'o does not keep track "
"of split names in blank profile databases. This function will return an empty set as split names "
"to not kill your mojo, but whatever you were trying to do will not work :(")
return set([])
else:
all_items = set(database.get_single_column_from_table('mean_coverage_Q2Q3_splits', 'item'))
elif db_type == 'pan':
all_items = set(database.get_single_column_from_table(t.pan_gene_clusters_table_name, 'gene_cluster_id'))
elif db_type == 'contigs':
all_items = set(database.get_single_column_from_table(t.splits_info_table_name, 'split'))
elif db_type == 'genes':
all_items = set([str(i) for i in database.get_single_column_from_table(t.gene_level_coverage_stats_table_name, 'gene_callers_id')])
else:
database.disconnect()
raise ConfigError("You wanted to get all items in the database %s, but no one here knows about its type. Seriously,\
what is '%s' anyway?" % (db_path, db_type))
if not len(all_items):
database.disconnect()
raise ConfigError("utils::get_all_item_names_from_the_database speaking. Something that should never happen happened :/ "
"There seems to be nothing in this %s database. Anvi'o is as confused as you are. Please get in touch "
"with a developer. They will love this story." % db_path)
database.disconnect()
return all_items
def get_variability_table_engine_type(table_path, dont_raise=False):
"""A non-extensive test to determine if a file was generated by anvi-gen-variability-profile,
and if it was, what engine (NT, CDN, or AA) was used.
"""
filesnpaths.is_file_tab_delimited(table_path)
columns_names = set(pd.read_csv(table_path, sep="\t", nrows = 0).columns)
if set(constants.nucleotides) < columns_names:
return "NT"
elif set(constants.codons) < columns_names:
return "CDN"
elif set(constants.amino_acids) < columns_names:
return "AA"
else:
if dont_raise:
return ""
raise ConfigError("anvi'o does not recognize %s as being a variability table generated by "
"anvi-gen-variability-profile." % table_path)
def is_contigs_db(db_path, dont_raise=False):
dbi(db_path, expecting='contigs', dont_raise=dont_raise)
return True
def is_trnaseq_db(db_path):
dbi(db_path, expecting='trnaseq')
return True
def is_pan_or_profile_db(db_path, genes_db_is_also_accepted=False):
ok_db_types = ['pan', 'profile'] + (['genes'] if genes_db_is_also_accepted else [])
dbi(db_path, expecting=ok_db_types)
return True
def is_profile_db(db_path):
dbi(db_path, expecting='profile')
return True
def is_structure_db(db_path):
dbi(db_path, expecting='structure')
return True
def is_blank_profile(db_path):
database = dbi(db_path, dont_raise=True)
if database.db_type != 'profile':
return False
return database.blank
def is_pan_db(db_path):
dbi(db_path, expecting='pan')
return True
def is_genome_storage(db_path):
dbi(db_path, expecting='genomestorage')
return True
def is_genes_db(db_path):
dbi(db_path, expecting='genes')
return True
def is_gene_caller_id(gene_caller_id, raise_if_fail=True):
"""Test whether a given `gene_caller_id` looks like a legitimate anvi'o gene caller id"""
try:
assert(int(gene_caller_id) >= 0)
except:
if raise_if_fail:
raise ConfigError(f"Anvi'o gene caller ids are represented by integers between 0 and infinity. "
f"and what you provided ('{gene_caller_id}') doesn't look like one :/")
else:
return False
return True
def is_kegg_modules_db(db_path):
dbi(db_path, expecting='modules')
return True
def is_profile_db_merged(profile_db_path):
return dbi(profile_db_path, expecting='profile').merged
def is_profile_db_and_contigs_db_compatible(profile_db_path, contigs_db_path):
pdb = dbi(profile_db_path)
cdb = dbi(contigs_db_path)
if cdb.hash != pdb.hash:
raise ConfigError(f"The contigs database and the profile database at '{profile_db_path}' "
f"does not seem to be compatible. More specifically, this contigs "
f"database is not the one that was used when %s generated this profile "
f"database (%s != %s)." % ('anvi-merge' if pdb.merged else 'anvi-profile', cdb.hash, pdb.hash))
return True
def is_structure_db_and_contigs_db_compatible(structure_db_path, contigs_db_path):
sdb = dbi(structure_db_path)
cdb = dbi(contigs_db_path)
if cdb.hash != sdb.hash:
raise ConfigError('The contigs and structure databases do not seem compatible. '
'More specifically, the contigs database is not the one that '
'was used when the structure database was created (%s != %s).'\
% (cdb.hash, sdb.hash))
return True
# # FIXME
# def is_external_genomes_compatible_with_pan_database(pan_db_path, external_genomes_path):
def get_enriched_groups(props, reps):
'''
Accepts a vector of proportions and number of replicates per group and
returns a boolean vector where each group that has proportion above
the "expected" (i.e. the overall proportion) is True and the rest are False.
'''
# if the function doesn't occur at all then test_statistic is zero and p-value is 1
if not np.count_nonzero(props):
return np.zeros(len(props))
overall_portion = np.sum(np.multiply(props, reps)) / np.sum(reps)
return props > overall_portion
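# A worked example for the test above, with made-up proportions and replicate
# counts for three groups:
#
#   props = np.array([0.8, 0.2, 0.5])
#   reps = np.array([10, 10, 20])
#   # overall = (0.8*10 + 0.2*10 + 0.5*20) / 40 = 0.5
#   get_enriched_groups(props, reps)  # => array([ True, False, False])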
def get_yaml_as_dict(file_path):
"""YAML parser"""
filesnpaths.is_file_plain_text(file_path)
try:
return yaml.load(open(file_path), Loader=yaml.FullLoader)
except Exception as e:
raise ConfigError(f"Anvi'o run into some trouble when trying to parse the file at "
f"{file_path} as a YAML file. It is likely that it is not a properly "
f"formatted YAML file and it needs editing, but here is the error "
f"message in case it clarifies things: '{e}'.")
def download_file(url, output_file_path, check_certificate=True, progress=progress, run=run):
filesnpaths.is_output_file_writable(output_file_path)
try:
if check_certificate:
response = urllib.request.urlopen(url)
else:
response = urllib.request.urlopen(url, context=ssl._create_unverified_context())
except Exception as e:
raise ConfigError(f"Something went wrong with your download attempt. Here is the "
f"problem for the url {url}: '{e}'")
file_size = 0
if 'Content-Length' in response.headers:
file_size = int(response.headers['Content-Length'])
f = open(output_file_path, 'wb')
progress.new('Downloading "%s"' % os.path.basename(output_file_path))
progress.update('...')
downloaded_size = 0
counter = 0
while True:
buffer = response.read(10000)
if buffer:
downloaded_size += len(buffer)
f.write(buffer)
if counter % 500 == 0:
if file_size:
progress.update('%.1f%%' % (downloaded_size * 100.0 / file_size))
else:
progress.update('%s' % human_readable_file_size(downloaded_size))
else:
break
counter += 1
f.close()
progress.end()
run.info('Downloaded successfully', output_file_path)
def get_remote_file_content(url, gzipped=False, timeout=None):
import requests
from io import BytesIO
if timeout:
remote_file = requests.get(url, timeout=timeout)
else:
remote_file = requests.get(url)
if remote_file.status_code == 404:
raise ConfigError("Bad news. The remote file at '%s' was not found :(" % url)
if gzipped:
buf = BytesIO(remote_file.content)
fg = gzip.GzipFile(fileobj=buf)
return fg.read().decode('utf-8')
return remote_file.content.decode('utf-8')
def get_anvio_news():
"""Reads news from anvi'o repository.
The format of the news file is expected to be like this:
# Title with spaces (01.01.1970) #
Lorem ipsum, dolor sit amet
***
# Title with spaces (01.01.1970) #
Lorem ipsum, dolor sit amet
***
# Title with spaces (01.01.1970) #
Lorem ipsum, dolor sit amet
Returns
=======
news : list
A list of dictionaries per news item
"""
try:
news = get_remote_file_content(constants.anvio_news_url, timeout=1)
except Exception as e:
raise ConfigError(f"Something went wrong reading the anvi'o news :/ This is what the "
f"downstream library had to say: {e}")
news_items = []
for news_item in news.split('***'):
if len(news_item) < 5:
# too short to parse, just skip it
continue
news_items.append({'date': news_item.split("(")[1].split(")")[0].strip(),
'title': news_item.split("#")[1].split("(")[0].strip(),
'content': news_item.split("#\n")[1].strip()})
return news_items
def download_protein_structure(protein_code, output_path=None, chain=None, raise_if_fail=True):
"""Downloads protein structures using Biopython.
Parameters
==========
protein_code : str
Each element is a 4-letter protein code
output_path : str
Path where structure is written to. Temporary directory is chosen if None
chain : str, None
If None, all chains remain in the PDB file. If specified, only the chain with the chain ID
`chain` will be saved.
raise_if_fail : bool, True
If the file does not download, raise an error
Returns
=======
output : output_path
Returns the filepath of the written file. Returns None if download failed
"""
output_dir = os.path.dirname(output_path)
if output_dir == '': output_dir = '.'
pdb_list = PDB.PDBList()
# NOTE This path is determined by Biopython's fn `pdb_list.retrieve_pdb_file`. If the logic in
# that function that determines the path name is changed, `download_protein_structure` will
# break because `temp_output_path` will be wrong.
temp_output_path = os.path.join(output_dir, f"pdb{protein_code.lower()}.ent")
try:
with SuppressAllOutput():
# We suppress output that looks like this:
# >>> WARNING: The default download format has changed from PDB to PDBx/mmCif
# >>> Downloading PDB structure '5w6y'...
pdb_list.retrieve_pdb_file(protein_code, file_format='pdb', pdir=output_dir, overwrite=True)
except:
pass
if not filesnpaths.is_file_exists(temp_output_path, dont_raise=True):
# The file wasn't downloaded
if raise_if_fail:
raise ConfigError("The protein %s could not be downloaded. Are you connected to internet?" % protein_code)
else:
return None
if chain is not None:
class ChainSelect(PDB.Select):
def accept_chain(self, chain_obj):
return 1 if chain_obj.get_id() == chain else 0
p = PDB.PDBParser()
try:
structure = p.get_structure(None, temp_output_path)
except:
# FIXME Something very rare happened on Biopython's end. We silently return the whole
# file instead of only the chain. Here is one such reason for failure we stumbled upon:
# https://github.com/biopython/biopython/issues/2819
shutil.move(temp_output_path, output_path)
return output_path
# Overwrite file with chain-only structure
io = PDB.PDBIO()
io.set_structure(structure)
io.save(temp_output_path, ChainSelect())
shutil.move(temp_output_path, output_path)
return output_path
def get_hash_for_list(l):
return 'hash' + str(hashlib.sha224(''.join(sorted(list(l))).encode('utf-8')).hexdigest()[0:8])
def get_file_md5(file_path):
hash_md5 = hashlib.md5()
with open(file_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def run_selenium_and_export_svg(url, output_file_path, browser_path=None, run=run):
if filesnpaths.is_file_exists(output_file_path, dont_raise=True):
raise FilesNPathsError("The output file already exists. Anvi'o does not like overwriting stuff.")
filesnpaths.is_output_file_writable(output_file_path)
try:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
except:
raise ConfigError("You want to export SVGs? Well, you need the Python library 'selenium' to be able to "
"do that but you don't have it. If you are lucky, you probably can install it by "
"typing 'pip install selenium' or something :/")
if browser_path:
filesnpaths.is_file_exists(browser_path)
run.info_single('You are launching an alternative browser. Keep an eye on things!', mc='red', nl_before=1)
driver = webdriver.Chrome(executable_path=browser_path)
else:
driver = webdriver.Chrome()
driver.wait = WebDriverWait(driver, 10)
driver.set_window_size(1920, 1080)
driver.get(url)
try:
WebDriverWait(driver, 300).until(EC.text_to_be_present_in_element((By.ID, "title-panel-second-line"), "Current view"))
    except TimeoutException:
        print("Timeout occurred, could not get the SVG drawing in 300 seconds.")
        driver.quit()
        return
time.sleep(1)
driver.execute_script("exportSvg(true);")
time.sleep(1)
svg = driver.find_element_by_id('panel-center')
    with open(output_file_path, 'w') as svg_file:
        svg_file.write(svg.get_attribute('innerHTML'))
driver.quit()
run.info_single('\'%s\' saved successfully.' % output_file_path)
def open_url_in_browser(url, browser_path=None, run=run):
if browser_path:
filesnpaths.is_file_exists(browser_path)
run.info_single('You are launching an alternative browser. Keep an eye on things!', mc='red', nl_before=1)
webbrowser.register('users_preferred_browser', None, webbrowser.BackgroundBrowser(browser_path))
webbrowser.get('users_preferred_browser').open_new(url)
else:
webbrowser.open_new(url)
def check_h5py_module():
"""To make sure we do have the h5py module.
    The reason this function is here is because we removed h5py from anvi'o dependencies,
but some migration scripts may still need it if the user has very old databases. In
those cases the user must install it manually."""
try:
import h5py
h5py.__version__
    except Exception:
raise ConfigError("There is an issue but it is easy to resolve and everything is fine! To continue, please "
"first install the Python module `h5py` by running `pip install h5py==2.8.0` in your "
"anvi'o environment. The reason why the standard anvi'o package does not include "
"this module is both complicated and really unimportant. Re-running the migration "
"after `h5py` is installed will make things go smootly.")
def RepresentsInt(s):
try:
int(s)
return True
except ValueError:
return False
def RepresentsFloat(s):
try:
float(s)
return True
except ValueError:
return False
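# Note: RepresentsInt('42') is True but RepresentsInt('3.5') is False, while
# RepresentsFloat accepts both, since float() parses everything int() does and more.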
class Mailer:
def __init__(self, from_address='admin@localhost', server_address='localhost', server_port=25,
init_tls=False, username=None, password=None, run=Run(verbose=False),
progress=Progress(verbose=False)):
self.from_address = from_address
self.server_address = server_address
self.server_port = server_port
self.init_tls = init_tls
self.username = username
self.password = password
self.server = None
self.config_ini_path = None
self.run = run
self.progress = progress
self.config_template = {
'SMTP': {
'from_address': {'mandatory': True, 'test': lambda x: str(x)},
'server_address': {'mandatory': True, 'test': lambda x: str(x)},
'server_port': {'mandatory': True, 'test': lambda x: RepresentsInt(x) and int(x) > 0, 'required': 'an integer'},
'init_tls': {'mandatory': True, 'test': lambda x: x in ['True', 'False'], 'required': 'True or False'},
'username': {'mandatory': True, 'test': lambda x: str(x)},
'password': {'mandatory': True, 'test': lambda x: str(x)},
},
}
def init_from_config(self, config_ini_path):
def get_option(self, config, section, option, cast):
try:
return cast(config.get(section, option).strip())
except configparser.NoOptionError:
return None
filesnpaths.is_file_exists(config_ini_path)
self.config_ini_path = config_ini_path
config = configparser.ConfigParser()
try:
config.read(self.config_ini_path)
except Exception as e:
raise ConfigError("Well, the file '%s' does not seem to be a config file at all :/ Here "
"is what the parser had to complain about it: %s" % (self.config_ini_path, e))
section = 'SMTP'
if section not in config.sections():
raise ConfigError("The config file '%s' does not seem to have an 'SMTP' section, which "
"is essential for Mailer class to learn server and authentication "
"settings. Please check the documentation to create a proper config "
"file." % self.config_ini_path)
for option, value in config.items(section):
if option not in list(self.config_template[section].keys()):
raise ConfigError('Unknown option, "%s", under section "%s".' % (option, section))
if 'test' in self.config_template[section][option] and not self.config_template[section][option]['test'](value):
if 'required' in self.config_template[section][option]:
r = self.config_template[section][option]['required']
raise ConfigError('Unexpected value ("%s") for option "%s", under section "%s". '
'What is expected is %s.' % (value, option, section, r))
else:
raise ConfigError('Unexpected value ("%s") for option "%s", under section "%s".' % (value, option, section))
self.run.warning('', header="SMTP Configuration is read", lc='cyan')
for option, value in config.items(section):
self.run.info(option, value if option != 'password' else '*' * len(value))
setattr(self, option, value)
def test(self):
self.connect()
self.disconnect()
def connect(self):
if not self.server_address or not self.server_port:
raise ConfigError("SMTP server has not been configured to send e-mails :/")
try:
self.server = smtplib.SMTP(self.server_address, self.server_port)
if self.init_tls:
self.server.ehlo()
self.server.starttls()
if self.username:
self.server.login(self.username, self.password)
except Exception as e:
raise ConfigError("Something went wrong while connecting to the SMTP server :/ This is what we "
"know about the problem: %s" % e)
def disconnect(self):
if self.server:
self.server.quit()
self.server = None
def send(self, to, subject, content):
self.progress.new('E-mail')
self.progress.update('Establishing a connection ..')
self.connect()
self.progress.update('Preparing the package ..')
msg = MIMEText(content)
msg['To'] = to
msg['Subject'] = subject
msg['From'] = self.from_address
msg['Reply-to'] = self.from_address
try:
self.progress.update('Sending the e-mail to "%s" ..' % to)
self.server.sendmail(self.from_address, [to], msg.as_string())
except Exception as e:
self.progress.end()
raise ConfigError("Something went wrong while trying to connet send your e-mail :( "
"This is what we know about the problem: %s" % e)
self.progress.update('Disconnecting ..')
self.disconnect()
self.progress.end()
self.run.info('E-mail', 'Successfully sent to "%s"' % to)
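# Illustrative usage sketch (hypothetical server settings, not part of this module):
#
#   mailer = Mailer(from_address='admin@example.org', server_address='smtp.example.org',
#                   server_port=587, init_tls=True, username='user', password='***')
#   mailer.test()                                 # connect + disconnect round trip
#   mailer.send('someone@example.org', 'Test subject', 'Hello from the Mailer class.')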
| meren/anvio | anvio/utils.py | Python | gpl-3.0 | 183,298 | [
"BLAST",
"Bioconda",
"Bioconductor",
"Biopython"
] | b771c73387d078afedae5c123c958986ad749dd352e6c5490cea94f0f2dbd09d |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
import torch.nn as nn
from zoo.orca.automl.model.base_pytorch_model import PytorchBaseModel, \
PYTORCH_REGRESSION_LOSS_MAP
import numpy as np
class LSTMSeq2Seq(nn.Module):
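    """LSTM encoder-decoder (seq2seq) for multi-step forecasting: the encoder consumes
    the input sequence, and the decoder unrolls `future_seq_len` steps, optionally fed
    the ground truth via teacher forcing during training."""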
def __init__(self,
input_feature_num,
future_seq_len,
output_feature_num,
lstm_hidden_dim=128,
lstm_layer_num=2,
dropout=0.25,
teacher_forcing=False):
super(LSTMSeq2Seq, self).__init__()
self.lstm_encoder = nn.LSTM(input_size=input_feature_num,
hidden_size=lstm_hidden_dim,
num_layers=lstm_layer_num,
dropout=dropout,
batch_first=True)
self.lstm_decoder = nn.LSTM(input_size=output_feature_num,
hidden_size=lstm_hidden_dim,
num_layers=lstm_layer_num,
dropout=dropout,
batch_first=True)
self.fc = nn.Linear(in_features=lstm_hidden_dim, out_features=output_feature_num)
self.future_seq_len = future_seq_len
self.output_feature_num = output_feature_num
self.teacher_forcing = teacher_forcing
def forward(self, input_seq, target_seq=None):
x, (hidden, cell) = self.lstm_encoder(input_seq)
        # the input features are assumed to be ordered with the target dimensions first
decoder_input = input_seq[:, -1, :self.output_feature_num]
decoder_input = decoder_input.unsqueeze(1)
decoder_output = []
for i in range(self.future_seq_len):
decoder_output_step, (hidden, cell) = self.lstm_decoder(decoder_input, (hidden, cell))
out_step = self.fc(decoder_output_step)
decoder_output.append(out_step)
if not self.teacher_forcing or target_seq is None:
                # no teacher forcing: feed the model's own prediction back in
decoder_input = out_step
else:
                # teacher forcing: feed the ground-truth target for this step
decoder_input = target_seq[:, i:i+1, :]
decoder_output = torch.cat(decoder_output, dim=1)
return decoder_output
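# Illustrative shape check (a sketch, not part of the original module):
#
#   model = LSTMSeq2Seq(input_feature_num=4, future_seq_len=2, output_feature_num=1)
#   out = model(torch.randn(8, 10, 4))  # (batch, past_seq_len, input_feature_num)
#   out.shape                           # -> torch.Size([8, 2, 1])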
def model_creator(config):
return LSTMSeq2Seq(input_feature_num=config["input_feature_num"],
output_feature_num=config["output_feature_num"],
future_seq_len=config["future_seq_len"],
lstm_hidden_dim=config.get("lstm_hidden_dim", 128),
lstm_layer_num=config.get("lstm_layer_num", 2),
dropout=config.get("dropout", 0.25),
teacher_forcing=config.get("teacher_forcing", False))
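# Example config (illustrative): only the three required keys are mandatory, the
# rest fall back to the defaults above.
#
#   model = model_creator({"input_feature_num": 4, "output_feature_num": 1,
#                          "future_seq_len": 2, "lstm_hidden_dim": 64})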
def optimizer_creator(model, config):
return getattr(torch.optim, config.get("optim", "Adam"))(model.parameters(),
lr=config.get("lr", 0.001))
def loss_creator(config):
loss_name = config.get("loss", "mse")
if loss_name in PYTORCH_REGRESSION_LOSS_MAP:
loss_name = PYTORCH_REGRESSION_LOSS_MAP[loss_name]
else:
raise RuntimeError(f"Got \"{loss_name}\" for loss name,\
where \"mse\", \"mae\" or \"huber_loss\" is expected")
return getattr(torch.nn, loss_name)()
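# Illustrative call (assuming PYTORCH_REGRESSION_LOSS_MAP maps "mse" to "MSELoss"):
#
#   criterion = loss_creator({"loss": "mse"})  # -> torch.nn.MSELoss()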
class Seq2SeqPytorch(PytorchBaseModel):
def __init__(self, check_optional_config=False):
super().__init__(model_creator=model_creator,
optimizer_creator=optimizer_creator,
loss_creator=loss_creator,
check_optional_config=check_optional_config)
def _input_check(self, x, y):
        if len(x.shape) < 3:
            raise RuntimeError(f"Invalid data x with {len(x.shape)} dims where 3 dims are required.")
        if len(y.shape) < 3:
            raise RuntimeError(f"Invalid data y with {len(y.shape)} dims where 3 dims are required.")
        if y.shape[-1] > x.shape[-1]:
            raise RuntimeError(f"output dim should not be larger than input dim, "
                               f"while we got {y.shape[-1]} > {x.shape[-1]}.")
def _forward(self, x, y):
self._input_check(x, y)
return self.model(x, y)
def _get_required_parameters(self):
return {
"input_feature_num",
"future_seq_len",
"output_feature_num"
}
def _get_optional_parameters(self):
return {
"lstm_hidden_dim",
"lstm_layer_num",
"teacher_forcing"
} | super()._get_optional_parameters()
| intel-analytics/analytics-zoo | pyzoo/zoo/chronos/model/Seq2Seq_pytorch.py | Python | apache-2.0 | 5,226 | [
"ORCA"
] | 96269bc924ddb14060157aca6f1cbc03fb410c2994b41e5632b50304ae2589cc |
########################################################################
# File : ServerUtils.py
# Author : Ricardo Graciani
########################################################################
"""
Provide a uniform interface to the backend for local and remote clients.
There's a pretty big assumption here: that DB and Handler expose the same calls, with identical signatures.
This is not exactly the case for WMS DBs and services.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
def getDBOrClient(DB, serverName):
""" Tries to instantiate the DB object
and returns it if we manage to connect to the DB,
otherwise returns a Client of the server
"""
from DIRAC import gLogger
from DIRAC.Core.DISET.RPCClient import RPCClient
try:
myDB = DB()
if myDB._connected:
return myDB
except Exception:
pass
  gLogger.info('Cannot connect to the DB, will use %s' % serverName)
return RPCClient(serverName)
def getPilotAgentsDB():
serverName = 'WorkloadManagement/PilotManager'
  PilotAgentsDB = None  # if the import below fails, getDBOrClient(None, ...) falls back to an RPCClient
try:
from DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB import PilotAgentsDB
except Exception:
pass
return getDBOrClient(PilotAgentsDB, serverName)
pilotAgentsDB = getPilotAgentsDB()
| yujikato/DIRAC | src/DIRAC/WorkloadManagementSystem/Client/ServerUtils.py | Python | gpl-3.0 | 1,336 | [
"DIRAC"
] | 31b19e197f97e0ca663832234dd752a79eda7d12e5457a64a9a3c862e35ea470 |
# -*- coding: utf-8 -*-
from __future__ import with_statement
import bson
import os
import pickle
import pymongo
import sys
import unittest
import uuid
import warnings
from nose.plugins.skip import SkipTest
from datetime import datetime
from tests.fixtures import Base, Mixin, PickleEmbedded, PickleTest
from mongoengine import *
from mongoengine.base import NotRegistered, InvalidDocumentError, get_document
from mongoengine.queryset import InvalidQueryError
from mongoengine.connection import get_db, get_connection
TEST_IMAGE_PATH = os.path.join(os.path.dirname(__file__), 'mongoengine.png')
class DocumentTest(unittest.TestCase):
def setUp(self):
connect(db='mongoenginetest')
self.db = get_db()
class Person(Document):
name = StringField()
age = IntField()
meta = {'allow_inheritance': True}
self.Person = Person
def tearDown(self):
self.Person.drop_collection()
def test_drop_collection(self):
"""Ensure that the collection may be dropped from the database.
"""
self.Person(name='Test').save()
collection = self.Person._get_collection_name()
self.assertTrue(collection in self.db.collection_names())
self.Person.drop_collection()
self.assertFalse(collection in self.db.collection_names())
def test_queryset_resurrects_dropped_collection(self):
self.Person.objects().item_frequencies('name')
self.Person.drop_collection()
self.assertEqual({}, self.Person.objects().item_frequencies('name'))
class Actor(self.Person):
pass
        # Ensure this works correctly with inherited classes
Actor.objects().item_frequencies('name')
self.Person.drop_collection()
self.assertEqual({}, Actor.objects().item_frequencies('name'))
def test_definition(self):
"""Ensure that document may be defined using fields.
"""
name_field = StringField()
age_field = IntField()
class Person(Document):
name = name_field
age = age_field
non_field = True
self.assertEqual(Person._fields['name'], name_field)
self.assertEqual(Person._fields['age'], age_field)
self.assertFalse('non_field' in Person._fields)
self.assertTrue('id' in Person._fields)
# Test iteration over fields
fields = list(Person())
self.assertTrue('name' in fields and 'age' in fields)
# Ensure Document isn't treated like an actual document
self.assertFalse(hasattr(Document, '_fields'))
def test_repr(self):
"""Ensure that unicode representation works
"""
class Article(Document):
title = StringField()
def __unicode__(self):
return self.title
Article.drop_collection()
Article(title=u'привет мир').save()
self.assertEqual('<Article: привет мир>', repr(Article.objects.first()))
self.assertEqual('[<Article: привет мир>]', repr(Article.objects.all()))
def test_collection_naming(self):
"""Ensure that a collection with a specified name may be used.
"""
class DefaultNamingTest(Document):
pass
self.assertEqual('default_naming_test', DefaultNamingTest._get_collection_name())
class CustomNamingTest(Document):
meta = {'collection': 'pimp_my_collection'}
self.assertEqual('pimp_my_collection', CustomNamingTest._get_collection_name())
class DynamicNamingTest(Document):
meta = {'collection': lambda c: "DYNAMO"}
self.assertEqual('DYNAMO', DynamicNamingTest._get_collection_name())
# Use Abstract class to handle backwards compatibility
class BaseDocument(Document):
meta = {
'abstract': True,
'collection': lambda c: c.__name__.lower()
}
class OldNamingConvention(BaseDocument):
pass
self.assertEqual('oldnamingconvention', OldNamingConvention._get_collection_name())
class InheritedAbstractNamingTest(BaseDocument):
meta = {'collection': 'wibble'}
self.assertEqual('wibble', InheritedAbstractNamingTest._get_collection_name())
# Mixin tests
class BaseMixin(object):
meta = {
'collection': lambda c: c.__name__.lower()
}
class OldMixinNamingConvention(Document, BaseMixin):
pass
self.assertEqual('oldmixinnamingconvention', OldMixinNamingConvention._get_collection_name())
class BaseMixin(object):
meta = {
'collection': lambda c: c.__name__.lower()
}
class BaseDocument(Document, BaseMixin):
meta = {'allow_inheritance': True}
class MyDocument(BaseDocument):
pass
self.assertEqual('basedocument', MyDocument._get_collection_name())
def test_get_superclasses(self):
"""Ensure that the correct list of superclasses is assembled.
"""
class Animal(Document):
meta = {'allow_inheritance': True}
class Fish(Animal): pass
class Mammal(Animal): pass
class Human(Mammal): pass
class Dog(Mammal): pass
mammal_superclasses = {'Animal': Animal}
self.assertEqual(Mammal._superclasses, mammal_superclasses)
dog_superclasses = {
'Animal': Animal,
'Animal.Mammal': Mammal,
}
self.assertEqual(Dog._superclasses, dog_superclasses)
def test_external_superclasses(self):
"""Ensure that the correct list of sub and super classes is assembled.
when importing part of the model
"""
class Animal(Base): pass
class Fish(Animal): pass
class Mammal(Animal): pass
class Human(Mammal): pass
class Dog(Mammal): pass
mammal_superclasses = {'Base': Base, 'Base.Animal': Animal}
self.assertEqual(Mammal._superclasses, mammal_superclasses)
dog_superclasses = {
'Base': Base,
'Base.Animal': Animal,
'Base.Animal.Mammal': Mammal,
}
self.assertEqual(Dog._superclasses, dog_superclasses)
Base.drop_collection()
h = Human()
h.save()
self.assertEqual(Human.objects.count(), 1)
self.assertEqual(Mammal.objects.count(), 1)
self.assertEqual(Animal.objects.count(), 1)
self.assertEqual(Base.objects.count(), 1)
Base.drop_collection()
def test_polymorphic_queries(self):
"""Ensure that the correct subclasses are returned from a query"""
class Animal(Document):
meta = {'allow_inheritance': True}
class Fish(Animal): pass
class Mammal(Animal): pass
class Human(Mammal): pass
class Dog(Mammal): pass
Animal.drop_collection()
Animal().save()
Fish().save()
Mammal().save()
Human().save()
Dog().save()
classes = [obj.__class__ for obj in Animal.objects]
self.assertEqual(classes, [Animal, Fish, Mammal, Human, Dog])
classes = [obj.__class__ for obj in Mammal.objects]
self.assertEqual(classes, [Mammal, Human, Dog])
classes = [obj.__class__ for obj in Human.objects]
self.assertEqual(classes, [Human])
Animal.drop_collection()
def test_polymorphic_references(self):
"""Ensure that the correct subclasses are returned from a query when
using references / generic references
"""
class Animal(Document):
meta = {'allow_inheritance': True}
class Fish(Animal): pass
class Mammal(Animal): pass
class Human(Mammal): pass
class Dog(Mammal): pass
class Zoo(Document):
animals = ListField(ReferenceField(Animal))
Zoo.drop_collection()
Animal.drop_collection()
Animal().save()
Fish().save()
Mammal().save()
Human().save()
Dog().save()
# Save a reference to each animal
zoo = Zoo(animals=Animal.objects)
zoo.save()
zoo.reload()
classes = [a.__class__ for a in Zoo.objects.first().animals]
self.assertEqual(classes, [Animal, Fish, Mammal, Human, Dog])
Zoo.drop_collection()
class Zoo(Document):
animals = ListField(GenericReferenceField(Animal))
# Save a reference to each animal
zoo = Zoo(animals=Animal.objects)
zoo.save()
zoo.reload()
classes = [a.__class__ for a in Zoo.objects.first().animals]
self.assertEqual(classes, [Animal, Fish, Mammal, Human, Dog])
Zoo.drop_collection()
Animal.drop_collection()
def test_reference_inheritance(self):
class Stats(Document):
created = DateTimeField(default=datetime.now)
meta = {'allow_inheritance': False}
class CompareStats(Document):
generated = DateTimeField(default=datetime.now)
stats = ListField(ReferenceField(Stats))
Stats.drop_collection()
CompareStats.drop_collection()
list_stats = []
for i in xrange(10):
s = Stats()
s.save()
list_stats.append(s)
cmp_stats = CompareStats(stats=list_stats)
cmp_stats.save()
self.assertEqual(list_stats, CompareStats.objects.first().stats)
def test_inheritance(self):
"""Ensure that document may inherit fields from a superclass document.
"""
class Employee(self.Person):
salary = IntField()
self.assertTrue('name' in Employee._fields)
self.assertTrue('salary' in Employee._fields)
self.assertEqual(Employee._get_collection_name(),
self.Person._get_collection_name())
# Ensure that MRO error is not raised
class A(Document):
meta = {'allow_inheritance': True}
class B(A): pass
class C(B): pass
def test_allow_inheritance(self):
"""Ensure that inheritance may be disabled on simple classes and that
_cls and _types will not be used.
"""
class Animal(Document):
name = StringField()
meta = {'allow_inheritance': False}
Animal.drop_collection()
def create_dog_class():
class Dog(Animal):
pass
self.assertRaises(ValueError, create_dog_class)
# Check that _cls etc aren't present on simple documents
dog = Animal(name='dog')
dog.save()
collection = self.db[Animal._get_collection_name()]
obj = collection.find_one()
self.assertFalse('_cls' in obj)
self.assertFalse('_types' in obj)
Animal.drop_collection()
def create_employee_class():
class Employee(self.Person):
meta = {'allow_inheritance': False}
self.assertRaises(ValueError, create_employee_class)
def test_allow_inheritance_abstract_document(self):
"""Ensure that abstract documents can set inheritance rules and that
_cls and _types will not be used.
"""
class FinalDocument(Document):
meta = {'abstract': True,
'allow_inheritance': False}
class Animal(FinalDocument):
name = StringField()
Animal.drop_collection()
def create_dog_class():
class Dog(Animal):
pass
self.assertRaises(ValueError, create_dog_class)
# Check that _cls etc aren't present on simple documents
dog = Animal(name='dog')
dog.save()
collection = self.db[Animal._get_collection_name()]
obj = collection.find_one()
self.assertFalse('_cls' in obj)
self.assertFalse('_types' in obj)
Animal.drop_collection()
def test_allow_inheritance_embedded_document(self):
# Test the same for embedded documents
class Comment(EmbeddedDocument):
content = StringField()
meta = {'allow_inheritance': False}
def create_special_comment():
class SpecialComment(Comment):
pass
self.assertRaises(ValueError, create_special_comment)
comment = Comment(content='test')
self.assertFalse('_cls' in comment.to_mongo())
self.assertFalse('_types' in comment.to_mongo())
class Comment(EmbeddedDocument):
content = StringField()
meta = {'allow_inheritance': True}
comment = Comment(content='test')
self.assertTrue('_cls' in comment.to_mongo())
self.assertTrue('_types' in comment.to_mongo())
def test_document_inheritance(self):
"""Ensure mutliple inheritance of abstract docs works
"""
class DateCreatedDocument(Document):
meta = {
'allow_inheritance': True,
'abstract': True,
}
class DateUpdatedDocument(Document):
meta = {
'allow_inheritance': True,
'abstract': True,
}
try:
class MyDocument(DateCreatedDocument, DateUpdatedDocument):
pass
        except Exception:
self.assertTrue(False, "Couldn't create MyDocument class")
def test_how_to_turn_off_inheritance(self):
"""Demonstrates migrating from allow_inheritance = True to False.
"""
class Animal(Document):
name = StringField()
meta = {
'indexes': ['name']
}
self.assertEqual(Animal._meta['index_specs'],
[{'fields': [('_types', 1), ('name', 1)]}])
Animal.drop_collection()
dog = Animal(name='dog')
dog.save()
collection = self.db[Animal._get_collection_name()]
obj = collection.find_one()
self.assertTrue('_cls' in obj)
self.assertTrue('_types' in obj)
info = collection.index_information()
info = [value['key'] for key, value in info.iteritems()]
self.assertEqual([[(u'_id', 1)], [(u'_types', 1), (u'name', 1)]], info)
# Turn off inheritance
class Animal(Document):
name = StringField()
meta = {
'allow_inheritance': False,
'indexes': ['name']
}
self.assertEqual(Animal._meta['index_specs'],
[{'fields': [('name', 1)]}])
collection.update({}, {"$unset": {"_types": 1, "_cls": 1}}, multi=True)
# Confirm extra data is removed
obj = collection.find_one()
self.assertFalse('_cls' in obj)
self.assertFalse('_types' in obj)
info = collection.index_information()
info = [value['key'] for key, value in info.iteritems()]
self.assertEqual([[(u'_id', 1)], [(u'_types', 1), (u'name', 1)]], info)
info = collection.index_information()
indexes_to_drop = [key for key, value in info.iteritems() if '_types' in dict(value['key'])]
for index in indexes_to_drop:
collection.drop_index(index)
info = collection.index_information()
info = [value['key'] for key, value in info.iteritems()]
self.assertEqual([[(u'_id', 1)]], info)
# Recreate indexes
dog = Animal.objects.first()
dog.save()
info = collection.index_information()
info = [value['key'] for key, value in info.iteritems()]
self.assertEqual([[(u'_id', 1)], [(u'name', 1),]], info)
Animal.drop_collection()
def test_abstract_documents(self):
"""Ensure that a document superclass can be marked as abstract
thereby not using it as the name for the collection."""
defaults = {'index_background': True,
'index_drop_dups': True,
'index_opts': {'hello': 'world'},
'allow_inheritance': True,
'queryset_class': 'QuerySet',
'db_alias': 'myDB',
'shard_key': ('hello', 'world')}
meta_settings = {'abstract': True}
meta_settings.update(defaults)
class Animal(Document):
name = StringField()
meta = meta_settings
class Fish(Animal): pass
class Guppy(Fish): pass
class Mammal(Animal):
meta = {'abstract': True}
class Human(Mammal): pass
for k, v in defaults.iteritems():
for cls in [Animal, Fish, Guppy]:
self.assertEqual(cls._meta[k], v)
self.assertFalse('collection' in Animal._meta)
self.assertFalse('collection' in Mammal._meta)
self.assertEqual(Animal._get_collection_name(), None)
self.assertEqual(Mammal._get_collection_name(), None)
self.assertEqual(Fish._get_collection_name(), 'fish')
self.assertEqual(Guppy._get_collection_name(), 'fish')
self.assertEqual(Human._get_collection_name(), 'human')
def create_bad_abstract():
class EvilHuman(Human):
evil = BooleanField(default=True)
meta = {'abstract': True}
self.assertRaises(ValueError, create_bad_abstract)
def test_collection_name(self):
"""Ensure that a collection with a specified name may be used.
"""
collection = 'personCollTest'
if collection in self.db.collection_names():
self.db.drop_collection(collection)
class Person(Document):
name = StringField()
meta = {'collection': collection}
user = Person(name="Test User")
user.save()
self.assertTrue(collection in self.db.collection_names())
user_obj = self.db[collection].find_one()
self.assertEqual(user_obj['name'], "Test User")
user_obj = Person.objects[0]
self.assertEqual(user_obj.name, "Test User")
Person.drop_collection()
self.assertFalse(collection in self.db.collection_names())
def test_collection_name_and_primary(self):
"""Ensure that a collection with a specified name may be used.
"""
class Person(Document):
name = StringField(primary_key=True)
meta = {'collection': 'app'}
user = Person(name="Test User")
user.save()
user_obj = Person.objects[0]
self.assertEqual(user_obj.name, "Test User")
Person.drop_collection()
def test_inherited_collections(self):
"""Ensure that subclassed documents don't override parents' collections.
"""
class Drink(Document):
name = StringField()
meta = {'allow_inheritance': True}
class Drinker(Document):
drink = GenericReferenceField()
try:
warnings.simplefilter("error")
class AcloholicDrink(Drink):
meta = {'collection': 'booze'}
except SyntaxWarning, w:
warnings.simplefilter("ignore")
class AlcoholicDrink(Drink):
meta = {'collection': 'booze'}
else:
raise AssertionError("SyntaxWarning should be triggered")
warnings.resetwarnings()
Drink.drop_collection()
AlcoholicDrink.drop_collection()
Drinker.drop_collection()
red_bull = Drink(name='Red Bull')
red_bull.save()
programmer = Drinker(drink=red_bull)
programmer.save()
beer = AlcoholicDrink(name='Beer')
beer.save()
real_person = Drinker(drink=beer)
real_person.save()
self.assertEqual(Drinker.objects[0].drink.name, red_bull.name)
self.assertEqual(Drinker.objects[1].drink.name, beer.name)
def test_capped_collection(self):
"""Ensure that capped collections work properly.
"""
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_documents': 10,
'max_size': 90000,
}
Log.drop_collection()
# Ensure that the collection handles up to its maximum
for i in range(10):
Log().save()
self.assertEqual(len(Log.objects), 10)
# Check that extra documents don't increase the size
Log().save()
self.assertEqual(len(Log.objects), 10)
options = Log.objects._collection.options()
self.assertEqual(options['capped'], True)
self.assertEqual(options['max'], 10)
self.assertEqual(options['size'], 90000)
# Check that the document cannot be redefined with different options
def recreate_log_document():
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_documents': 11,
}
# Create the collection by accessing Document.objects
Log.objects
self.assertRaises(InvalidCollectionError, recreate_log_document)
Log.drop_collection()
def test_indexes(self):
"""Ensure that indexes are used when meta[indexes] is specified.
"""
class BlogPost(Document):
date = DateTimeField(db_field='addDate', default=datetime.now)
category = StringField()
tags = ListField(StringField())
meta = {
'indexes': [
'-date',
'tags',
('category', '-date')
],
'allow_inheritance': True
}
self.assertEqual(BlogPost._meta['index_specs'],
[{'fields': [('_types', 1), ('addDate', -1)]},
{'fields': [('tags', 1)]},
{'fields': [('_types', 1), ('category', 1),
('addDate', -1)]}])
BlogPost.drop_collection()
info = BlogPost.objects._collection.index_information()
# _id, '-date', 'tags', ('cat', 'date')
# NB: there is no index on _types by itself, since
# the indices on -date and tags will both contain
# _types as first element in the key
self.assertEqual(len(info), 4)
# Indexes are lazy so use list() to perform query
list(BlogPost.objects)
info = BlogPost.objects._collection.index_information()
info = [value['key'] for key, value in info.iteritems()]
self.assertTrue([('_types', 1), ('category', 1), ('addDate', -1)]
in info)
self.assertTrue([('_types', 1), ('addDate', -1)] in info)
# tags is a list field so it shouldn't have _types in the index
self.assertTrue([('tags', 1)] in info)
class ExtendedBlogPost(BlogPost):
title = StringField()
meta = {'indexes': ['title']}
self.assertEqual(ExtendedBlogPost._meta['index_specs'],
[{'fields': [('_types', 1), ('addDate', -1)]},
{'fields': [('tags', 1)]},
{'fields': [('_types', 1), ('category', 1),
('addDate', -1)]},
{'fields': [('_types', 1), ('title', 1)]}])
BlogPost.drop_collection()
list(ExtendedBlogPost.objects)
info = ExtendedBlogPost.objects._collection.index_information()
info = [value['key'] for key, value in info.iteritems()]
self.assertTrue([('_types', 1), ('category', 1), ('addDate', -1)]
in info)
self.assertTrue([('_types', 1), ('addDate', -1)] in info)
self.assertTrue([('_types', 1), ('title', 1)] in info)
BlogPost.drop_collection()
def test_inherited_index(self):
"""Ensure index specs are inhertited correctly"""
class A(Document):
title = StringField()
meta = {
'indexes': [
{
'fields': ('title',),
},
],
'allow_inheritance': True,
}
class B(A):
description = StringField()
self.assertEqual(A._meta['index_specs'], B._meta['index_specs'])
self.assertEqual([{'fields': [('_types', 1), ('title', 1)]}],
A._meta['index_specs'])
def test_build_index_spec_is_not_destructive(self):
class MyDoc(Document):
keywords = StringField()
meta = {
'indexes': ['keywords'],
'allow_inheritance': False
}
self.assertEqual(MyDoc._meta['index_specs'],
[{'fields': [('keywords', 1)]}])
# Force index creation
MyDoc.objects._ensure_indexes()
self.assertEqual(MyDoc._meta['index_specs'],
[{'fields': [('keywords', 1)]}])
def test_db_field_load(self):
"""Ensure we load data correctly
"""
class Person(Document):
name = StringField(required=True)
_rank = StringField(required=False, db_field="rank")
@property
def rank(self):
return self._rank or "Private"
Person.drop_collection()
Person(name="Jack", _rank="Corporal").save()
Person(name="Fred").save()
self.assertEqual(Person.objects.get(name="Jack").rank, "Corporal")
self.assertEqual(Person.objects.get(name="Fred").rank, "Private")
def test_db_embedded_doc_field_load(self):
"""Ensure we load embedded document data correctly
"""
class Rank(EmbeddedDocument):
title = StringField(required=True)
class Person(Document):
name = StringField(required=True)
rank_ = EmbeddedDocumentField(Rank, required=False, db_field='rank')
@property
def rank(self):
return self.rank_.title if self.rank_ is not None else "Private"
Person.drop_collection()
Person(name="Jack", rank_=Rank(title="Corporal")).save()
Person(name="Fred").save()
self.assertEqual(Person.objects.get(name="Jack").rank, "Corporal")
self.assertEqual(Person.objects.get(name="Fred").rank, "Private")
def test_embedded_document_index_meta(self):
"""Ensure that embedded document indexes are created explicitly
"""
class Rank(EmbeddedDocument):
title = StringField(required=True)
class Person(Document):
name = StringField(required=True)
rank = EmbeddedDocumentField(Rank, required=False)
meta = {
'indexes': [
'rank.title',
],
'allow_inheritance': False
}
self.assertEqual([{'fields': [('rank.title', 1)]}],
Person._meta['index_specs'])
Person.drop_collection()
# Indexes are lazy so use list() to perform query
list(Person.objects)
info = Person.objects._collection.index_information()
info = [value['key'] for key, value in info.iteritems()]
self.assertTrue([('rank.title', 1)] in info)
def test_explicit_geo2d_index(self):
"""Ensure that geo2d indexes work when created via meta[indexes]
"""
class Place(Document):
location = DictField()
meta = {
'indexes': [
'*location.point',
],
}
self.assertEqual([{'fields': [('location.point', '2d')]}],
Place._meta['index_specs'])
Place.drop_collection()
info = Place.objects._collection.index_information()
# Indexes are lazy so use list() to perform query
list(Place.objects)
info = Place.objects._collection.index_information()
info = [value['key'] for key, value in info.iteritems()]
self.assertTrue([('location.point', '2d')] in info)
def test_dictionary_indexes(self):
"""Ensure that indexes are used when meta[indexes] contains dictionaries
instead of lists.
"""
class BlogPost(Document):
date = DateTimeField(db_field='addDate', default=datetime.now)
category = StringField()
tags = ListField(StringField())
meta = {
'indexes': [
{'fields': ['-date'], 'unique': True,
'sparse': True, 'types': False },
],
}
self.assertEqual([{'fields': [('addDate', -1)], 'unique': True,
'sparse': True, 'types': False}],
BlogPost._meta['index_specs'])
BlogPost.drop_collection()
info = BlogPost.objects._collection.index_information()
# _id, '-date'
self.assertEqual(len(info), 3)
# Indexes are lazy so use list() to perform query
list(BlogPost.objects)
info = BlogPost.objects._collection.index_information()
info = [(value['key'],
value.get('unique', False),
value.get('sparse', False))
for key, value in info.iteritems()]
self.assertTrue(([('addDate', -1)], True, True) in info)
BlogPost.drop_collection()
def test_abstract_index_inheritance(self):
class UserBase(Document):
meta = {
'abstract': True,
'indexes': ['user_guid']
}
user_guid = StringField(required=True)
class Person(UserBase):
meta = {
'indexes': ['name'],
}
name = StringField()
Person.drop_collection()
p = Person(name="test", user_guid='123')
p.save()
self.assertEqual(1, Person.objects.count())
info = Person.objects._collection.index_information()
self.assertEqual(info.keys(), ['_types_1_user_guid_1', '_id_', '_types_1_name_1'])
Person.drop_collection()
def test_disable_index_creation(self):
"""Tests setting auto_create_index to False on the connection will
disable any index generation.
"""
class User(Document):
meta = {
'indexes': ['user_guid'],
'auto_create_index': False
}
user_guid = StringField(required=True)
User.drop_collection()
u = User(user_guid='123')
u.save()
self.assertEqual(1, User.objects.count())
info = User.objects._collection.index_information()
self.assertEqual(info.keys(), ['_id_'])
User.drop_collection()
def test_embedded_document_index(self):
"""Tests settings an index on an embedded document
"""
class Date(EmbeddedDocument):
year = IntField(db_field='yr')
class BlogPost(Document):
title = StringField()
date = EmbeddedDocumentField(Date)
meta = {
'indexes': [
'-date.year'
],
}
BlogPost.drop_collection()
info = BlogPost.objects._collection.index_information()
self.assertEqual(info.keys(), ['_types_1_date.yr_-1', '_id_'])
BlogPost.drop_collection()
def test_list_embedded_document_index(self):
"""Ensure list embedded documents can be indexed
"""
class Tag(EmbeddedDocument):
name = StringField(db_field='tag')
class BlogPost(Document):
title = StringField()
tags = ListField(EmbeddedDocumentField(Tag))
meta = {
'indexes': [
'tags.name'
],
}
BlogPost.drop_collection()
info = BlogPost.objects._collection.index_information()
        # we don't use _types with list fields by default
self.assertEqual(info.keys(), ['_id_', '_types_1', 'tags.tag_1'])
post1 = BlogPost(title="Embedded Indexes tests in place",
tags=[Tag(name="about"), Tag(name="time")]
)
post1.save()
BlogPost.drop_collection()
def test_recursive_embedded_objects_dont_break_indexes(self):
class RecursiveObject(EmbeddedDocument):
obj = EmbeddedDocumentField('self')
class RecursiveDocument(Document):
recursive_obj = EmbeddedDocumentField(RecursiveObject)
info = RecursiveDocument.objects._collection.index_information()
self.assertEqual(info.keys(), ['_id_', '_types_1'])
def test_geo_indexes_recursion(self):
class Location(Document):
name = StringField()
location = GeoPointField()
class Parent(Document):
name = StringField()
location = ReferenceField(Location)
Location.drop_collection()
Parent.drop_collection()
list(Parent.objects)
collection = Parent._get_collection()
info = collection.index_information()
self.assertFalse('location_2d' in info)
self.assertEqual(len(Parent._geo_indices()), 0)
self.assertEqual(len(Location._geo_indices()), 1)
def test_covered_index(self):
"""Ensure that covered indexes can be used
"""
class Test(Document):
a = IntField()
meta = {
'indexes': ['a'],
'allow_inheritance': False
}
Test.drop_collection()
obj = Test(a=1)
obj.save()
        # Need to be explicit about covered indexes as MongoDB doesn't know if
        # the documents returned might have more keys than those requested here.
query_plan = Test.objects(id=obj.id).exclude('a').explain()
self.assertFalse(query_plan['indexOnly'])
query_plan = Test.objects(id=obj.id).only('id').explain()
self.assertTrue(query_plan['indexOnly'])
query_plan = Test.objects(a=1).only('a').exclude('id').explain()
self.assertTrue(query_plan['indexOnly'])
def test_index_on_id(self):
class BlogPost(Document):
meta = {
'indexes': [
['categories', 'id']
],
'allow_inheritance': False
}
title = StringField(required=True)
description = StringField(required=True)
categories = ListField()
BlogPost.drop_collection()
indexes = BlogPost.objects._collection.index_information()
self.assertEqual(indexes['categories_1__id_1']['key'],
[('categories', 1), ('_id', 1)])
def test_hint(self):
class BlogPost(Document):
tags = ListField(StringField())
meta = {
'indexes': [
'tags',
],
}
BlogPost.drop_collection()
for i in xrange(0, 10):
tags = [("tag %i" % n) for n in xrange(0, i % 2)]
BlogPost(tags=tags).save()
self.assertEqual(BlogPost.objects.count(), 10)
self.assertEqual(BlogPost.objects.hint().count(), 10)
self.assertEqual(BlogPost.objects.hint([('tags', 1)]).count(), 10)
self.assertEqual(BlogPost.objects.hint([('ZZ', 1)]).count(), 10)
def invalid_index():
BlogPost.objects.hint('tags')
self.assertRaises(TypeError, invalid_index)
def invalid_index_2():
return BlogPost.objects.hint(('tags', 1))
self.assertRaises(TypeError, invalid_index_2)
def test_unique(self):
"""Ensure that uniqueness constraints are applied to fields.
"""
class BlogPost(Document):
title = StringField()
slug = StringField(unique=True)
BlogPost.drop_collection()
post1 = BlogPost(title='test1', slug='test')
post1.save()
        # Two posts with the same slug are not allowed
post2 = BlogPost(title='test2', slug='test')
self.assertRaises(NotUniqueError, post2.save)
        # Ensure backwards compatibility for errors
self.assertRaises(OperationError, post2.save)
def test_unique_with(self):
"""Ensure that unique_with constraints are applied to fields.
"""
class Date(EmbeddedDocument):
year = IntField(db_field='yr')
class BlogPost(Document):
title = StringField()
date = EmbeddedDocumentField(Date)
slug = StringField(unique_with='date.year')
BlogPost.drop_collection()
post1 = BlogPost(title='test1', date=Date(year=2009), slug='test')
post1.save()
        # year is different so won't raise exception
post2 = BlogPost(title='test2', date=Date(year=2010), slug='test')
post2.save()
        # Now there will be two docs with the same slug and the same year: fail
post3 = BlogPost(title='test3', date=Date(year=2010), slug='test')
self.assertRaises(OperationError, post3.save)
BlogPost.drop_collection()
def test_unique_embedded_document(self):
"""Ensure that uniqueness constraints are applied to fields on embedded documents.
"""
class SubDocument(EmbeddedDocument):
year = IntField(db_field='yr')
slug = StringField(unique=True)
class BlogPost(Document):
title = StringField()
sub = EmbeddedDocumentField(SubDocument)
BlogPost.drop_collection()
post1 = BlogPost(title='test1', sub=SubDocument(year=2009, slug="test"))
post1.save()
# sub.slug is different so won't raise exception
post2 = BlogPost(title='test2', sub=SubDocument(year=2010, slug='another-slug'))
post2.save()
# Now there will be two docs with the same sub.slug
post3 = BlogPost(title='test3', sub=SubDocument(year=2010, slug='test'))
self.assertRaises(NotUniqueError, post3.save)
BlogPost.drop_collection()
def test_unique_with_embedded_document_and_embedded_unique(self):
"""Ensure that uniqueness constraints are applied to fields on
embedded documents. And work with unique_with as well.
"""
class SubDocument(EmbeddedDocument):
year = IntField(db_field='yr')
slug = StringField(unique=True)
class BlogPost(Document):
title = StringField(unique_with='sub.year')
sub = EmbeddedDocumentField(SubDocument)
BlogPost.drop_collection()
post1 = BlogPost(title='test1', sub=SubDocument(year=2009, slug="test"))
post1.save()
# sub.slug is different so won't raise exception
post2 = BlogPost(title='test2', sub=SubDocument(year=2010, slug='another-slug'))
post2.save()
# Now there will be two docs with the same sub.slug
post3 = BlogPost(title='test3', sub=SubDocument(year=2010, slug='test'))
self.assertRaises(NotUniqueError, post3.save)
# Now there will be two docs with the same title and year
post3 = BlogPost(title='test1', sub=SubDocument(year=2009, slug='test-1'))
self.assertRaises(NotUniqueError, post3.save)
BlogPost.drop_collection()
def test_ttl_indexes(self):
class Log(Document):
created = DateTimeField(default=datetime.now)
meta = {
'indexes': [
{'fields': ['created'], 'expireAfterSeconds': 3600}
]
}
Log.drop_collection()
        if pymongo.version_tuple < (2, 3):
            raise SkipTest('pymongo needs to be 2.3 or higher for this test')
connection = get_connection()
version_array = connection.server_info()['versionArray']
        if tuple(version_array[:2]) < (2, 2):
            raise SkipTest('MongoDB needs to be 2.2 or higher for this test')
# Indexes are lazy so use list() to perform query
list(Log.objects)
info = Log.objects._collection.index_information()
self.assertEqual(3600,
info['_types_1_created_1']['expireAfterSeconds'])
def test_unique_and_indexes(self):
"""Ensure that 'unique' constraints aren't overridden by
meta.indexes.
"""
class Customer(Document):
cust_id = IntField(unique=True, required=True)
meta = {
'indexes': ['cust_id'],
'allow_inheritance': False,
}
Customer.drop_collection()
cust = Customer(cust_id=1)
cust.save()
cust_dupe = Customer(cust_id=1)
try:
cust_dupe.save()
raise AssertionError, "We saved a dupe!"
except NotUniqueError:
pass
Customer.drop_collection()
def test_unique_and_primary(self):
"""If you set a field as primary, then unexpected behaviour can occur.
You won't create a duplicate but you will update an existing document.
"""
class User(Document):
name = StringField(primary_key=True, unique=True)
password = StringField()
User.drop_collection()
user = User(name='huangz', password='secret')
user.save()
user = User(name='huangz', password='secret2')
user.save()
self.assertEqual(User.objects.count(), 1)
self.assertEqual(User.objects.get().password, 'secret2')
User.drop_collection()
def test_custom_id_field(self):
"""Ensure that documents may be created with custom primary keys.
"""
class User(Document):
username = StringField(primary_key=True)
name = StringField()
meta = {'allow_inheritance': True}
User.drop_collection()
self.assertEqual(User._fields['username'].db_field, '_id')
self.assertEqual(User._meta['id_field'], 'username')
def create_invalid_user():
User(name='test').save() # no primary key field
self.assertRaises(ValidationError, create_invalid_user)
def define_invalid_user():
class EmailUser(User):
email = StringField(primary_key=True)
self.assertRaises(ValueError, define_invalid_user)
class EmailUser(User):
email = StringField()
user = User(username='test', name='test user')
user.save()
user_obj = User.objects.first()
self.assertEqual(user_obj.id, 'test')
self.assertEqual(user_obj.pk, 'test')
user_son = User.objects._collection.find_one()
self.assertEqual(user_son['_id'], 'test')
self.assertTrue('username' not in user_son['_id'])
User.drop_collection()
user = User(pk='mongo', name='mongo user')
user.save()
user_obj = User.objects.first()
self.assertEqual(user_obj.id, 'mongo')
self.assertEqual(user_obj.pk, 'mongo')
user_son = User.objects._collection.find_one()
self.assertEqual(user_son['_id'], 'mongo')
self.assertTrue('username' not in user_son['_id'])
User.drop_collection()
def test_document_not_registered(self):
class Place(Document):
name = StringField()
meta = {'allow_inheritance': True}
class NicePlace(Place):
pass
Place.drop_collection()
Place(name="London").save()
NicePlace(name="Buckingham Palace").save()
# Mimic Place and NicePlace definitions being in a different file
        # and the NicePlace model not being imported at query time.
from mongoengine.base import _document_registry
del(_document_registry['Place.NicePlace'])
def query_without_importing_nice_place():
print Place.objects.all()
self.assertRaises(NotRegistered, query_without_importing_nice_place)
def test_document_registry_regressions(self):
class Location(Document):
name = StringField()
meta = {'allow_inheritance': True}
class Area(Location):
location = ReferenceField('Location', dbref=True)
Location.drop_collection()
self.assertEquals(Area, get_document("Area"))
self.assertEquals(Area, get_document("Location.Area"))
def test_creation(self):
"""Ensure that document may be created using keyword arguments.
"""
person = self.Person(name="Test User", age=30)
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 30)
def test_to_dbref(self):
"""Ensure that you can get a dbref of a document"""
person = self.Person(name="Test User", age=30)
self.assertRaises(OperationError, person.to_dbref)
person.save()
person.to_dbref()
def test_reload(self):
"""Ensure that attributes may be reloaded.
"""
person = self.Person(name="Test User", age=20)
person.save()
person_obj = self.Person.objects.first()
person_obj.name = "Mr Test User"
person_obj.age = 21
person_obj.save()
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 20)
person.reload()
self.assertEqual(person.name, "Mr Test User")
self.assertEqual(person.age, 21)
def test_reload_sharded(self):
class Animal(Document):
superphylum = StringField()
meta = {'shard_key': ('superphylum',)}
Animal.drop_collection()
doc = Animal(superphylum = 'Deuterostomia')
doc.save()
doc.reload()
Animal.drop_collection()
def test_reload_referencing(self):
"""Ensures reloading updates weakrefs correctly
"""
class Embedded(EmbeddedDocument):
dict_field = DictField()
list_field = ListField()
class Doc(Document):
dict_field = DictField()
list_field = ListField()
embedded_field = EmbeddedDocumentField(Embedded)
Doc.drop_collection()
doc = Doc()
doc.dict_field = {'hello': 'world'}
doc.list_field = ['1', 2, {'hello': 'world'}]
embedded_1 = Embedded()
embedded_1.dict_field = {'hello': 'world'}
embedded_1.list_field = ['1', 2, {'hello': 'world'}]
doc.embedded_field = embedded_1
doc.save()
doc = doc.reload(10)
doc.list_field.append(1)
doc.dict_field['woot'] = "woot"
doc.embedded_field.list_field.append(1)
doc.embedded_field.dict_field['woot'] = "woot"
self.assertEqual(doc._get_changed_fields(), [
'list_field', 'dict_field', 'embedded_field.list_field',
'embedded_field.dict_field'])
doc.save()
doc = doc.reload(10)
self.assertEqual(doc._get_changed_fields(), [])
self.assertEqual(len(doc.list_field), 4)
self.assertEqual(len(doc.dict_field), 2)
self.assertEqual(len(doc.embedded_field.list_field), 4)
self.assertEqual(len(doc.embedded_field.dict_field), 2)
def test_dictionary_access(self):
"""Ensure that dictionary-style field access works properly.
"""
person = self.Person(name='Test User', age=30)
self.assertEqual(person['name'], 'Test User')
self.assertRaises(KeyError, person.__getitem__, 'salary')
self.assertRaises(KeyError, person.__setitem__, 'salary', 50)
person['name'] = 'Another User'
self.assertEqual(person['name'], 'Another User')
# Length = length(assigned fields + id)
self.assertEqual(len(person), 3)
self.assertTrue('age' in person)
person.age = None
self.assertFalse('age' in person)
self.assertFalse('nationality' in person)
def test_embedded_document(self):
"""Ensure that embedded documents are set up correctly.
"""
class Comment(EmbeddedDocument):
content = StringField()
self.assertTrue('content' in Comment._fields)
self.assertFalse('id' in Comment._fields)
def test_embedded_document_validation(self):
"""Ensure that embedded documents may be validated.
"""
class Comment(EmbeddedDocument):
date = DateTimeField()
content = StringField(required=True)
comment = Comment()
self.assertRaises(ValidationError, comment.validate)
comment.content = 'test'
comment.validate()
comment.date = 4
self.assertRaises(ValidationError, comment.validate)
comment.date = datetime.now()
comment.validate()
def test_embedded_db_field_validate(self):
class SubDoc(EmbeddedDocument):
val = IntField()
class Doc(Document):
e = EmbeddedDocumentField(SubDoc, db_field='eb')
Doc.drop_collection()
Doc(e=SubDoc(val=15)).save()
doc = Doc.objects.first()
doc.validate()
keys = doc._data.keys()
self.assertEqual(2, len(keys))
self.assertTrue(None in keys)
self.assertTrue('e' in keys)
def test_save(self):
"""Ensure that a document may be saved in the database.
"""
# Create person object and save it to the database
person = self.Person(name='Test User', age=30)
person.save()
# Ensure that the object is in the database
collection = self.db[self.Person._get_collection_name()]
person_obj = collection.find_one({'name': 'Test User'})
self.assertEqual(person_obj['name'], 'Test User')
self.assertEqual(person_obj['age'], 30)
self.assertEqual(person_obj['_id'], person.id)
# Test skipping validation on save
class Recipient(Document):
email = EmailField(required=True)
recipient = Recipient(email='root@localhost')
self.assertRaises(ValidationError, recipient.save)
try:
recipient.save(validate=False)
except ValidationError:
self.fail()
def test_save_to_a_value_that_equates_to_false(self):
class Thing(EmbeddedDocument):
count = IntField()
class User(Document):
thing = EmbeddedDocumentField(Thing)
User.drop_collection()
user = User(thing=Thing(count=1))
user.save()
user.reload()
user.thing.count = 0
user.save()
user.reload()
self.assertEqual(user.thing.count, 0)
def test_save_max_recursion_not_hit(self):
class Person(Document):
name = StringField()
parent = ReferenceField('self')
friend = ReferenceField('self')
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.parent = None
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p2.save()
p1.friend = p2
p1.save()
        # Confirm we can save, and that saving resets the changed fields, without
        # hitting a max recursion error
p0 = Person.objects.first()
p0.name = 'wpjunior'
p0.save()
def test_save_max_recursion_not_hit_with_file_field(self):
class Foo(Document):
name = StringField()
picture = FileField()
bar = ReferenceField('self')
Foo.drop_collection()
a = Foo(name='hello')
a.save()
a.bar = a
with open(TEST_IMAGE_PATH, 'rb') as test_image:
a.picture = test_image
a.save()
        # Confirm we can save, and that saving resets the changed fields, without
        # hitting a max recursion error
b = Foo.objects.with_id(a.id)
b.name='world'
b.save()
        self.assertEqual(b.picture, b.bar.picture)
        self.assertEqual(b.bar.picture, b.bar.bar.picture)
def test_save_cascades(self):
class Person(Document):
name = StringField()
parent = ReferenceField('self')
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.parent = None
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p2.save()
p = Person.objects(name="Wilson Jr").get()
p.parent.name = "Daddy Wilson"
p.save()
p1.reload()
self.assertEqual(p1.name, p.parent.name)
def test_save_cascade_kwargs(self):
class Person(Document):
name = StringField()
parent = ReferenceField('self')
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.parent = None
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p2.save(force_insert=True, cascade_kwargs={"force_insert": False})
p = Person.objects(name="Wilson Jr").get()
p.parent.name = "Daddy Wilson"
p.save()
p1.reload()
self.assertEqual(p1.name, p.parent.name)
def test_save_cascade_meta(self):
class Person(Document):
name = StringField()
parent = ReferenceField('self')
meta = {'cascade': False}
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.parent = None
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p2.save()
p = Person.objects(name="Wilson Jr").get()
p.parent.name = "Daddy Wilson"
p.save()
p1.reload()
self.assertNotEqual(p1.name, p.parent.name)
p.save(cascade=True)
p1.reload()
self.assertEqual(p1.name, p.parent.name)
def test_save_cascades_generically(self):
class Person(Document):
name = StringField()
parent = GenericReferenceField()
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p2.save()
p = Person.objects(name="Wilson Jr").get()
p.parent.name = "Daddy Wilson"
p.save()
p1.reload()
self.assertEqual(p1.name, p.parent.name)
def test_update(self):
"""Ensure that an existing document is updated instead of be overwritten.
"""
# Create person object and save it to the database
person = self.Person(name='Test User', age=30)
person.save()
# Create same person object, with same id, without age
same_person = self.Person(name='Test')
same_person.id = person.id
same_person.save()
# Confirm only one object
self.assertEqual(self.Person.objects.count(), 1)
# reload
person.reload()
same_person.reload()
# Confirm the same
self.assertEqual(person, same_person)
self.assertEqual(person.name, same_person.name)
self.assertEqual(person.age, same_person.age)
# Confirm the saved values
self.assertEqual(person.name, 'Test')
self.assertEqual(person.age, 30)
# Test only / exclude only updates included fields
person = self.Person.objects.only('name').get()
person.name = 'User'
person.save()
person.reload()
self.assertEqual(person.name, 'User')
self.assertEqual(person.age, 30)
# test exclude only updates set fields
person = self.Person.objects.exclude('name').get()
person.age = 21
person.save()
person.reload()
self.assertEqual(person.name, 'User')
self.assertEqual(person.age, 21)
# Test only / exclude can set non excluded / included fields
person = self.Person.objects.only('name').get()
person.name = 'Test'
person.age = 30
person.save()
person.reload()
self.assertEqual(person.name, 'Test')
self.assertEqual(person.age, 30)
# test exclude only updates set fields
person = self.Person.objects.exclude('name').get()
person.name = 'User'
person.age = 21
person.save()
person.reload()
self.assertEqual(person.name, 'User')
self.assertEqual(person.age, 21)
        # Confirm that setting a non-required field to None removes it
person = self.Person.objects.exclude('name').get()
person.age = None
person.save()
person.reload()
self.assertEqual(person.name, 'User')
self.assertEqual(person.age, None)
person = self.Person.objects.get()
person.name = None
person.age = None
person.save()
person.reload()
self.assertEqual(person.name, None)
self.assertEqual(person.age, None)
def test_can_save_if_not_included(self):
class EmbeddedDoc(EmbeddedDocument):
pass
class Simple(Document):
pass
class Doc(Document):
string_field = StringField(default='1')
int_field = IntField(default=1)
float_field = FloatField(default=1.1)
boolean_field = BooleanField(default=True)
datetime_field = DateTimeField(default=datetime.now)
embedded_document_field = EmbeddedDocumentField(EmbeddedDoc, default=lambda: EmbeddedDoc())
list_field = ListField(default=lambda: [1, 2, 3])
dict_field = DictField(default=lambda: {"hello": "world"})
objectid_field = ObjectIdField(default=bson.ObjectId)
reference_field = ReferenceField(Simple, default=lambda: Simple().save())
map_field = MapField(IntField(), default=lambda: {"simple": 1})
decimal_field = DecimalField(default=1.0)
complex_datetime_field = ComplexDateTimeField(default=datetime.now)
url_field = URLField(default="http://mongoengine.org")
dynamic_field = DynamicField(default=1)
generic_reference_field = GenericReferenceField(default=lambda: Simple().save())
sorted_list_field = SortedListField(IntField(), default=lambda: [1, 2, 3])
email_field = EmailField(default="ross@example.com")
geo_point_field = GeoPointField(default=lambda: [1, 2])
sequence_field = SequenceField()
uuid_field = UUIDField(default=uuid.uuid4)
generic_embedded_document_field = GenericEmbeddedDocumentField(default=lambda: EmbeddedDoc())
Simple.drop_collection()
Doc.drop_collection()
Doc().save()
my_doc = Doc.objects.only("string_field").first()
my_doc.string_field = "string"
my_doc.save()
my_doc = Doc.objects.get(string_field="string")
self.assertEqual(my_doc.string_field, "string")
self.assertEqual(my_doc.int_field, 1)
def test_document_update(self):
def update_not_saved_raises():
person = self.Person(name='dcrosta')
person.update(set__name='Dan Crosta')
self.assertRaises(OperationError, update_not_saved_raises)
author = self.Person(name='dcrosta')
author.save()
author.update(set__name='Dan Crosta')
author.reload()
p1 = self.Person.objects.first()
self.assertEqual(p1.name, author.name)
def update_no_value_raises():
person = self.Person.objects.first()
person.update()
self.assertRaises(OperationError, update_no_value_raises)
def update_no_op_raises():
person = self.Person.objects.first()
person.update(name="Dan")
self.assertRaises(InvalidQueryError, update_no_op_raises)
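# For orientation: mongoengine translates the "set__<field>" keyword
# arguments exercised above into MongoDB update operators. The mapping
# below is a sketch for illustration (assumed, not asserted by this test):
#
#   author.update(set__name='Dan Crosta')
#     -> collection.update({'_id': author.pk}, {'$set': {'name': 'Dan Crosta'}})
#
# Calling update() with no modifiers, or with a plain keyword such as
# name="Dan", is rejected as the two helpers above demonstrate.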
def test_embedded_update(self):
"""
Test update on `EmbeddedDocumentField` fields
"""
class Page(EmbeddedDocument):
log_message = StringField(verbose_name="Log message",
required=True)
class Site(Document):
page = EmbeddedDocumentField(Page)
Site.drop_collection()
site = Site(page=Page(log_message="Warning: Dummy message"))
site.save()
# Update
site = Site.objects.first()
site.page.log_message = "Error: Dummy message"
site.save()
site = Site.objects.first()
self.assertEqual(site.page.log_message, "Error: Dummy message")
def test_embedded_update_db_field(self):
"""
Test update on `EmbeddedDocumentField` fields when db_field is other
than default.
"""
class Page(EmbeddedDocument):
log_message = StringField(verbose_name="Log message",
db_field="page_log_message",
required=True)
class Site(Document):
page = EmbeddedDocumentField(Page)
Site.drop_collection()
site = Site(page=Page(log_message="Warning: Dummy message"))
site.save()
# Update
site = Site.objects.first()
site.page.log_message = "Error: Dummy message"
site.save()
site = Site.objects.first()
self.assertEqual(site.page.log_message, "Error: Dummy message")
def test_circular_reference_deltas(self):
class Person(Document):
name = StringField()
owns = ListField(ReferenceField('Organization'))
class Organization(Document):
name = StringField()
owner = ReferenceField('Person')
Person.drop_collection()
Organization.drop_collection()
person = Person(name="owner")
person.save()
organization = Organization(name="company")
organization.save()
person.owns.append(organization)
organization.owner = person
person.save()
organization.save()
p = Person.objects[0].select_related()
o = Organization.objects.first()
self.assertEqual(p.owns[0], o)
self.assertEqual(o.owner, p)
def test_circular_reference_deltas_2(self):
class Person(Document):
name = StringField()
owns = ListField(ReferenceField('Organization'))
employer = ReferenceField('Organization')
class Organization(Document):
name = StringField()
owner = ReferenceField('Person')
employees = ListField(ReferenceField('Person'))
Person.drop_collection()
Organization.drop_collection()
person = Person( name="owner" )
person.save()
employee = Person( name="employee" )
employee.save()
organization = Organization( name="company" )
organization.save()
person.owns.append( organization )
organization.owner = person
organization.employees.append( employee )
employee.employer = organization
person.save()
organization.save()
employee.save()
p = Person.objects.get(name="owner")
e = Person.objects.get(name="employee")
o = Organization.objects.first()
self.assertEqual(p.owns[0], o)
self.assertEqual(o.owner, p)
self.assertEqual(e.employer, o)
def test_delta(self):
class Doc(Document):
string_field = StringField()
int_field = IntField()
dict_field = DictField()
list_field = ListField()
Doc.drop_collection()
doc = Doc()
doc.save()
doc = Doc.objects.first()
self.assertEqual(doc._get_changed_fields(), [])
self.assertEqual(doc._delta(), ({}, {}))
doc.string_field = 'hello'
self.assertEqual(doc._get_changed_fields(), ['string_field'])
self.assertEqual(doc._delta(), ({'string_field': 'hello'}, {}))
doc._changed_fields = []
doc.int_field = 1
self.assertEqual(doc._get_changed_fields(), ['int_field'])
self.assertEqual(doc._delta(), ({'int_field': 1}, {}))
doc._changed_fields = []
dict_value = {'hello': 'world', 'ping': 'pong'}
doc.dict_field = dict_value
self.assertEqual(doc._get_changed_fields(), ['dict_field'])
self.assertEqual(doc._delta(), ({'dict_field': dict_value}, {}))
doc._changed_fields = []
list_value = ['1', 2, {'hello': 'world'}]
doc.list_field = list_value
self.assertEqual(doc._get_changed_fields(), ['list_field'])
self.assertEqual(doc._delta(), ({'list_field': list_value}, {}))
# Test unsetting
doc._changed_fields = []
doc.dict_field = {}
self.assertEqual(doc._get_changed_fields(), ['dict_field'])
self.assertEqual(doc._delta(), ({}, {'dict_field': 1}))
doc._changed_fields = []
doc.list_field = []
self.assertEqual(doc._get_changed_fields(), ['list_field'])
self.assertEqual(doc._delta(), ({}, {'list_field': 1}))
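# A rough sketch of how the _delta() pairs above map onto a MongoDB
# update document (assumed wire format, shown for illustration only):
#
#   sets, unsets = doc._delta()
#   # ({'string_field': 'hello'}, {'list_field': 1})
#   #   -> {'$set': {'string_field': 'hello'}, '$unset': {'list_field': 1}}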
def test_delta_recursive(self):
class Embedded(EmbeddedDocument):
string_field = StringField()
int_field = IntField()
dict_field = DictField()
list_field = ListField()
class Doc(Document):
string_field = StringField()
int_field = IntField()
dict_field = DictField()
list_field = ListField()
embedded_field = EmbeddedDocumentField(Embedded)
Doc.drop_collection()
doc = Doc()
doc.save()
doc = Doc.objects.first()
self.assertEqual(doc._get_changed_fields(), [])
self.assertEqual(doc._delta(), ({}, {}))
embedded_1 = Embedded()
embedded_1.string_field = 'hello'
embedded_1.int_field = 1
embedded_1.dict_field = {'hello': 'world'}
embedded_1.list_field = ['1', 2, {'hello': 'world'}]
doc.embedded_field = embedded_1
self.assertEqual(doc._get_changed_fields(), ['embedded_field'])
embedded_delta = {
'string_field': 'hello',
'int_field': 1,
'dict_field': {'hello': 'world'},
'list_field': ['1', 2, {'hello': 'world'}]
}
self.assertEqual(doc.embedded_field._delta(), (embedded_delta, {}))
embedded_delta.update({
'_types': ['Embedded'],
'_cls': 'Embedded',
})
self.assertEqual(doc._delta(), ({'embedded_field': embedded_delta}, {}))
doc.save()
doc = doc.reload(10)
doc.embedded_field.dict_field = {}
self.assertEqual(doc._get_changed_fields(), ['embedded_field.dict_field'])
self.assertEqual(doc.embedded_field._delta(), ({}, {'dict_field': 1}))
self.assertEqual(doc._delta(), ({}, {'embedded_field.dict_field': 1}))
doc.save()
doc = doc.reload(10)
self.assertEqual(doc.embedded_field.dict_field, {})
doc.embedded_field.list_field = []
self.assertEqual(doc._get_changed_fields(), ['embedded_field.list_field'])
self.assertEqual(doc.embedded_field._delta(), ({}, {'list_field': 1}))
self.assertEqual(doc._delta(), ({}, {'embedded_field.list_field': 1}))
doc.save()
doc = doc.reload(10)
self.assertEqual(doc.embedded_field.list_field, [])
embedded_2 = Embedded()
embedded_2.string_field = 'hello'
embedded_2.int_field = 1
embedded_2.dict_field = {'hello': 'world'}
embedded_2.list_field = ['1', 2, {'hello': 'world'}]
doc.embedded_field.list_field = ['1', 2, embedded_2]
self.assertEqual(doc._get_changed_fields(), ['embedded_field.list_field'])
self.assertEqual(doc.embedded_field._delta(), ({
'list_field': ['1', 2, {
'_cls': 'Embedded',
'_types': ['Embedded'],
'string_field': 'hello',
'dict_field': {'hello': 'world'},
'int_field': 1,
'list_field': ['1', 2, {'hello': 'world'}],
}]
}, {}))
self.assertEqual(doc._delta(), ({
'embedded_field.list_field': ['1', 2, {
'_cls': 'Embedded',
'_types': ['Embedded'],
'string_field': 'hello',
'dict_field': {'hello': 'world'},
'int_field': 1,
'list_field': ['1', 2, {'hello': 'world'}],
}]
}, {}))
doc.save()
doc = doc.reload(10)
self.assertEqual(doc.embedded_field.list_field[0], '1')
self.assertEqual(doc.embedded_field.list_field[1], 2)
for k in doc.embedded_field.list_field[2]._fields:
self.assertEqual(doc.embedded_field.list_field[2][k], embedded_2[k])
doc.embedded_field.list_field[2].string_field = 'world'
self.assertEqual(doc._get_changed_fields(), ['embedded_field.list_field.2.string_field'])
self.assertEqual(doc.embedded_field._delta(), ({'list_field.2.string_field': 'world'}, {}))
self.assertEqual(doc._delta(), ({'embedded_field.list_field.2.string_field': 'world'}, {}))
doc.save()
doc = doc.reload(10)
self.assertEqual(doc.embedded_field.list_field[2].string_field, 'world')
# Test multiple assignments
doc.embedded_field.list_field[2].string_field = 'hello world'
doc.embedded_field.list_field[2] = doc.embedded_field.list_field[2]
self.assertEqual(doc._get_changed_fields(), ['embedded_field.list_field'])
self.assertEqual(doc.embedded_field._delta(), ({
'list_field': ['1', 2, {
'_types': ['Embedded'],
'_cls': 'Embedded',
'string_field': 'hello world',
'int_field': 1,
'list_field': ['1', 2, {'hello': 'world'}],
'dict_field': {'hello': 'world'}}]}, {}))
self.assertEqual(doc._delta(), ({
'embedded_field.list_field': ['1', 2, {
'_types': ['Embedded'],
'_cls': 'Embedded',
'string_field': 'hello world',
'int_field': 1,
'list_field': ['1', 2, {'hello': 'world'}],
'dict_field': {'hello': 'world'}}
]}, {}))
doc.save()
doc = doc.reload(10)
self.assertEqual(doc.embedded_field.list_field[2].string_field, 'hello world')
# Test list native methods
doc.embedded_field.list_field[2].list_field.pop(0)
self.assertEqual(doc._delta(), ({'embedded_field.list_field.2.list_field': [2, {'hello': 'world'}]}, {}))
doc.save()
doc = doc.reload(10)
doc.embedded_field.list_field[2].list_field.append(1)
self.assertEqual(doc._delta(), ({'embedded_field.list_field.2.list_field': [2, {'hello': 'world'}, 1]}, {}))
doc.save()
doc = doc.reload(10)
self.assertEqual(doc.embedded_field.list_field[2].list_field, [2, {'hello': 'world'}, 1])
doc.embedded_field.list_field[2].list_field.sort(key=str)
doc.save()
doc = doc.reload(10)
self.assertEqual(doc.embedded_field.list_field[2].list_field, [1, 2, {'hello': 'world'}])
del(doc.embedded_field.list_field[2].list_field[2]['hello'])
self.assertEqual(doc._delta(), ({'embedded_field.list_field.2.list_field': [1, 2, {}]}, {}))
doc.save()
doc = doc.reload(10)
del(doc.embedded_field.list_field[2].list_field)
self.assertEqual(doc._delta(), ({}, {'embedded_field.list_field.2.list_field': 1}))
doc.save()
doc = doc.reload(10)
doc.dict_field['Embedded'] = embedded_1
doc.save()
doc = doc.reload(10)
doc.dict_field['Embedded'].string_field = 'Hello World'
self.assertEqual(doc._get_changed_fields(), ['dict_field.Embedded.string_field'])
self.assertEqual(doc._delta(), ({'dict_field.Embedded.string_field': 'Hello World'}, {}))
def test_delta_db_field(self):
class Doc(Document):
string_field = StringField(db_field='db_string_field')
int_field = IntField(db_field='db_int_field')
dict_field = DictField(db_field='db_dict_field')
list_field = ListField(db_field='db_list_field')
Doc.drop_collection()
doc = Doc()
doc.save()
doc = Doc.objects.first()
self.assertEqual(doc._get_changed_fields(), [])
self.assertEqual(doc._delta(), ({}, {}))
doc.string_field = 'hello'
self.assertEqual(doc._get_changed_fields(), ['db_string_field'])
self.assertEqual(doc._delta(), ({'db_string_field': 'hello'}, {}))
doc._changed_fields = []
doc.int_field = 1
self.assertEqual(doc._get_changed_fields(), ['db_int_field'])
self.assertEqual(doc._delta(), ({'db_int_field': 1}, {}))
doc._changed_fields = []
dict_value = {'hello': 'world', 'ping': 'pong'}
doc.dict_field = dict_value
self.assertEqual(doc._get_changed_fields(), ['db_dict_field'])
self.assertEqual(doc._delta(), ({'db_dict_field': dict_value}, {}))
doc._changed_fields = []
list_value = ['1', 2, {'hello': 'world'}]
doc.list_field = list_value
self.assertEqual(doc._get_changed_fields(), ['db_list_field'])
self.assertEqual(doc._delta(), ({'db_list_field': list_value}, {}))
# Test unsetting
doc._changed_fields = []
doc.dict_field = {}
self.assertEqual(doc._get_changed_fields(), ['db_dict_field'])
self.assertEqual(doc._delta(), ({}, {'db_dict_field': 1}))
doc._changed_fields = []
doc.list_field = []
self.assertEqual(doc._get_changed_fields(), ['db_list_field'])
self.assertEqual(doc._delta(), ({}, {'db_list_field': 1}))
# Test it saves that data
doc = Doc()
doc.save()
doc.string_field = 'hello'
doc.int_field = 1
doc.dict_field = {'hello': 'world'}
doc.list_field = ['1', 2, {'hello': 'world'}]
doc.save()
doc = doc.reload(10)
self.assertEqual(doc.string_field, 'hello')
self.assertEqual(doc.int_field, 1)
self.assertEqual(doc.dict_field, {'hello': 'world'})
self.assertEqual(doc.list_field, ['1', 2, {'hello': 'world'}])
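# Note for the test above: deltas are keyed by db_field names rather than
# Python attribute names, so (assuming the document shape defined above):
#
#   doc.string_field = 'hello'                          # attribute name
#   doc._delta() == ({'db_string_field': 'hello'}, {})  # stored key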
def test_delta_recursive_db_field(self):
class Embedded(EmbeddedDocument):
string_field = StringField(db_field='db_string_field')
int_field = IntField(db_field='db_int_field')
dict_field = DictField(db_field='db_dict_field')
list_field = ListField(db_field='db_list_field')
class Doc(Document):
string_field = StringField(db_field='db_string_field')
int_field = IntField(db_field='db_int_field')
dict_field = DictField(db_field='db_dict_field')
list_field = ListField(db_field='db_list_field')
embedded_field = EmbeddedDocumentField(Embedded, db_field='db_embedded_field')
Doc.drop_collection()
doc = Doc()
doc.save()
doc = Doc.objects.first()
self.assertEqual(doc._get_changed_fields(), [])
self.assertEqual(doc._delta(), ({}, {}))
embedded_1 = Embedded()
embedded_1.string_field = 'hello'
embedded_1.int_field = 1
embedded_1.dict_field = {'hello': 'world'}
embedded_1.list_field = ['1', 2, {'hello': 'world'}]
doc.embedded_field = embedded_1
self.assertEqual(doc._get_changed_fields(), ['db_embedded_field'])
embedded_delta = {
'db_string_field': 'hello',
'db_int_field': 1,
'db_dict_field': {'hello': 'world'},
'db_list_field': ['1', 2, {'hello': 'world'}]
}
self.assertEqual(doc.embedded_field._delta(), (embedded_delta, {}))
embedded_delta.update({
'_types': ['Embedded'],
'_cls': 'Embedded',
})
self.assertEqual(doc._delta(), ({'db_embedded_field': embedded_delta}, {}))
doc.save()
doc = doc.reload(10)
doc.embedded_field.dict_field = {}
self.assertEqual(doc._get_changed_fields(), ['db_embedded_field.db_dict_field'])
self.assertEqual(doc.embedded_field._delta(), ({}, {'db_dict_field': 1}))
self.assertEqual(doc._delta(), ({}, {'db_embedded_field.db_dict_field': 1}))
doc.save()
doc = doc.reload(10)
self.assertEqual(doc.embedded_field.dict_field, {})
doc.embedded_field.list_field = []
self.assertEqual(doc._get_changed_fields(), ['db_embedded_field.db_list_field'])
self.assertEqual(doc.embedded_field._delta(), ({}, {'db_list_field': 1}))
self.assertEqual(doc._delta(), ({}, {'db_embedded_field.db_list_field': 1}))
doc.save()
doc = doc.reload(10)
self.assertEqual(doc.embedded_field.list_field, [])
embedded_2 = Embedded()
embedded_2.string_field = 'hello'
embedded_2.int_field = 1
embedded_2.dict_field = {'hello': 'world'}
embedded_2.list_field = ['1', 2, {'hello': 'world'}]
doc.embedded_field.list_field = ['1', 2, embedded_2]
self.assertEqual(doc._get_changed_fields(), ['db_embedded_field.db_list_field'])
self.assertEqual(doc.embedded_field._delta(), ({
'db_list_field': ['1', 2, {
'_cls': 'Embedded',
'_types': ['Embedded'],
'db_string_field': 'hello',
'db_dict_field': {'hello': 'world'},
'db_int_field': 1,
'db_list_field': ['1', 2, {'hello': 'world'}],
}]
}, {}))
self.assertEqual(doc._delta(), ({
'db_embedded_field.db_list_field': ['1', 2, {
'_cls': 'Embedded',
'_types': ['Embedded'],
'db_string_field': 'hello',
'db_dict_field': {'hello': 'world'},
'db_int_field': 1,
'db_list_field': ['1', 2, {'hello': 'world'}],
}]
}, {}))
doc.save()
doc = doc.reload(10)
self.assertEqual(doc.embedded_field.list_field[0], '1')
self.assertEqual(doc.embedded_field.list_field[1], 2)
for k in doc.embedded_field.list_field[2]._fields:
self.assertEqual(doc.embedded_field.list_field[2][k], embedded_2[k])
doc.embedded_field.list_field[2].string_field = 'world'
self.assertEqual(doc._get_changed_fields(), ['db_embedded_field.db_list_field.2.db_string_field'])
self.assertEqual(doc.embedded_field._delta(), ({'db_list_field.2.db_string_field': 'world'}, {}))
self.assertEqual(doc._delta(), ({'db_embedded_field.db_list_field.2.db_string_field': 'world'}, {}))
doc.save()
doc = doc.reload(10)
self.assertEqual(doc.embedded_field.list_field[2].string_field, 'world')
# Test multiple assignments
doc.embedded_field.list_field[2].string_field = 'hello world'
doc.embedded_field.list_field[2] = doc.embedded_field.list_field[2]
self.assertEqual(doc._get_changed_fields(), ['db_embedded_field.db_list_field'])
self.assertEqual(doc.embedded_field._delta(), ({
'db_list_field': ['1', 2, {
'_types': ['Embedded'],
'_cls': 'Embedded',
'db_string_field': 'hello world',
'db_int_field': 1,
'db_list_field': ['1', 2, {'hello': 'world'}],
'db_dict_field': {'hello': 'world'}}]}, {}))
self.assertEqual(doc._delta(), ({
'db_embedded_field.db_list_field': ['1', 2, {
'_types': ['Embedded'],
'_cls': 'Embedded',
'db_string_field': 'hello world',
'db_int_field': 1,
'db_list_field': ['1', 2, {'hello': 'world'}],
'db_dict_field': {'hello': 'world'}}
]}, {}))
doc.save()
doc = doc.reload(10)
self.assertEqual(doc.embedded_field.list_field[2].string_field, 'hello world')
# Test list native methods
doc.embedded_field.list_field[2].list_field.pop(0)
self.assertEqual(doc._delta(), ({'db_embedded_field.db_list_field.2.db_list_field': [2, {'hello': 'world'}]}, {}))
doc.save()
doc = doc.reload(10)
doc.embedded_field.list_field[2].list_field.append(1)
self.assertEqual(doc._delta(), ({'db_embedded_field.db_list_field.2.db_list_field': [2, {'hello': 'world'}, 1]}, {}))
doc.save()
doc = doc.reload(10)
self.assertEqual(doc.embedded_field.list_field[2].list_field, [2, {'hello': 'world'}, 1])
doc.embedded_field.list_field[2].list_field.sort(key=str)
doc.save()
doc = doc.reload(10)
self.assertEqual(doc.embedded_field.list_field[2].list_field, [1, 2, {'hello': 'world'}])
del(doc.embedded_field.list_field[2].list_field[2]['hello'])
self.assertEqual(doc._delta(), ({'db_embedded_field.db_list_field.2.db_list_field': [1, 2, {}]}, {}))
doc.save()
doc = doc.reload(10)
del(doc.embedded_field.list_field[2].list_field)
self.assertEqual(doc._delta(), ({}, {'db_embedded_field.db_list_field.2.db_list_field': 1}))
def test_save_only_changed_fields(self):
"""Ensure save only sets / unsets changed fields
"""
class User(self.Person):
active = BooleanField(default=True)
User.drop_collection()
# Create person object and save it to the database
user = User(name='Test User', age=30, active=True)
user.save()
user.reload()
# Simulate a race condition
same_person = self.Person.objects.get()
same_person.active = False
user.age = 21
user.save()
same_person.name = 'User'
same_person.save()
person = self.Person.objects.get()
self.assertEqual(person.name, 'User')
self.assertEqual(person.age, 21)
self.assertEqual(person.active, False)
def test_save_only_changed_fields_recursive(self):
"""Ensure save only sets / unsets changed fields
"""
class Comment(EmbeddedDocument):
published = BooleanField(default=True)
class User(self.Person):
comments_dict = DictField()
comments = ListField(EmbeddedDocumentField(Comment))
active = BooleanField(default=True)
User.drop_collection()
# Create person object and save it to the database
person = User(name='Test User', age=30, active=True)
person.comments.append(Comment())
person.save()
person.reload()
person = self.Person.objects.get()
self.assertTrue(person.comments[0].published)
person.comments[0].published = False
person.save()
person = self.Person.objects.get()
self.assertFalse(person.comments[0].published)
# Simple dict with an embedded document value
person.comments_dict['first_post'] = Comment()
person.save()
person = self.Person.objects.get()
self.assertTrue(person.comments_dict['first_post'].published)
person.comments_dict['first_post'].published = False
person.save()
person = self.Person.objects.get()
self.assertFalse(person.comments_dict['first_post'].published)
def test_delete(self):
"""Ensure that document may be deleted using the delete method.
"""
person = self.Person(name="Test User", age=30)
person.save()
self.assertEqual(len(self.Person.objects), 1)
person.delete()
self.assertEqual(len(self.Person.objects), 0)
def test_save_custom_id(self):
"""Ensure that a document may be saved with a custom _id.
"""
# Create person object and save it to the database
person = self.Person(name='Test User', age=30,
id='497ce96f395f2f052a494fd4')
person.save()
# Ensure that the object is in the database with the correct _id
collection = self.db[self.Person._get_collection_name()]
person_obj = collection.find_one({'name': 'Test User'})
self.assertEqual(str(person_obj['_id']), '497ce96f395f2f052a494fd4')
def test_save_custom_pk(self):
"""Ensure that a document may be saved with a custom _id using pk alias.
"""
# Create person object and save it to the database
person = self.Person(name='Test User', age=30,
pk='497ce96f395f2f052a494fd4')
person.save()
# Ensure that the object is in the database with the correct _id
collection = self.db[self.Person._get_collection_name()]
person_obj = collection.find_one({'name': 'Test User'})
self.assertEqual(str(person_obj['_id']), '497ce96f395f2f052a494fd4')
def test_save_list(self):
"""Ensure that a list field may be properly saved.
"""
class Comment(EmbeddedDocument):
content = StringField()
class BlogPost(Document):
content = StringField()
comments = ListField(EmbeddedDocumentField(Comment))
tags = ListField(StringField())
BlogPost.drop_collection()
post = BlogPost(content='Went for a walk today...')
post.tags = tags = ['fun', 'leisure']
comments = [Comment(content='Good for you'), Comment(content='Yay.')]
post.comments = comments
post.save()
collection = self.db[BlogPost._get_collection_name()]
post_obj = collection.find_one()
self.assertEqual(post_obj['tags'], tags)
for comment_obj, comment in zip(post_obj['comments'], comments):
self.assertEqual(comment_obj['content'], comment['content'])
BlogPost.drop_collection()
def test_list_search_by_embedded(self):
class User(Document):
username = StringField(required=True)
meta = {'allow_inheritance': False}
class Comment(EmbeddedDocument):
comment = StringField()
user = ReferenceField(User,
required=True)
meta = {'allow_inheritance': False}
class Page(Document):
comments = ListField(EmbeddedDocumentField(Comment))
meta = {'allow_inheritance': False,
'indexes': [
{'fields': ['comments.user']}
]}
User.drop_collection()
Page.drop_collection()
u1 = User(username="wilson")
u1.save()
u2 = User(username="rozza")
u2.save()
u3 = User(username="hmarr")
u3.save()
p1 = Page(comments=[Comment(user=u1, comment="Its very good"),
Comment(user=u2, comment="Hello world"),
Comment(user=u3, comment="Ping Pong"),
Comment(user=u1, comment="I like a beer")])
p1.save()
p2 = Page(comments=[Comment(user=u1, comment="Its very good"),
Comment(user=u2, comment="Hello world")])
p2.save()
p3 = Page(comments=[Comment(user=u3, comment="Its very good")])
p3.save()
p4 = Page(comments=[Comment(user=u2, comment="Heavy Metal song")])
p4.save()
self.assertEqual([p1, p2], list(Page.objects.filter(comments__user=u1)))
self.assertEqual([p1, p2, p4], list(Page.objects.filter(comments__user=u2)))
self.assertEqual([p1, p3], list(Page.objects.filter(comments__user=u3)))
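# For orientation: the comments__user lookups above resolve to a dotted
# query on the embedded list. A sketch of the assumed raw query (not
# asserted by this test):
#
#   Page.objects.filter(comments__user=u1)
#     -> collection.find({'comments.user': u1.to_dbref()})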
def test_save_embedded_document(self):
"""Ensure that a document with an embedded document field may be
saved in the database.
"""
class EmployeeDetails(EmbeddedDocument):
position = StringField()
class Employee(self.Person):
salary = IntField()
details = EmbeddedDocumentField(EmployeeDetails)
# Create employee object and save it to the database
employee = Employee(name='Test Employee', age=50, salary=20000)
employee.details = EmployeeDetails(position='Developer')
employee.save()
# Ensure that the object is in the database
collection = self.db[self.Person._get_collection_name()]
employee_obj = collection.find_one({'name': 'Test Employee'})
self.assertEqual(employee_obj['name'], 'Test Employee')
self.assertEqual(employee_obj['age'], 50)
# Ensure that the 'details' embedded object saved correctly
self.assertEqual(employee_obj['details']['position'], 'Developer')
def test_embedded_update_after_save(self):
"""
Test update of `EmbeddedDocumentField` attached to a newly saved
document.
"""
class Page(EmbeddedDocument):
log_message = StringField(verbose_name="Log message",
required=True)
class Site(Document):
page = EmbeddedDocumentField(Page)
Site.drop_collection()
site = Site(page=Page(log_message="Warning: Dummy message"))
site.save()
# Update
site.page.log_message = "Error: Dummy message"
site.save()
site = Site.objects.first()
self.assertEqual(site.page.log_message, "Error: Dummy message")
def test_updating_an_embedded_document(self):
"""Ensure that a document with an embedded document field may be
saved in the database.
"""
class EmployeeDetails(EmbeddedDocument):
position = StringField()
class Employee(self.Person):
salary = IntField()
details = EmbeddedDocumentField(EmployeeDetails)
# Create employee object and save it to the database
employee = Employee(name='Test Employee', age=50, salary=20000)
employee.details = EmployeeDetails(position='Developer')
employee.save()
# Test updating an embedded document
promoted_employee = Employee.objects.get(name='Test Employee')
promoted_employee.details.position = 'Senior Developer'
promoted_employee.save()
promoted_employee.reload()
self.assertEqual(promoted_employee.name, 'Test Employee')
self.assertEqual(promoted_employee.age, 50)
# Ensure that the 'details' embedded object saved correctly
self.assertEqual(promoted_employee.details.position, 'Senior Developer')
# Test removal
promoted_employee.details = None
promoted_employee.save()
promoted_employee.reload()
self.assertEqual(promoted_employee.details, None)
def test_mixins_dont_add_to_types(self):
class Mixin(object):
name = StringField()
class Person(Document, Mixin):
pass
Person.drop_collection()
self.assertEqual(Person._fields.keys(), ['name', 'id'])
Person(name="Rozza").save()
collection = self.db[Person._get_collection_name()]
obj = collection.find_one()
self.assertEqual(obj['_cls'], 'Person')
self.assertEqual(obj['_types'], ['Person'])
self.assertEqual(Person.objects.count(), 1)
Person.drop_collection()
def test_object_mixins(self):
class NameMixin(object):
name = StringField()
class Foo(EmbeddedDocument, NameMixin):
quantity = IntField()
self.assertEqual(['name', 'quantity'], sorted(Foo._fields.keys()))
class Bar(Document, NameMixin):
widgets = StringField()
self.assertEqual(['id', 'name', 'widgets'], sorted(Bar._fields.keys()))
def test_mixin_inheritance(self):
class BaseMixIn(object):
count = IntField()
data = StringField()
class DoubleMixIn(BaseMixIn):
comment = StringField()
class TestDoc(Document, DoubleMixIn):
age = IntField()
TestDoc.drop_collection()
t = TestDoc(count=12, data="test",
comment="great!", age=19)
t.save()
t = TestDoc.objects.first()
self.assertEqual(t.age, 19)
self.assertEqual(t.comment, "great!")
self.assertEqual(t.data, "test")
self.assertEqual(t.count, 12)
def test_save_reference(self):
"""Ensure that a document reference field may be saved in the database.
"""
class BlogPost(Document):
meta = {'collection': 'blogpost_1'}
content = StringField()
author = ReferenceField(self.Person)
BlogPost.drop_collection()
author = self.Person(name='Test User')
author.save()
post = BlogPost(content='Watched some TV today... how exciting.')
# Should only reference author when saving
post.author = author
post.save()
post_obj = BlogPost.objects.first()
# Test laziness
self.assertTrue(isinstance(post_obj._data['author'],
bson.DBRef))
self.assertTrue(isinstance(post_obj.author, self.Person))
self.assertEqual(post_obj.author.name, 'Test User')
# Ensure that the dereferenced object may be changed and saved
post_obj.author.age = 25
post_obj.author.save()
author = list(self.Person.objects(name='Test User'))[-1]
self.assertEqual(author.age, 25)
BlogPost.drop_collection()
def test_cannot_perform_joins_references(self):
class BlogPost(Document):
author = ReferenceField(self.Person)
author2 = GenericReferenceField()
def test_reference():
list(BlogPost.objects(author__name="test"))
self.assertRaises(InvalidQueryError, test_reference)
def test_generic_reference():
list(BlogPost.objects(author2__name="test"))
self.assertRaises(InvalidQueryError, test_generic_reference)
def test_duplicate_db_fields_raise_invalid_document_error(self):
"""Ensure a InvalidDocumentError is thrown if duplicate fields
declare the same db_field"""
def throw_invalid_document_error():
class Foo(Document):
name = StringField()
name2 = StringField(db_field='name')
self.assertRaises(InvalidDocumentError, throw_invalid_document_error)
def test_invalid_son(self):
"""Raise an error if loading invalid data"""
class Occurrence(EmbeddedDocument):
number = IntField()
class Word(Document):
stem = StringField()
count = IntField(default=1)
forms = ListField(StringField(), default=list)
occurs = ListField(EmbeddedDocumentField(Occurrence), default=list)
def raise_invalid_document():
Word._from_son({'stem': [1,2,3], 'forms': 1, 'count': 'one', 'occurs': {"hello": None}})
self.assertRaises(InvalidDocumentError, raise_invalid_document)
def test_reverse_delete_rule_cascade_and_nullify(self):
"""Ensure that a referenced document is also deleted upon deletion.
"""
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
reviewer = ReferenceField(self.Person, reverse_delete_rule=NULLIFY)
self.Person.drop_collection()
BlogPost.drop_collection()
author = self.Person(name='Test User')
author.save()
reviewer = self.Person(name='Re Viewer')
reviewer.save()
post = BlogPost(content='Watched some TV')
post.author = author
post.reviewer = reviewer
post.save()
reviewer.delete()
self.assertEqual(len(BlogPost.objects), 1) # No effect on the BlogPost
self.assertEqual(BlogPost.objects.get().reviewer, None)
# Delete the Person, which should lead to deletion of the BlogPost, too
author.delete()
self.assertEqual(len(BlogPost.objects), 0)
def test_reverse_delete_rule_cascade_and_nullify_complex_field(self):
"""Ensure that a referenced document is also deleted upon deletion for
complex fields.
"""
class BlogPost(Document):
content = StringField()
authors = ListField(ReferenceField(self.Person, reverse_delete_rule=CASCADE))
reviewers = ListField(ReferenceField(self.Person, reverse_delete_rule=NULLIFY))
self.Person.drop_collection()
BlogPost.drop_collection()
author = self.Person(name='Test User')
author.save()
reviewer = self.Person(name='Re Viewer')
reviewer.save()
post = BlogPost(content='Watched some TV')
post.authors = [author]
post.reviewers = [reviewer]
post.save()
# Deleting the reviewer should have no effect on the BlogPost
reviewer.delete()
self.assertEqual(len(BlogPost.objects), 1)
self.assertEqual(BlogPost.objects.get().reviewers, [])
# Delete the Person, which should lead to deletion of the BlogPost, too
author.delete()
self.assertEqual(len(BlogPost.objects), 0)
def test_two_way_reverse_delete_rule(self):
"""Ensure that Bi-Directional relationships work with
reverse_delete_rule
"""
class Bar(Document):
content = StringField()
foo = ReferenceField('Foo')
class Foo(Document):
content = StringField()
bar = ReferenceField(Bar)
Bar.register_delete_rule(Foo, 'bar', NULLIFY)
Foo.register_delete_rule(Bar, 'foo', NULLIFY)
Bar.drop_collection()
Foo.drop_collection()
b = Bar(content="Hello")
b.save()
f = Foo(content="world", bar=b)
f.save()
b.foo = f
b.save()
f.delete()
self.assertEqual(len(Bar.objects), 1)  # No effect on the Bar
self.assertEqual(Bar.objects.get().foo, None)
def test_invalid_reverse_delete_rules_raise_errors(self):
def throw_invalid_document_error():
class Blog(Document):
content = StringField()
authors = MapField(ReferenceField(self.Person, reverse_delete_rule=CASCADE))
reviewers = DictField(field=ReferenceField(self.Person, reverse_delete_rule=NULLIFY))
self.assertRaises(InvalidDocumentError, throw_invalid_document_error)
def throw_invalid_document_error_embedded():
class Parents(EmbeddedDocument):
father = ReferenceField('Person', reverse_delete_rule=DENY)
mother = ReferenceField('Person', reverse_delete_rule=DENY)
self.assertRaises(InvalidDocumentError, throw_invalid_document_error_embedded)
def test_reverse_delete_rule_cascade_recurs(self):
"""Ensure that a chain of documents is also deleted upon cascaded
deletion.
"""
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
class Comment(Document):
text = StringField()
post = ReferenceField(BlogPost, reverse_delete_rule=CASCADE)
self.Person.drop_collection()
BlogPost.drop_collection()
Comment.drop_collection()
author = self.Person(name='Test User')
author.save()
post = BlogPost(content='Watched some TV')
post.author = author
post.save()
comment = Comment(text='Kudos.')
comment.post = post
comment.save()
# Delete the Person, which should lead to deletion of the BlogPost, and,
# recursively to the Comment, too
author.delete()
self.assertEqual(len(Comment.objects), 0)
self.Person.drop_collection()
BlogPost.drop_collection()
Comment.drop_collection()
def test_reverse_delete_rule_deny(self):
"""Ensure that a document cannot be referenced if there are still
documents referring to it.
"""
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, reverse_delete_rule=DENY)
self.Person.drop_collection()
BlogPost.drop_collection()
author = self.Person(name='Test User')
author.save()
post = BlogPost(content='Watched some TV')
post.author = author
post.save()
# Deleting the Person should be denied
self.assertRaises(OperationError, author.delete) # Should raise denied error
self.assertEqual(len(BlogPost.objects), 1) # No objects may have been deleted
self.assertEqual(len(self.Person.objects), 1)
# Other users, that don't have BlogPosts must be removable, like normal
author = self.Person(name='Another User')
author.save()
self.assertEqual(len(self.Person.objects), 2)
author.delete()
self.assertEqual(len(self.Person.objects), 1)
self.Person.drop_collection()
BlogPost.drop_collection()
def subclasses_and_unique_keys_works(self):
class A(Document):
pass
class B(A):
foo = BooleanField(unique=True)
A.drop_collection()
B.drop_collection()
A().save()
A().save()
B(foo=True).save()
self.assertEqual(A.objects.count(), 2)
self.assertEqual(B.objects.count(), 1)
A.drop_collection()
B.drop_collection()
def test_document_hash(self):
"""Test document in list, dict, set
"""
class User(Document):
pass
class BlogPost(Document):
pass
# Clear old data
User.drop_collection()
BlogPost.drop_collection()
u1 = User.objects.create()
u2 = User.objects.create()
u3 = User.objects.create()
u4 = User() # New object
b1 = BlogPost.objects.create()
b2 = BlogPost.objects.create()
# in List
all_user_list = list(User.objects.all())
self.assertTrue(u1 in all_user_list)
self.assertTrue(u2 in all_user_list)
self.assertTrue(u3 in all_user_list)
self.assertFalse(u4 in all_user_list) # New object
self.assertFalse(b1 in all_user_list) # Other object
self.assertFalse(b2 in all_user_list) # Other object
# in Dict
all_user_dic = {}
for u in User.objects.all():
all_user_dic[u] = "OK"
self.assertEqual(all_user_dic.get(u1, False), "OK")
self.assertEqual(all_user_dic.get(u2, False), "OK")
self.assertEqual(all_user_dic.get(u3, False), "OK")
self.assertEqual(all_user_dic.get(u4, False), False)  # New object
self.assertEqual(all_user_dic.get(b1, False), False)  # Other object
self.assertEqual(all_user_dic.get(b2, False), False)  # Other object
# in Set
all_user_set = set(User.objects.all())
self.assertTrue(u1 in all_user_set)
def test_picklable(self):
pickle_doc = PickleTest(number=1, string="One", lists=['1', '2'])
pickle_doc.embedded = PickleEmbedded()
pickle_doc.save()
pickled_doc = pickle.dumps(pickle_doc)
resurrected = pickle.loads(pickled_doc)
self.assertEqual(resurrected, pickle_doc)
resurrected.string = "Two"
resurrected.save()
pickle_doc = pickle_doc.reload()
self.assertEqual(resurrected, pickle_doc)
def test_throw_invalid_document_error(self):
# Defining a field that shadows a Document method (validate) must raise
def throw_invalid_document_error():
class Blog(Document):
validate = DictField()
self.assertRaises(InvalidDocumentError, throw_invalid_document_error)
def test_mutating_documents(self):
class B(EmbeddedDocument):
field1 = StringField(default='field1')
class A(Document):
b = EmbeddedDocumentField(B, default=lambda: B())
A.drop_collection()
a = A()
a.save()
a.reload()
self.assertEqual(a.b.field1, 'field1')
class C(EmbeddedDocument):
c_field = StringField(default='cfield')
class B(EmbeddedDocument):
field1 = StringField(default='field1')
field2 = EmbeddedDocumentField(C, default=lambda: C())
class A(Document):
b = EmbeddedDocumentField(B, default=lambda: B())
a = A.objects()[0]
a.b.field2.c_field = 'new value'
a.save()
a.reload()
self.assertEqual(a.b.field2.c_field, 'new value')
def test_can_save_false_values(self):
"""Ensures you can save False values on save"""
class Doc(Document):
foo = StringField()
archived = BooleanField(default=False, required=True)
Doc.drop_collection()
d = Doc()
d.save()
d.archived = False
d.save()
self.assertEqual(Doc.objects(archived=False).count(), 1)
def test_can_save_false_values_dynamic(self):
"""Ensures you can save False values on dynamic docs"""
class Doc(DynamicDocument):
foo = StringField()
Doc.drop_collection()
d = Doc()
d.save()
d.archived = False
d.save()
self.assertEqual(Doc.objects(archived=False).count(), 1)
def test_do_not_save_unchanged_references(self):
"""Ensures cascading saves dont auto update"""
class Job(Document):
name = StringField()
class Person(Document):
name = StringField()
age = IntField()
job = ReferenceField(Job)
Job.drop_collection()
Person.drop_collection()
job = Job(name="Job 1")
# job should not have any changed fields after the save
job.save()
person = Person(name="name", age=10, job=job)
from pymongo.collection import Collection
orig_update = Collection.update
try:
def fake_update(*args, **kwargs):
self.fail("Unexpected update for %s" % args[0].name)
return orig_update(*args, **kwargs)
Collection.update = fake_update
person.save()
finally:
Collection.update = orig_update
def test_db_alias_tests(self):
""" DB Alias tests """
# mongoenginetest - Is default connection alias from setUp()
# Register Aliases
register_connection('testdb-1', 'mongoenginetest2')
register_connection('testdb-2', 'mongoenginetest3')
register_connection('testdb-3', 'mongoenginetest4')
class User(Document):
name = StringField()
meta = {"db_alias": "testdb-1"}
class Book(Document):
name = StringField()
meta = {"db_alias": "testdb-2"}
# Drops
User.drop_collection()
Book.drop_collection()
# Create
bob = User.objects.create(name="Bob")
hp = Book.objects.create(name="Harry Potter")
# Selects
self.assertEqual(User.objects.first(), bob)
self.assertEqual(Book.objects.first(), hp)
# DeReference
class AuthorBooks(Document):
author = ReferenceField(User)
book = ReferenceField(Book)
meta = {"db_alias": "testdb-3"}
# Drops
AuthorBooks.drop_collection()
ab = AuthorBooks.objects.create(author=bob, book=hp)
# select
self.assertEqual(AuthorBooks.objects.first(), ab)
self.assertEqual(AuthorBooks.objects.first().book, hp)
self.assertEqual(AuthorBooks.objects.first().author, bob)
self.assertEqual(AuthorBooks.objects.filter(author=bob).first(), ab)
self.assertEqual(AuthorBooks.objects.filter(book=hp).first(), ab)
# DB Alias
self.assertEqual(User._get_db(), get_db("testdb-1"))
self.assertEqual(Book._get_db(), get_db("testdb-2"))
self.assertEqual(AuthorBooks._get_db(), get_db("testdb-3"))
# Collections
self.assertEqual(User._get_collection(), get_db("testdb-1")[User._get_collection_name()])
self.assertEqual(Book._get_collection(), get_db("testdb-2")[Book._get_collection_name()])
self.assertEqual(AuthorBooks._get_collection(), get_db("testdb-3")[AuthorBooks._get_collection_name()])
def test_db_alias_propagates(self):
"""db_alias propagates?
"""
class A(Document):
name = StringField()
meta = {"db_alias": "testdb-1", "allow_inheritance": True}
class B(A):
pass
self.assertEqual('testdb-1', B._meta.get('db_alias'))
def test_db_ref_usage(self):
""" DB Ref usage in dict_fields"""
class User(Document):
name = StringField()
class Book(Document):
name = StringField()
author = ReferenceField(User)
extra = DictField()
meta = {
'ordering': ['+name']
}
def __unicode__(self):
return self.name
def __str__(self):
return self.name
# Drops
User.drop_collection()
Book.drop_collection()
# Authors
bob = User.objects.create(name="Bob")
jon = User.objects.create(name="Jon")
# Redactors
karl = User.objects.create(name="Karl")
susan = User.objects.create(name="Susan")
peter = User.objects.create(name="Peter")
# Bob
Book.objects.create(name="1", author=bob, extra={"a": bob.to_dbref(), "b": [karl.to_dbref(), susan.to_dbref()]})
Book.objects.create(name="2", author=bob, extra={"a": bob.to_dbref(), "b": karl.to_dbref()} )
Book.objects.create(name="3", author=bob, extra={"a": bob.to_dbref(), "c": [jon.to_dbref(), peter.to_dbref()]})
Book.objects.create(name="4", author=bob)
# Jon
Book.objects.create(name="5", author=jon)
Book.objects.create(name="6", author=peter)
Book.objects.create(name="7", author=jon)
Book.objects.create(name="8", author=jon)
Book.objects.create(name="9", author=jon, extra={"a": peter.to_dbref()})
# Checks
self.assertEqual(u",".join([str(b) for b in Book.objects.all()] ) , "1,2,3,4,5,6,7,8,9" )
# bob related books
self.assertEqual(u",".join([str(b) for b in Book.objects.filter(
Q(extra__a=bob ) |
Q(author=bob) |
Q(extra__b=bob))]) ,
"1,2,3,4")
# Susan & Karl related books
self.assertEqual(u",".join([str(b) for b in Book.objects.filter(
Q(extra__a__all=[karl, susan] ) |
Q(author__all=[karl, susan ] ) |
Q(extra__b__all=[karl.to_dbref(), susan.to_dbref()] )
) ] ) , "1" )
# $where
self.assertEqual(u",".join([str(b) for b in Book.objects.filter(
__raw__={
"$where": """
function(){
return this.name == '1' ||
this.name == '2';}"""
}
) ]), "1,2")
class ValidatorErrorTest(unittest.TestCase):
def test_to_dict(self):
"""Ensure a ValidationError handles error to_dict correctly.
"""
error = ValidationError('root')
self.assertEqual(error.to_dict(), {})
# 1st level error schema
error.errors = {'1st': ValidationError('bad 1st'), }
self.assertTrue('1st' in error.to_dict())
self.assertEqual(error.to_dict()['1st'], 'bad 1st')
# 2nd level error schema
error.errors = {'1st': ValidationError('bad 1st', errors={
'2nd': ValidationError('bad 2nd'),
})}
self.assertTrue('1st' in error.to_dict())
self.assertTrue(isinstance(error.to_dict()['1st'], dict))
self.assertTrue('2nd' in error.to_dict()['1st'])
self.assertEqual(error.to_dict()['1st']['2nd'], 'bad 2nd')
# moar levels
error.errors = {'1st': ValidationError('bad 1st', errors={
'2nd': ValidationError('bad 2nd', errors={
'3rd': ValidationError('bad 3rd', errors={
'4th': ValidationError('Inception'),
}),
}),
})}
self.assertTrue('1st' in error.to_dict())
self.assertTrue('2nd' in error.to_dict()['1st'])
self.assertTrue('3rd' in error.to_dict()['1st']['2nd'])
self.assertTrue('4th' in error.to_dict()['1st']['2nd']['3rd'])
self.assertEqual(error.to_dict()['1st']['2nd']['3rd']['4th'],
'Inception')
self.assertEqual(error.message, "root(2nd.3rd.4th.Inception: ['1st'])")
def test_model_validation(self):
class User(Document):
username = StringField(primary_key=True)
name = StringField(required=True)
try:
User().validate()
except ValidationError, e:
expected_error_message = """ValidationError(Field is required: ['username', 'name'])"""
self.assertEqual(e.message, expected_error_message)
self.assertEqual(e.to_dict(), {
'username': 'Field is required',
'name': 'Field is required'})
def test_spaces_in_keys(self):
class Embedded(DynamicEmbeddedDocument):
pass
class Doc(DynamicDocument):
pass
Doc.drop_collection()
doc = Doc()
setattr(doc, 'hello world', 1)
doc.save()
one = Doc.objects.filter(**{'hello world': 1}).count()
self.assertEqual(1, one)
def test_fields_rewrite(self):
class BasePerson(Document):
name = StringField()
age = IntField()
meta = {'abstract': True}
class Person(BasePerson):
name = StringField(required=True)
p = Person(age=15)
self.assertRaises(ValidationError, p.validate)
def test_cascaded_save_wrong_reference(self):
class ADocument(Document):
val = IntField()
class BDocument(Document):
a = ReferenceField(ADocument)
ADocument.drop_collection()
BDocument.drop_collection()
a = ADocument()
a.val = 15
a.save()
b = BDocument()
b.a = a
b.save()
a.delete()
b = BDocument.objects.first()
b.save(cascade=True)
def test_shard_key(self):
class LogEntry(Document):
machine = StringField()
log = StringField()
meta = {
'shard_key': ('machine',)
}
LogEntry.drop_collection()
log = LogEntry()
log.machine = "Localhost"
log.save()
log.log = "Saving"
log.save()
def change_shard_key():
log.machine = "127.0.0.1"
self.assertRaises(OperationError, change_shard_key)
def test_shard_key_primary(self):
class LogEntry(Document):
machine = StringField(primary_key=True)
log = StringField()
meta = {
'shard_key': ('machine',)
}
LogEntry.drop_collection()
log = LogEntry()
log.machine = "Localhost"
log.save()
log.log = "Saving"
log.save()
def change_shard_key():
log.machine = "127.0.0.1"
self.assertRaises(OperationError, change_shard_key)
if __name__ == '__main__':
unittest.main()
| depop/mongoengine-stripped | tests/test_document.py | Python | mit | 116,392 | [
"exciting"
] | a974f401d21fa1c29f0229bbc4f8cc5e550b088bddcc335d0dda9f6eaf6ba287 |
#!/usr/bin/env python
'''
TDDFT with k-point sampling or at an individual k-point
(This feature is in testing. We observe numerical stability problems in
TDDFT diagonalization.)
'''
from pyscf.pbc import gto
from pyscf.pbc import scf
from pyscf.pbc import df
from pyscf.pbc import tdscf
cell = gto.Cell()
cell.unit = 'B'
cell.atom = '''
C 0. 0. 0.
C 1.68506879 1.68506879 1.68506879
'''
cell.a = '''
0. 3.37013758 3.37013758
3.37013758 0. 3.37013758
3.37013758 3.37013758 0.
'''
cell.basis = 'gth-szv'
cell.pseudo = 'gth-pade'
cell.build()
mf = scf.KRHF(cell, cell.make_kpts([2,2,2]))
mf.run()
td = tdscf.KTDA(mf)
td.nstates = 5
td.verbose = 5
print(td.kernel()[0] * 27.2114)
td = tdscf.KTDDFT(mf)
td.nstates = 5
td.verbose = 5
print(td.kernel()[0] * 27.2114)
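# Note: kernel() returns excitation energies in Hartree; the factor
# 27.2114 above converts them to eV (1 Hartree ~= 27.2114 eV).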
mf = scf.RHF(cell)
mf.kernel()
td = tdscf.TDA(mf)
td.kernel()
# TODO:
#kpt = cell.get_abs_kpts([0.25, 0.25, 0.25])
#mf = scf.RHF(cell, kpt=kpt)
#mf.kernel()
#td = tdscf.TDA(mf)
#td.kernel()
| gkc1000/pyscf | examples/pbc/22-k_points_tddft.py | Python | apache-2.0 | 1,002 | [
"PySCF"
] | d91f8efa1bdfbb3a739cd1afaa21bfb78551405e1ca0f728b7317dde1d30cfaf |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# freeseer - vga/presentation capture software
#
# Copyright (C) 2013, 2014 Free and Open Source Software Learning Centre
# http://fosslc.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# For support, questions, suggestions or any other inquiries, visit:
# http://wiki.github.com/Freeseer/freeseer/
import ctypes
import os
import shutil
import sys
import unicodedata
def format_size(num):
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
return "%3.1f %s" % (num, 'PB')
def get_free_space(directory):
""" Return directory free space (in human readable form) """
if sys.platform in ["win32", "cygwin"]:
free_bytes = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(directory),
None, None, ctypes.pointer(free_bytes))
space = free_bytes.value
else:
space = os.statvfs(directory).f_bfree * os.statvfs(directory).f_frsize
return format_size(space)
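# A minimal usage sketch (values are illustrative only):
#
#   >>> format_size(123456789)
#   '117.7 MB'
#   >>> get_free_space('/tmp')  # platform and disk dependent
#   '52.3 GB'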
###
### Filename related functions
###
def get_record_name(extension, presentation=None, filename=None, path="."):
"""Returns the filename to use when recording.
If a record name with a .None extension is returned, the record name
will just be ignored by the output plugin (e.g. Video Preview plugin).
Function will return None if neither presentation nor filename is passed.
"""
if presentation is not None:
recordname = make_record_name(presentation)
elif filename is not None:
recordname = filename
else:
return None
count = 0
tempname = recordname
# Add a number to the end of a duplicate record name so we don't
# overwrite existing files
while(os.path.exists(os.path.join(path, "%s.%s" % (tempname, extension)))):
tempname = "{0}-{1}".format(recordname, count)
count += 1
recordname = "%s.%s" % (tempname, extension)
return recordname
def make_record_name(presentation):
"""Create an 'EVENT-ROOM-SPEAKER-TITLE' record name using presentation metadata."""
tags = [
make_shortname(presentation.event),
make_shortname(presentation.room),
make_shortname(presentation.speaker),
make_shortname(presentation.title),
]
record_name = unicode('-'.join(tag for tag in tags if tag))
# Convert unicode filenames to their equivalent ascii so that
# we don't run into issues with gstreamer or filesystems.
safe_record_name = unicodedata.normalize('NFKD', record_name).encode('ascii', 'ignore')
return safe_record_name or 'default'
def make_shortname(string):
"""Returns the first 6 characters of a string in uppercase.
Strip out non alpha-numeric characters, spaces, and most punctuation.
"""
bad_chars = set("!@#$%^&*()+=|:;{}[]',? <>~`/\\")
string = "".join(ch for ch in string if ch not in bad_chars)
return string[0:6].upper()
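# A sketch of how the helpers above compose, using hypothetical
# presentation metadata (event="PyCon 2014", room="Room A",
# speaker="Ada", title="Intro"):
#
#   >>> make_shortname("PyCon 2014!")
#   'PYCON2'
#
# make_record_name(presentation) would then yield something like
# 'PYCON2-ROOMA-ADA-INTRO', and get_record_name() appends the
# extension plus a numeric suffix on filename collisions.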
###
### Handy functions for reseting Freeseer configuration
###
def reset(configdir):
"""Deletes the Freeseer configuration directory"""
if validate_configdir(configdir):
print('This will wipe out your freeseer configuration directory.')
if confirm_yes() is True:
shutil.rmtree(configdir)
else:
print("%s is not a invalid configuration directory." % configdir)
def reset_configuration(configdir, profile='default'):
"""Deletes the Freeseer configuration files freeseer.conf and plugin.conf"""
if profile is None:
profile = 'default'
if validate_configdir(configdir):
freeseer_conf = os.path.join(configdir, 'profiles', profile, 'freeseer.conf')
plugin_conf = os.path.join(configdir, 'profiles', profile, 'plugin.conf')
if os.path.exists(freeseer_conf):
os.remove(freeseer_conf)
if os.path.exists(plugin_conf):
os.remove(plugin_conf)
else:
print("%s is not a invalid configuration directory." % configdir)
def reset_database(configdir, profile='default'):
"""Deletes the Freeseer database file"""
if profile is None:
profile = 'default'
if validate_configdir(configdir):
dbfile = os.path.join(configdir, 'profiles', profile, 'presentations.db')
if os.path.exists(dbfile):
os.remove(dbfile)
else:
print("%s is not a invalid configuration directory." % configdir)
def validate_configdir(configdir):
"""Validate that the configdir is not one of the blacklisted directories"""
if (configdir and configdir != '/' and
configdir != '~' and
configdir != os.path.abspath(os.path.expanduser('~'))):
return True
return False
def confirm_yes():
"""Prompts the user to confirm by typing 'yes' in response"""
confirm = raw_input("Enter 'yes' to confirm: ")
if confirm == 'yes':
return True
return False
| Freeseer/freeseer | src/freeseer/framework/util.py | Python | gpl-3.0 | 5,638 | [
"VisIt"
] | 684defdf93e7d606215dd4131828a7aa202bcb9d60546531d4be74b38cf7e441 |
"""Payload management for sending Ansible files and test content to other systems (VMs, containers)."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import atexit
import os
import stat
import tarfile
import tempfile
import time
from . import types as t
from .config import (
IntegrationConfig,
ShellConfig,
)
from .util import (
display,
ANSIBLE_SOURCE_ROOT,
remove_tree,
is_subdir,
)
from .data import (
data_context,
)
from .util_common import (
CommonConfig,
)
# improve performance by disabling uid/gid lookups
tarfile.pwd = None
tarfile.grp = None
# this bin symlink map must exactly match the contents of the bin directory
# it is necessary for payload creation to reconstruct the bin directory when running ansible-test from an installed version of ansible
ANSIBLE_BIN_SYMLINK_MAP = {
'ansible': '../lib/ansible/cli/scripts/ansible_cli_stub.py',
'ansible-config': 'ansible',
'ansible-connection': '../lib/ansible/cli/scripts/ansible_connection_cli_stub.py',
'ansible-console': 'ansible',
'ansible-doc': 'ansible',
'ansible-galaxy': 'ansible',
'ansible-inventory': 'ansible',
'ansible-playbook': 'ansible',
'ansible-pull': 'ansible',
'ansible-test': '../test/lib/ansible_test/_data/cli/ansible_test_cli_stub.py',
'ansible-vault': 'ansible',
}
def create_payload(args, dst_path): # type: (CommonConfig, str) -> None
"""Create a payload for delegation."""
if args.explain:
return
files = list(data_context().ansible_source)
filters = {}
def make_executable(tar_info): # type: (tarfile.TarInfo) -> t.Optional[tarfile.TarInfo]
"""Make the given file executable."""
tar_info.mode |= stat.S_IXUSR | stat.S_IXOTH | stat.S_IXGRP
return tar_info
if not ANSIBLE_SOURCE_ROOT:
# reconstruct the bin directory which is not available when running from an ansible install
files.extend(create_temporary_bin_files(args))
filters.update(dict((path[3:], make_executable) for path in ANSIBLE_BIN_SYMLINK_MAP.values() if path.startswith('../')))
if not data_context().content.is_ansible:
# exclude unnecessary files when not testing ansible itself
files = [f for f in files if
is_subdir(f[1], 'bin/') or
is_subdir(f[1], 'lib/ansible/') or
(is_subdir(f[1], 'test/lib/ansible_test/') and not is_subdir(f[1], 'test/lib/ansible_test/tests/'))]
if not isinstance(args, (ShellConfig, IntegrationConfig)):
# exclude built-in ansible modules when they are not needed
files = [f for f in files if not is_subdir(f[1], 'lib/ansible/modules/') or f[1] == 'lib/ansible/modules/__init__.py']
collection_layouts = data_context().create_collection_layouts()
for layout in collection_layouts:
# include files from each collection in the same collection root as the content being tested
files.extend((os.path.join(layout.root, path), os.path.join(layout.collection.directory, path)) for path in layout.all_files())
for callback in data_context().payload_callbacks:
callback(files)
# maintain predictable file order
files = sorted(set(files))
display.info('Creating a payload archive containing %d files...' % len(files), verbosity=1)
start = time.time()
with tarfile.TarFile.gzopen(dst_path, mode='w', compresslevel=4) as tar:
for src, dst in files:
display.info('%s -> %s' % (src, dst), verbosity=4)
tar.add(src, dst, filter=filters.get(dst))
duration = time.time() - start
payload_size_bytes = os.path.getsize(dst_path)
display.info('Created a %d byte payload archive containing %d files in %d seconds.' % (payload_size_bytes, len(files), duration), verbosity=1)
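# For orientation, a standalone sketch of the tarfile filter mechanism
# used above (names and paths here are illustrative, independent of
# ansible-test):
#
#   import tarfile
#
#   def _make_executable(tar_info):
#       tar_info.mode |= 0o111  # set the execute bits for user/group/other
#       return tar_info
#
#   with tarfile.open('payload.tgz', 'w:gz') as tar:
#       tar.add('bin/ansible', arcname='bin/ansible', filter=_make_executable)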
def create_temporary_bin_files(args): # type: (CommonConfig) -> t.Tuple[t.Tuple[str, str], ...]
"""Create a temporary ansible bin directory populated using the symlink map."""
if args.explain:
temp_path = '/tmp/ansible-tmp-bin'
else:
temp_path = tempfile.mkdtemp(prefix='ansible', suffix='bin')
atexit.register(remove_tree, temp_path)
for name, dest in ANSIBLE_BIN_SYMLINK_MAP.items():
path = os.path.join(temp_path, name)
os.symlink(dest, path)
return tuple((os.path.join(temp_path, name), os.path.join('bin', name)) for name in sorted(ANSIBLE_BIN_SYMLINK_MAP))
| Dhivyap/ansible | test/lib/ansible_test/_internal/payload.py | Python | gpl-3.0 | 4,516 | [
"Galaxy"
] | 59f06d54c3ec8d24740dadd581b5b0d7599679f8c3cf7d1e3c4159c3ac5d0573 |
#!/usr/bin/env python
from __future__ import division
import os
import itertools as it
import pytest
import numpy as np
import minipnm as mini
def test_print():
network = mini.Delaunay.random(100)
print( network )
def test_prune():
delaunay = mini.Delaunay(np.random.rand(100,3))
original_size = delaunay.size
changed = delaunay - ~delaunay.boundary()
new_size = changed.size
assert type(delaunay) is type(changed)
assert np.greater(original_size, new_size).all()
def test_subtract_all():
network = mini.Cubic([3,3,3])
reduced = network.copy()
reduced.prune(network.indexes!=-1)
assert set(network.keys()) == set(reduced.keys())
assert reduced.size == 0
assert all([value.size==0 for value in reduced.values()])
rereduced = reduced.copy()
rereduced.prune(network.indexes!=-1)
assert set(network.keys()) == set(rereduced.keys())
assert rereduced.size == 0
assert all(value.size==0 for value in rereduced.values())
def test_render():
try:
import vtk
except ImportError:
return
network = mini.Delaunay.random(100)
scene = mini.Scene()
network.render(scene=scene)
def test_handling_of_pseudo_array_input():
network = mini.Network()
with pytest.raises(TypeError):
network.points = None, None, None
network.points = [(1,1,1), [2,2,2], np.array([3,3,3])]
network.pairs = (0,1)
network.pairs = [(1,2), [2,0]]
def test_merge():
network = mini.Delaunay.random(100)
inside, outside = network.split(network.boundary())
(inside | outside)
def test_qhull_coplanar():
points = np.random.rand(100,3)
points.T[2] = 0
network = mini.Delaunay(points)
network.boundary()
def test_lengths():
# create a voxelized sphere. black (ones, vs. zeros) is void.
N = 13
im = np.ones([N,N,N])
for i, c in np.ndenumerate(im):
if np.linalg.norm(np.subtract(i, N/2-0.5)) > N/2.5:
im[i] = 0
def disable_test_save_and_load():
try:
original = mini.Cubic([20,20,20])
mini.save(original)
copy = mini.load('Cubic.npz')
assert type(original) is type(copy)
for key, value in original.items():
np.testing.assert_allclose(copy[key], value)
finally:
os.system("rm Cubic.npz")
def test_clone():
original = mini.Cubic([5,5,5])
copy = original.copy()
assert type(original) is type(copy)
unmatched = set(original.keys()) ^ set(copy.keys())
assert not unmatched
for key, value in original.items():
np.testing.assert_allclose(value, copy[key])
if __name__ == '__main__':
errors = pytest.main()
os.system("find . -name '*.pyc' -delete")
| RodericDay/MiniPNM | test_minipnm.py | Python | mit | 2,708 | [
"VTK"
] | 9e6e5421b26cc7c39958749bd0f4f69dd574a35a48377647ab892c8ea336d564 |
# Copyright (C) 2011, 2012, 2013, 2014, 2015, 2016, 2017 David Maxwell and Constantine Khroulev
#
# This file is part of PISM.
#
# PISM is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# PISM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with PISM; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Module containing classes managing SSA forward runs, inverse SSA
solves, and iteration reporting."""
import PISM
import math
from PISM.logging import logMessage
class SSAForwardRun(PISM.ssa.SSARun):
"""Subclass of :class:`PISM.ssa.SSAFromInputFile` where the underlying SSA implementation is an
:cpp:class:`IP_SSATaucForwardProblem` or :cpp:class:`IP_SSAHardavForwardProblem`.
It is responsible for putting together a :class:`PISM.model.ModelData` containing the auxiliary data
needed for solving the SSA (:cpp:class:`IceModelVec`'s, :cpp:class:`EnthalpyConverter`, etc.) as well
as the instance of :cpp:class:`IP_SSATaucForwardProblem` that will solve the SSA repeatedly in the course
of solving an inverse problem. This class is intended to be subclassed by test cases where the data
is not provided from an input file. See also :class:`SSAForwardRunFromInputFile`."""
def __init__(self, design_var):
PISM.ssa.SSARun.__init__(self)
assert(design_var in ssa_forward_problems.keys())
self.grid = None
self.config = PISM.Context().config
self.design_var = design_var
self.design_var_param = createDesignVariableParam(self.config, self.design_var)
self.is_regional = False
def designVariable(self):
""":returns: String description of the design variable of the forward problem (e.g. 'tauc' or 'hardness')"""
return self.design_var
def designVariableParameterization(self):
""":returns: Object that performs zeta->design variable transformation."""
return self.design_var_param
def _setFromOptions(self):
"""Initialize internal parameters based on command-line flags. Called from :meth:`PISM.ssa.SSARun.setup`."""
self.is_regional = PISM.optionsFlag("-regional", "regional mode")
def _constructSSA(self):
"""Returns an instance of :cpp:class:`IP_SSATaucForwardProblem` rather than
a basic :cpp:class:`SSAFEM` or :cpp:class:`SSAFD`. Called from :meth:`PISM.ssa.SSARun.setup`."""
md = self.modeldata
return createSSAForwardProblem(md.grid, md.enthalpyconverter, self.design_var_param, self.design_var)
def _initSSA(self):
"""One-time initialization of the :cpp:class:`IP_SSATaucForwardProblem`. Called from :meth:`PISM.ssa.SSARun.setup`."""
# init() will cache the values of the coefficients at
# quadrature points once here. Subsequent solves will then not
# need to cache these values.
self.ssa.init()
class SSAForwardRunFromInputFile(SSAForwardRun):
"""Subclass of :class:`SSAForwardRun` where the vector data
for the run is provided in an input :file:`.nc` file."""
def __init__(self, input_filename, inv_data_filename, design_var):
"""
:param input_filename: :file:`.nc` file containing generic PISM model data.
:param inv_data_filename: :file:`.nc` file containing data specific to inversion (e.g. observed SSA velocities).
"""
SSAForwardRun.__init__(self, design_var)
self.input_filename = input_filename
self.inv_data_filename = inv_data_filename
def _initGrid(self):
"""Initialize grid size and periodicity. Called from :meth:`PISM.ssa.SSARun.setup`."""
if self.is_regional:
registration = PISM.CELL_CORNER
else:
registration = PISM.CELL_CENTER
ctx = PISM.Context().ctx
pio = PISM.PIO(ctx.com(), "netcdf3", self.input_filename, PISM.PISM_READONLY)
self.grid = PISM.IceGrid.FromFile(ctx, pio, "enthalpy", registration)
pio.close()
def _initPhysics(self):
"""Override of :meth:`SSARun._initPhysics` that sets the physics based on command-line flags."""
config = self.config
enthalpyconverter = PISM.EnthalpyConverter(config)
if PISM.OptionBool("-ssa_glen", "SSA flow law Glen exponent"):
config.set_string("stress_balance.ssa.flow_law", "isothermal_glen")
config.scalar_from_option("flow_law.isothermal_Glen.ice_softness", "ice_softness")
else:
config.set_string("stress_balance.ssa.flow_law", "gpbld")
self.modeldata.setPhysics(enthalpyconverter)
def _initSSACoefficients(self):
"""Reads SSA coefficients from the input file. Called from :meth:`PISM.ssa.SSARun.setup`."""
self._allocStdSSACoefficients()
# Read PISM SSA related state variables
#
# Hmmm. A lot of code duplication with SSAFromInputFile._initSSACoefficients.
vecs = self.modeldata.vecs
thickness = vecs.land_ice_thickness
bed = vecs.bedrock_altitude
enthalpy = vecs.enthalpy
mask = vecs.mask
surface = vecs.surface_altitude
# Read in the PISM state variables that are used directly in the SSA solver
for v in [thickness, bed, enthalpy]:
v.regrid(self.input_filename, True)
# variables mask and surface are computed from the geometry previously read
sea_level = 0 # FIXME setFromOption?
gc = PISM.GeometryCalculator(self.config)
gc.compute(sea_level, bed, thickness, mask, surface)
grid = self.grid
config = self.modeldata.config
# Compute yield stress from PISM state variables
# (basal melt rate, tillphi, and basal water height) if they are available
file_has_inputs = (PISM.util.fileHasVariable(self.input_filename, 'bmelt') and
PISM.util.fileHasVariable(self.input_filename, 'tillwat') and
PISM.util.fileHasVariable(self.input_filename, 'tillphi'))
if file_has_inputs:
bmr = PISM.model.createBasalMeltRateVec(grid)
tillphi = PISM.model.createTillPhiVec(grid)
tillwat = PISM.model.createBasalWaterVec(grid)
for v in [bmr, tillphi, tillwat]:
v.regrid(self.input_filename, True)
vecs.add(v)
# hydrology models use cell areas:
cell_area = PISM.model.createCellAreaVec(grid)
if PISM.util.fileHasVariable(self.input_filename, 'cell_area'):
cell_area.regrid(self.input_filename, True)
vecs.add(cell_area)
# The SIA model might need the age field.
if self.config.get_boolean("age.enabled"):
vecs.age.regrid(self.input_filename, True)
hydrology_model = config.get_string("hydrology.model")
if hydrology_model == "null":
subglacial_hydrology = PISM.NullTransportHydrology(grid)
elif hydrology_model == "routing":
subglacial_hydrology = PISM.RoutingHydrology(grid)
elif hydrology_model == "distributed":
subglacial_hydrology = PISM.DistributedHydrology(grid)
if self.is_regional:
yieldstress = PISM.RegionalDefaultYieldStress(self.modeldata.grid, subglacial_hydrology)
else:
yieldstress = PISM.MohrCoulombYieldStress(self.modeldata.grid, subglacial_hydrology)
# make sure vecs is locked!
subglacial_hydrology.init()
yieldstress.init()
yieldstress.basal_material_yield_stress(vecs.tauc)
elif PISM.util.fileHasVariable(self.input_filename, 'tauc'):
vecs.tauc.regrid(self.input_filename, critical=True)
if PISM.util.fileHasVariable(self.input_filename, 'ssa_driving_stress_x'):
vecs.add(PISM.model.createDrivingStressXVec(self.grid))
vecs.ssa_driving_stress_x.regrid(self.input_filename, critical=True)
if PISM.util.fileHasVariable(self.input_filename, 'ssa_driving_stress_y'):
vecs.add(PISM.model.createDrivingStressYVec(self.grid))
vecs.ssa_driving_stress_y.regrid(self.input_filename, critical=True)
# read in the fractional floatation mask
vecs.add(PISM.model.createGroundingLineMask(self.grid))
vecs.gl_mask.regrid(self.input_filename, critical=False, default_value=0.0) # set to zero if not found
if self.is_regional:
vecs.add(PISM.model.createNoModelMaskVec(self.grid), 'no_model_mask')
vecs.no_model_mask.regrid(self.input_filename, True)
vecs.add(vecs.surface_altitude, 'usurfstore')
if self.config.get_boolean('stress_balance.ssa.dirichlet_bc'):
vecs.add(PISM.model.create2dVelocityVec(self.grid, name='_ssa_bc', desc='SSA velocity boundary condition', intent='intent'), "vel_ssa_bc")
has_u_ssa_bc = PISM.util.fileHasVariable(self.input_filename, 'u_ssa_bc')
has_v_ssa_bc = PISM.util.fileHasVariable(self.input_filename, 'v_ssa_bc')
if (not has_u_ssa_bc) or (not has_v_ssa_bc):
PISM.verbPrintf(2, self.grid.com, "Input file '%s' missing Dirichlet boundary data u/v_ssa_bc; using zero default instead." % self.input_filename)
vecs.vel_ssa_bc.set(0.)
else:
vecs.vel_ssa_bc.regrid(self.input_filename, True)
if self.is_regional:
vecs.add(vecs.no_model_mask, 'bc_mask')
else:
vecs.add(PISM.model.createBCMaskVec(self.grid), 'bc_mask')
bc_mask_name = vecs.bc_mask.metadata().get_string("short_name")
if PISM.util.fileHasVariable(self.input_filename, bc_mask_name):
vecs.bc_mask.regrid(self.input_filename, True)
else:
PISM.verbPrintf(2, self.grid.com, "Input file '%s' missing Dirichlet location mask '%s'. Default to no Dirichlet locations." % (self.input_filename, bc_mask_name))
vecs.bc_mask.set(0)
if PISM.util.fileHasVariable(self.inv_data_filename, 'vel_misfit_weight'):
vecs.add(PISM.model.createVelocityMisfitWeightVec(self.grid))
vecs.vel_misfit_weight.regrid(self.inv_data_filename, True)
class InvSSASolver(object):
"""Abstract base class for SSA inverse problem solvers."""
def __init__(self, ssarun, method):
"""
:param ssarun: The :class:`PISM.invert.ssa.SSAForwardRun` defining the forward problem.
:param method: String describing the actual algorithm to use. Must be a key in :attr:`tao_types`."""
self.ssarun = ssarun
self.config = ssarun.config
self.method = method
def solveForward(self, zeta, out=None):
r"""Given a parameterized design variable value :math:`\zeta`, solve the SSA.
See :cpp:class:`IP_TaucParam` for a discussion of parameterizations.
:param zeta: :cpp:class:`IceModelVec` containing :math:`\zeta`.
:param out: optional :cpp:class:`IceModelVec` for storage of the computation result.
:returns: An :cpp:class:`IceModelVec` containing the computation result.
"""
raise NotImplementedError()
def addIterationListener(self, listener):
"""Add a listener to be called after each iteration. See :ref:`Listeners`."""
raise NotImplementedError()
def addDesignUpdateListener(self, listener):
"""Add a listener to be called after each time the design variable is changed."""
raise NotImplementedError()
def solveInverse(self, zeta0, u_obs, zeta_inv):
r"""Executes the inversion algorithm.
:param zeta0: The best `a-priori` guess for the value of the parameterized design variable :math:`\zeta`.
:param u_obs: :cpp:class:`IceModelVec2V` of observed surface velocities.
:param zeta_inv: :cpp:class:`IceModelVec` starting value of :math:`\zeta` for minimization of the Tikhonov functional.
:returns: A :cpp:class:`TerminationReason`.
"""
raise NotImplementedError()
def inverseSolution(self):
"""Returns a tuple ``(zeta, u)`` of :cpp:class:`IceModelVec`'s corresponding to the values
of the design and state variables at the end of inversion."""
raise NotImplementedError()
def createInvSSASolver(ssarun, method=None):
"""Factory function returning an inverse solver appropriate for the config variable ``inverse.ssa.method``.
:param ssarun: an instance of :class:`SSAForwardRun` or :class:`SSAForwardRunFromInputFile`.
:param method: a string corresponding to config variable ``inverse.ssa.method`` describing the inversion method to be used.
"""
if method is None:
method = ssarun.config.get_string('inverse.ssa.method')
if method == 'tikhonov_gn':
from PISM.invert import ssa_gn
return ssa_gn.InvSSASolver_TikhonovGN(ssarun, method)
elif method.startswith('tikhonov'):
try:
from PISM.invert import ssa_tao
return ssa_tao.InvSSASolver_Tikhonov(ssarun, method)
except ImportError:
raise RuntimeError("Inversion method '%s' requires the TAO library." % method)
if method == 'sd' or method == 'nlcg' or method == 'ign':
try:
from PISM.invert import ssa_siple
return ssa_siple.InvSSASolver_Gradient(ssarun, method)
except ImportError:
raise RuntimeError("Inversion method '%s' requires the siple python library." % method)
raise Exception("Unknown inverse method '%s'; unable to construct solver.", method)
design_param_types = {"ident": PISM.IPDesignVariableParamIdent,
"square": PISM.IPDesignVariableParamSquare,
"exp": PISM.IPDesignVariableParamExp,
"trunc": PISM.IPDesignVariableParamTruncatedIdent}
def createDesignVariableParam(config, design_var_name, param_name=None):
"""Factory function for creating subclasses of :cpp:class:`IPDesignVariableParameterization` based on command-line flags."""
if param_name is None:
param_name = config.get_string("inverse.design.param")
design_param = design_param_types[param_name]()
design_param.set_scales(config, design_var_name)
return design_param
ssa_forward_problems = {'tauc': PISM.IP_SSATaucForwardProblem,
'hardav': PISM.IP_SSAHardavForwardProblem}
def createSSAForwardProblem(grid, ec, design_param, design_var):
"""Returns an instance of an SSA forward problem (e.g. :cpp:class:`IP_SSATaucForwardProblem`)
suitable for the value of `design_var`"""
ForwardProblem = ssa_forward_problems.get(design_var)
if ForwardProblem is None:
raise RuntimeError("Design variable %s is not yet supported." % design_var)
return ForwardProblem(grid, design_param)
def createGradientFunctionals(ssarun):
"""Returns a tuple ``(designFunctional,stateFunctional)`` of :cpp:class:`IP_IPFunctional`'s
for gradient-based inversions. The specific functionals are constructed on the basis of
command-line parameters ``inverse.state_func`` and ``inverse.design.func``.
:param ssarun: The instance of :class:`PISM.ssa.SSARun` that encapsulates the forward problem,
typically a :class:`SSAForwardRunFromInputFile`.
"""
vecs = ssarun.modeldata.vecs
grid = ssarun.grid
useGroundedIceOnly = PISM.OptionBool("-inv_ssa_grounded_ice_tauc",
"Computed norms for tau_c only on elements with all grounded ice.")
misfit_type = grid.ctx().config().get_string("inverse.state_func")
if misfit_type != 'meansquare':
inv_method = grid.ctx().config().get_string("inverse.ssa.method")
raise Exception("'-inv_state_func %s' is not supported with '-inv_method %s'.\nUse '-inv_state_func meansquare' instead" % (misfit_type, inv_method))
design_functional = grid.ctx().config().get_string("inverse.design.func")
if design_functional != "sobolevH1":
inv_method = grid.ctx().config().get_string("inverse.ssa.method")
raise Exception("'-inv_design_func %s' is not supported with '-inv_method %s'.\nUse '-inv_design_func sobolevH1' instead" % (design_functional, inv_method))
designFunctional = createHilbertDesignFunctional(grid, vecs, useGroundedIceOnly)
stateFunctional = createMeanSquareMisfitFunctional(grid, vecs)
return (designFunctional, stateFunctional)
def createTikhonovFunctionals(ssarun):
"""Returns a tuple ``(designFunctional,stateFunctional)`` of :cpp:class:`IP_Functional`'s
for Tikhonov-based inversions. The specific functionals are constructed on the basis of
command-line parameters ``inv_state_func`` and ``inv_design_func``.
:param ssarun: The instance of :class:`PISM.ssa.SSARun` that encapsulates the forward problem,
typically a :class:`SSAForwardRunFromInputFile`.
"""
vecs = ssarun.modeldata.vecs
grid = ssarun.grid
useGroundedIceOnly = PISM.OptionBool("-inv_ssa_grounded_ice_tauc",
"Computed norms for tau_c only on elements with all grounded ice.")
misfit_type = grid.ctx().config().get_string("inverse.state_func")
if misfit_type == "meansquare":
stateFunctional = createMeanSquareMisfitFunctional(grid, vecs)
elif misfit_type == "log_ratio":
vel_ssa_observed = vecs.vel_ssa_observed
scale = grid.ctx().config().get_double("inverse.log_ratio_scale")
velocity_eps = grid.ctx().config().get_double("inverse.ssa.velocity_eps", "m/second")
misfit_weight = None
if vecs.has('vel_misfit_weight'):
misfit_weight = vecs.vel_misfit_weight
stateFunctional = PISM.IPLogRatioFunctional(grid, vel_ssa_observed, velocity_eps, misfit_weight)
stateFunctional.normalize(scale)
elif misfit_type == "log_relative":
vel_ssa_observed = vecs.vel_ssa_observed
velocity_scale = grid.ctx().config().get_double("inverse.ssa.velocity_scale", "m/second")
velocity_eps = grid.ctx().config().get_double("inverse.ssa.velocity_eps", "m/second")
misfit_weight = None
if vecs.has('vel_misfit_weight'):
misfit_weight = vecs.vel_misfit_weight
stateFunctional = PISM.IPLogRelativeFunctional(grid, vel_ssa_observed, velocity_eps, misfit_weight)
stateFunctional.normalize(velocity_scale)
else:
raise RuntimeError("Unknown inv_state_func '%s'; unable to construct solver.", misfit_type)
design_functional = grid.ctx().config().get_string("inverse.design.func")
if design_functional == "sobolevH1":
designFunctional = createHilbertDesignFunctional(grid, vecs, useGroundedIceOnly)
elif design_functional == "tv":
area = 4 * grid.Lx() * grid.Ly()
velocity_scale = grid.ctx().config().get_double("inverse.ssa.velocity_scale", "m/second")
length_scale = grid.ctx().config().get_double("inverse.ssa.length_scale")
lebesgue_exponent = grid.ctx().config().get_double("inverse.ssa.tv_exponent")
cTV = 1 / area
cTV *= (length_scale) ** (lebesgue_exponent)
zeta_fixed_mask = None
if vecs.has('zeta_fixed_mask'):
zeta_fixed_mask = vecs.zeta_fixed_mask
strain_rate_eps = PISM.optionsReal("-inv_ssa_tv_eps",
"regularization constant for 'total variation' functional", default=None)
if strain_rate_eps is None:
schoofLen = grid.ctx().config().get_double("flow_law.Schoof_regularizing_length", "m")
strain_rate_eps = 1 / schoofLen
designFunctional = PISM.IPTotalVariationFunctional2S(grid, cTV, lebesgue_exponent, strain_rate_eps, zeta_fixed_mask)
else:
raise Exception("Unknown inv_design_func '%s'; unable to construct solver." % design_functional)
return (designFunctional, stateFunctional)
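# Illustrative usage sketch (not part of the original module; assumes the
# config selects 'meansquare' and 'sobolevH1'): the factory then pairs a
# Hilbert-space design functional with a mean-square misfit functional.
#
# designFunctional, stateFunctional = createTikhonovFunctionals(ssarun)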
def createMeanSquareMisfitFunctional(grid, vecs):
"""Creates a :cpp:class:`IPMeanSquareFunctional2V` suitable for use for a
state variable function for SSA inversions."""
misfit_weight = None
if vecs.has('vel_misfit_weight'):
misfit_weight = vecs.vel_misfit_weight
velocity_scale = grid.ctx().config().get_double("inverse.ssa.velocity_scale", "m/second")
stateFunctional = PISM.IPMeanSquareFunctional2V(grid, misfit_weight)
stateFunctional.normalize(velocity_scale)
return stateFunctional
def createHilbertDesignFunctional(grid, vecs, useGroundedIceOnly):
"""Creates a :cpp:class:`IP_H1NormFunctional2S` or a :cpp:class`IPGroundedIceH1NormFunctional2S` suitable
for use for a design variable functional.
:param grid: computation grid
:param vecs: model vecs
:param useGroundedIceOnly: flag, ``True`` if a :cpp:class`IPGroundedIceH1NormFunctional2S` should be created.
"""
cL2 = grid.ctx().config().get_double("inverse.design.cL2")
cH1 = grid.ctx().config().get_double("inverse.design.cH1")
area = 4 * grid.Lx() * grid.Ly()
length_scale = grid.ctx().config().get_double("inverse.ssa.length_scale")
cL2 /= area
cH1 /= area
cH1 *= (length_scale * length_scale)
zeta_fixed_mask = None
if vecs.has('zeta_fixed_mask'):
zeta_fixed_mask = vecs.zeta_fixed_mask
if useGroundedIceOnly:
mask = vecs.mask
designFunctional = PISM.IPGroundedIceH1NormFunctional2S(grid, cL2, cH1, mask, zeta_fixed_mask)
else:
designFunctional = PISM.IP_H1NormFunctional2S(grid, cL2, cH1, zeta_fixed_mask)
return designFunctional
def printIteration(invssa_solver, it, data):
"Print a header for an iteration report."
logMessage("----------------------------------------------------------\n")
logMessage("Iteration %d\n" % it)
def printTikhonovProgress(invssasolver, it, data):
"Report on the progress of a Tikhonov iteration."
eta = data.tikhonov_penalty
stateVal = data.JState
designVal = data.JDesign
sWeight = 1
dWeight = 1.0 / eta
norm_type = PISM.PETSc.NormType.NORM_2
logMessage("design objective %.8g; weighted %.8g\n" % (designVal, designVal * dWeight))
if 'grad_JTikhonov' in data:
logMessage("gradient: design %.8g state %.8g sum %.8g\n" % (data.grad_JDesign.norm(norm_type) * dWeight,
data.grad_JState.norm(norm_type) * sWeight,
data.grad_JTikhonov.norm(norm_type)))
else:
logMessage("gradient: design %.8g state %.8g; constraints: %.8g\n" % (data.grad_JDesign.norm(norm_type) * dWeight,
data.grad_JState.norm(norm_type) * sWeight,
data.constraints.norm(norm_type)))
logMessage("tikhonov functional: %.8g\n" % (stateVal * sWeight + designVal * dWeight))
class RMSMisfitReporter(object):
"Report RMS misfit."
def __init__(self):
self.J = None
def __call__(self, invssa_solver, it, data):
grid = invssa_solver.ssarun.grid
if self.J is None:
vecs = invssa_solver.ssarun.modeldata.vecs
self.J = createMeanSquareMisfitFunctional(grid, vecs)
Jmisfit = self.J.valueAt(data.residual)
rms_misfit = math.sqrt(Jmisfit) * grid.ctx().config().get_double("inverse.ssa.velocity_scale")
PISM.logging.logMessage("Diagnostic RMS Misfit: %0.8g (m/a)\n" % rms_misfit)
class MisfitLogger(object):
"Logger that saves history of misfits to a file."
def __init__(self):
self.misfit_history = []
self.misfit_type = None
def __call__(self, invssa_solver, it, data):
"""
:param invssa_solver: the solver (e.g. :class:`~InvSSASolver_Tikhonov`) we are listening to.
:param it: the iteration number.
:param data: dictionary of data related to the iteration.
"""
grid = invssa_solver.ssarun.grid
if self.misfit_type is None:
self.misfit_type = grid.ctx().config().get_string("inverse.state_func")
method = invssa_solver.method
if method == 'ign' or method == 'sd' or method == 'nlcg':
import PISM.invert.sipletools
fp = invssa_solver.forward_problem
r = PISM.invert.sipletools.PISMLocalVector(data.residual)
Jmisfit = fp.rangeIP(r, r)
elif 'JState' in data:
Jmisfit = data.JState
else:
raise RuntimeError("Unable to report misfits for inversion method: %s" % method)
if self.misfit_type == "meansquare":
velScale_m_per_year = grid.ctx().config().get_double("inverse.ssa.velocity_scale")
rms_misfit = math.sqrt(Jmisfit) * velScale_m_per_year
logMessage("Misfit: sqrt(J_misfit) = %.8g (m/a)\n" % rms_misfit)
self.misfit_history.append(rms_misfit)
else:
logMessage("Misfit: J_misfit = %.8g (dimensionless)\n" % Jmisfit)
self.misfit_history.append(Jmisfit)
def write(self, output_filename):
"""Saves a history of misfits as :ncvar:`inv_ssa_misfit`
:param output_filename: filename to save misfits to."""
if PISM.Context().rank == 0:
nc = PISM.netCDF.Dataset(output_filename, 'a') # append
nc.createDimension('inv_ssa_iter', len(self.misfit_history))
nc_misfit = nc.createVariable('inv_ssa_misfit', 'f8', dimensions=('inv_ssa_iter'))
if self.misfit_type == "meansquare":
nc_misfit.setncattr('_units', 'm/a')
nc_misfit[:] = self.misfit_history[:]
nc.close()
class ZetaSaver(object):
r"""Iteration listener used to save a copy of the current value
of :math:`\zeta` (i.e. a parameterized design variable such as :math:`\tau_c` or hardness)
at each iteration during an inversion. The intent is to use a saved value to restart
an inversion if need be.
"""
def __init__(self, output_filename):
""":param output_filename: file to save iterations to."""
self.output_filename = output_filename
def __call__(self, inverse_solver, count, data):
zeta = data.zeta
# The solver doesn't care what the name of zeta is, and we
# want it called 'zeta_inv' in the output file, so we rename it.
zeta.metadata().set_name('zeta_inv')
zeta.metadata().set_string('long_name',
'last iteration of parameterized basal yield stress computed by inversion')
zeta.write(self.output_filename)
| talbrecht/pism_pik | site-packages/PISM/invert/ssa.py | Python | gpl-3.0 | 27,277 | [
"NetCDF"
] | cc94554c51249b6d8ecb4dec2ac46d2613cd1f8dbebd1bd8e81a2e5cb0c58ea4 |
# -*- coding: utf-8 -*-
""" Python package of code for Gaussian process classification experiments. """
__authors__ = 'Matt Graham'
__copyright__ = 'Copyright 2015, Matt Graham'
__license__ = 'MIT'
| matt-graham/auxiliary-pm-mcmc | gpdemo/__init__.py | Python | mit | 199 | [
"Gaussian"
] | 61e71f35764fd70d478636285510d6bca512790b65838d5e3b525af380022648 |
from collections import defaultdict
import networkx as nx
__all__ = ["check_planarity", "PlanarEmbedding"]
def check_planarity(G, counterexample=False):
"""Check if a graph is planar and return a counterexample or an embedding.
A graph is planar iff it can be drawn in a plane without
any edge intersections.
Parameters
----------
G : NetworkX graph
counterexample : bool
A Kuratowski subgraph (to prove non-planarity) is only returned if set
to True.
Returns
-------
(is_planar, certificate) : (bool, NetworkX graph) tuple
is_planar is true if the graph is planar.
If the graph is planar `certificate` is a PlanarEmbedding
otherwise it is a Kuratowski subgraph.
Notes
-----
A (combinatorial) embedding consists of cyclic orderings of the incident
edges at each vertex. Given such an embedding there are multiple approaches
discussed in the literature for drawing the graph (subject to various
constraints, e.g. integer coordinates), see e.g. [2].
The planarity check algorithm and extraction of the combinatorial embedding
is based on the Left-Right Planarity Test [1].
A counterexample is only generated if the corresponding parameter is set,
because generating it is comparatively expensive.
References
----------
.. [1] Ulrik Brandes:
The Left-Right Planarity Test
2009
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.217.9208
.. [2] Takao Nishizeki, Md Saidur Rahman:
Planar graph drawing
Lecture Notes Series on Computing: Volume 12
2004
"""
planarity_state = LRPlanarity(G)
embedding = planarity_state.lr_planarity()
if embedding is None:
# graph is not planar
if counterexample:
return False, get_counterexample(G)
else:
return False, None
else:
# graph is planar
return True, embedding
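# Illustrative doctest-style sketch (not part of the original module) showing
# typical use of check_planarity on small graphs:
#
# >>> G = nx.Graph([(0, 1), (1, 2), (2, 0)])  # a triangle is planar
# >>> is_planar, embedding = check_planarity(G)
# >>> is_planar
# True
# >>> is_planar, kuratowski = check_planarity(nx.complete_graph(5), counterexample=True)
# >>> is_planar  # K5 is not planar; kuratowski holds the offending subgraph
# False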
def check_planarity_recursive(G, counterexample=False):
"""Recursive version of :meth:`check_planarity`."""
planarity_state = LRPlanarity(G)
embedding = planarity_state.lr_planarity_recursive()
if embedding is None:
# graph is not planar
if counterexample:
return False, get_counterexample_recursive(G)
else:
return False, None
else:
# graph is planar
return True, embedding
def get_counterexample(G):
"""Obtains a Kuratowski subgraph.
Raises nx.NetworkXException if G is planar.
The function removes edges such that the graph is still not planar.
At some point the removal of any edge would make the graph planar.
This subgraph must be a Kuratowski subgraph.
Parameters
----------
G : NetworkX graph
Returns
-------
subgraph : NetworkX graph
A Kuratowski subgraph that proves that G is not planar.
"""
# copy graph
G = nx.Graph(G)
if check_planarity(G)[0]:
raise nx.NetworkXException("G is planar - no counter example.")
# find Kuratowski subgraph
subgraph = nx.Graph()
for u in G:
nbrs = list(G[u])
for v in nbrs:
G.remove_edge(u, v)
if check_planarity(G)[0]:
G.add_edge(u, v)
subgraph.add_edge(u, v)
return subgraph
def get_counterexample_recursive(G):
"""Recursive version of :meth:`get_counterexample`.
"""
# copy graph
G = nx.Graph(G)
if check_planarity_recursive(G)[0]:
raise nx.NetworkXException("G is planar - no counter example.")
# find Kuratowski subgraph
subgraph = nx.Graph()
for u in G:
nbrs = list(G[u])
for v in nbrs:
G.remove_edge(u, v)
if check_planarity_recursive(G)[0]:
G.add_edge(u, v)
subgraph.add_edge(u, v)
return subgraph
class Interval(object):
"""Represents a set of return edges.
All return edges in an interval induce the same constraint on the contained
edges: either all edges have a left orientation or all edges have a right
orientation.
"""
def __init__(self, low=None, high=None):
self.low = low
self.high = high
def empty(self):
"""Check if the interval is empty"""
return self.low is None and self.high is None
def copy(self):
"""Return a copy of this interval"""
return Interval(self.low, self.high)
def conflicting(self, b, planarity_state):
"""Return True if interval I conflicts with edge b"""
return (not self.empty() and
planarity_state.lowpt[self.high] > planarity_state.lowpt[b])
class ConflictPair(object):
"""Represents a different constraint between two intervals.
The edges in the left interval must have a different orientation than
the one in the right interval.
"""
def __init__(self, left=Interval(), right=Interval()):
self.left = left
self.right = right
def swap(self):
"""Swap left and right intervals"""
temp = self.left
self.left = self.right
self.right = temp
def lowest(self, planarity_state):
"""Return the lowest lowpoint of a conflict pair"""
if self.left.empty():
return planarity_state.lowpt[self.right.low]
if self.right.empty():
return planarity_state.lowpt[self.left.low]
return min(planarity_state.lowpt[self.left.low],
planarity_state.lowpt[self.right.low])
def top_of_stack(l):
"""Returns the element on top of the stack."""
if not l:
return None
return l[-1]
class LRPlanarity(object):
"""A class to maintain the state during planarity check."""
__slots__ = [
'G', 'roots', 'height', 'lowpt', 'lowpt2', 'nesting_depth',
'parent_edge', 'DG', 'adjs', 'ordered_adjs', 'ref', 'side', 'S',
'stack_bottom', 'lowpt_edge', 'left_ref', 'right_ref', 'embedding'
]
def __init__(self, G):
# copy G without adding self-loops
self.G = nx.Graph()
self.G.add_nodes_from(G.nodes)
for e in G.edges:
if e[0] != e[1]:
self.G.add_edge(e[0], e[1])
self.roots = []
# distance from tree root
self.height = defaultdict(lambda: None)
self.lowpt = {} # height of lowest return point of an edge
self.lowpt2 = {} # height of second lowest return point
self.nesting_depth = {} # for nesting order
# None -> missing edge
self.parent_edge = defaultdict(lambda: None)
# oriented DFS graph
self.DG = nx.DiGraph()
self.DG.add_nodes_from(G.nodes)
self.adjs = {}
self.ordered_adjs = {}
self.ref = defaultdict(lambda: None)
self.side = defaultdict(lambda: 1)
# stack of conflict pairs
self.S = []
self.stack_bottom = {}
self.lowpt_edge = {}
self.left_ref = {}
self.right_ref = {}
self.embedding = PlanarEmbedding()
def lr_planarity(self):
"""Execute the LR planarity test.
Returns
-------
embedding : PlanarEmbedding
If the graph is planar an embedding is returned. Otherwise None.
"""
if self.G.order() > 2 and self.G.size() > 3 * self.G.order() - 6:
# graph is not planar
return None
# make adjacency lists for dfs
for v in self.G:
self.adjs[v] = list(self.G[v])
# orientation of the graph by depth first search traversal
for v in self.G:
if self.height[v] is None:
self.height[v] = 0
self.roots.append(v)
self.dfs_orientation(v)
# Free no longer used variables
self.G = None
self.lowpt2 = None
self.adjs = None
# testing
for v in self.DG: # sort the adjacency lists by nesting depth
# note: this sorting leads to non linear time
self.ordered_adjs[v] = sorted(
self.DG[v], key=lambda x: self.nesting_depth[(v, x)])
for v in self.roots:
if not self.dfs_testing(v):
return None
# Free no longer used variables
self.height = None
self.lowpt = None
self.S = None
self.stack_bottom = None
self.lowpt_edge = None
for e in self.DG.edges:
self.nesting_depth[e] = self.sign(e) * self.nesting_depth[e]
self.embedding.add_nodes_from(self.DG.nodes)
for v in self.DG:
# sort the adjacency lists again
self.ordered_adjs[v] = sorted(
self.DG[v], key=lambda x: self.nesting_depth[(v, x)])
# initialize the embedding
previous_node = None
for w in self.ordered_adjs[v]:
self.embedding.add_half_edge_cw(v, w, previous_node)
previous_node = w
# Free no longer used variables
self.DG = None
self.nesting_depth = None
self.ref = None
# compute the complete embedding
for v in self.roots:
self.dfs_embedding(v)
# Free no longer used variables
self.roots = None
self.parent_edge = None
self.ordered_adjs = None
self.left_ref = None
self.right_ref = None
self.side = None
return self.embedding
def lr_planarity_recursive(self):
"""Recursive version of :meth:`lr_planarity`."""
if self.G.order() > 2 and self.G.size() > 3 * self.G.order() - 6:
# graph is not planar
return None
# orientation of the graph by depth first search traversal
for v in self.G:
if self.height[v] is None:
self.height[v] = 0
self.roots.append(v)
self.dfs_orientation_recursive(v)
# Free no longer used variable
self.G = None
# testing
for v in self.DG: # sort the adjacency lists by nesting depth
# note: this sorting leads to non linear time
self.ordered_adjs[v] = sorted(
self.DG[v], key=lambda x: self.nesting_depth[(v, x)])
for v in self.roots:
if not self.dfs_testing_recursive(v):
return None
for e in self.DG.edges:
self.nesting_depth[e] = (self.sign_recursive(e) *
self.nesting_depth[e])
self.embedding.add_nodes_from(self.DG.nodes)
for v in self.DG:
# sort the adjacency lists again
self.ordered_adjs[v] = sorted(
self.DG[v], key=lambda x: self.nesting_depth[(v, x)])
# initialize the embedding
previous_node = None
for w in self.ordered_adjs[v]:
self.embedding.add_half_edge_cw(v, w, previous_node)
previous_node = w
# compute the complete embedding
for v in self.roots:
self.dfs_embedding_recursive(v)
return self.embedding
def dfs_orientation(self, v):
"""Orient the graph by DFS, compute lowpoints and nesting order.
"""
# the recursion stack
dfs_stack = [v]
# index of next edge to handle in adjacency list of each node
ind = defaultdict(lambda: 0)
# boolean to indicate whether to skip the initial work for an edge
skip_init = defaultdict(lambda: False)
while dfs_stack:
v = dfs_stack.pop()
e = self.parent_edge[v]
for w in self.adjs[v][ind[v]:]:
vw = (v, w)
if not skip_init[vw]:
if (v, w) in self.DG.edges or (w, v) in self.DG.edges:
ind[v] += 1
continue # the edge was already oriented
self.DG.add_edge(v, w) # orient the edge
self.lowpt[vw] = self.height[v]
self.lowpt2[vw] = self.height[v]
if self.height[w] is None: # (v, w) is a tree edge
self.parent_edge[w] = vw
self.height[w] = self.height[v] + 1
dfs_stack.append(v) # revisit v after finishing w
dfs_stack.append(w) # visit w next
skip_init[vw] = True # don't redo this block
break # handle next node in dfs_stack (i.e. w)
else: # (v, w) is a back edge
self.lowpt[vw] = self.height[w]
# determine nesting graph
self.nesting_depth[vw] = 2 * self.lowpt[vw]
if self.lowpt2[vw] < self.height[v]: # chordal
self.nesting_depth[vw] += 1
# update lowpoints of parent edge e
if e is not None:
if self.lowpt[vw] < self.lowpt[e]:
self.lowpt2[e] = min(self.lowpt[e], self.lowpt2[vw])
self.lowpt[e] = self.lowpt[vw]
elif self.lowpt[vw] > self.lowpt[e]:
self.lowpt2[e] = min(self.lowpt2[e], self.lowpt[vw])
else:
self.lowpt2[e] = min(self.lowpt2[e], self.lowpt2[vw])
ind[v] += 1
def dfs_orientation_recursive(self, v):
"""Recursive version of :meth:`dfs_orientation`."""
e = self.parent_edge[v]
for w in self.G[v]:
if (v, w) in self.DG.edges or (w, v) in self.DG.edges:
continue # the edge was already oriented
vw = (v, w)
self.DG.add_edge(v, w) # orient the edge
self.lowpt[vw] = self.height[v]
self.lowpt2[vw] = self.height[v]
if self.height[w] is None: # (v, w) is a tree edge
self.parent_edge[w] = vw
self.height[w] = self.height[v] + 1
self.dfs_orientation_recursive(w)
else: # (v, w) is a back edge
self.lowpt[vw] = self.height[w]
# determine nesting graph
self.nesting_depth[vw] = 2 * self.lowpt[vw]
if self.lowpt2[vw] < self.height[v]: # chordal
self.nesting_depth[vw] += 1
# update lowpoints of parent edge e
if e is not None:
if self.lowpt[vw] < self.lowpt[e]:
self.lowpt2[e] = min(self.lowpt[e], self.lowpt2[vw])
self.lowpt[e] = self.lowpt[vw]
elif self.lowpt[vw] > self.lowpt[e]:
self.lowpt2[e] = min(self.lowpt2[e], self.lowpt[vw])
else:
self.lowpt2[e] = min(self.lowpt2[e], self.lowpt2[vw])
def dfs_testing(self, v):
"""Test for LR partition."""
# the recursion stack
dfs_stack = [v]
# index of next edge to handle in adjacency list of each node
ind = defaultdict(lambda: 0)
# boolean to indicate whether to skip the initial work for an edge
skip_init = defaultdict(lambda: False)
while dfs_stack:
v = dfs_stack.pop()
e = self.parent_edge[v]
# to indicate whether to skip the final block after the for loop
skip_final = False
for w in self.ordered_adjs[v][ind[v]:]:
ei = (v, w)
if not skip_init[ei]:
self.stack_bottom[ei] = top_of_stack(self.S)
if ei == self.parent_edge[w]: # tree edge
dfs_stack.append(v) # revisit v after finishing w
dfs_stack.append(w) # visit w next
skip_init[ei] = True # don't redo this block
skip_final = True # skip final work after breaking
break # handle next node in dfs_stack (i.e. w)
else: # back edge
self.lowpt_edge[ei] = ei
self.S.append(ConflictPair(right=Interval(ei, ei)))
# integrate new return edges
if self.lowpt[ei] < self.height[v]:
if w == self.ordered_adjs[v][0]: # e_i has return edge
self.lowpt_edge[e] = self.lowpt_edge[ei]
else: # add constraints of e_i
if not self.add_constraints(ei, e):
# graph is not planar
return False
ind[v] += 1
if not skip_final:
# remove back edges returning to parent
if e is not None: # v isn't root
self.remove_back_edges(e)
return True
def dfs_testing_recursive(self, v):
"""Recursive version of :meth:`dfs_testing`."""
e = self.parent_edge[v]
for w in self.ordered_adjs[v]:
ei = (v, w)
self.stack_bottom[ei] = top_of_stack(self.S)
if ei == self.parent_edge[w]: # tree edge
if not self.dfs_testing_recursive(w):
return False
else: # back edge
self.lowpt_edge[ei] = ei
self.S.append(ConflictPair(right=Interval(ei, ei)))
# integrate new return edges
if self.lowpt[ei] < self.height[v]:
if w == self.ordered_adjs[v][0]: # e_i has return edge
self.lowpt_edge[e] = self.lowpt_edge[ei]
else: # add constraints of e_i
if not self.add_constraints(ei, e):
# graph is not planar
return False
# remove back edges returning to parent
if e is not None: # v isn't root
self.remove_back_edges(e)
return True
def add_constraints(self, ei, e):
P = ConflictPair()
# merge return edges of e_i into P.right
while True:
Q = self.S.pop()
if not Q.left.empty():
Q.swap()
if not Q.left.empty(): # not planar
return False
if self.lowpt[Q.right.low] > self.lowpt[e]:
# merge intervals
if P.right.empty(): # topmost interval
P.right = Q.right.copy()
else:
self.ref[P.right.low] = Q.right.high
P.right.low = Q.right.low
else: # align
self.ref[Q.right.low] = self.lowpt_edge[e]
if top_of_stack(self.S) == self.stack_bottom[ei]:
break
# merge conflicting return edges of e_1,...,e_i-1 into P.L
while (top_of_stack(self.S).left.conflicting(ei, self) or
top_of_stack(self.S).right.conflicting(ei, self)):
Q = self.S.pop()
if Q.right.conflicting(ei, self):
Q.swap()
if Q.right.conflicting(ei, self): # not planar
return False
# merge interval below lowpt(e_i) into P.R
self.ref[P.right.low] = Q.right.high
if Q.right.low is not None:
P.right.low = Q.right.low
if P.left.empty(): # topmost interval
P.left = Q.left.copy()
else:
self.ref[P.left.low] = Q.left.high
P.left.low = Q.left.low
if not (P.left.empty() and P.right.empty()):
self.S.append(P)
return True
def remove_back_edges(self, e):
u = e[0]
# trim back edges ending at parent u
# drop entire conflict pairs
while self.S and top_of_stack(self.S).lowest(self) == self.height[u]:
P = self.S.pop()
if P.left.low is not None:
self.side[P.left.low] = -1
if self.S: # one more conflict pair to consider
P = self.S.pop()
# trim left interval
while P.left.high is not None and P.left.high[1] == u:
P.left.high = self.ref[P.left.high]
if P.left.high is None and P.left.low is not None:
# just emptied
self.ref[P.left.low] = P.right.low
self.side[P.left.low] = -1
P.left.low = None
# trim right interval
while P.right.high is not None and P.right.high[1] == u:
P.right.high = self.ref[P.right.high]
if P.right.high is None and P.right.low is not None:
# just emptied
self.ref[P.right.low] = P.left.low
self.side[P.right.low] = -1
P.right.low = None
self.S.append(P)
# side of e is side of a highest return edge
if self.lowpt[e] < self.height[u]: # e has return edge
hl = top_of_stack(self.S).left.high
hr = top_of_stack(self.S).right.high
if hl is not None and (
hr is None or self.lowpt[hl] > self.lowpt[hr]):
self.ref[e] = hl
else:
self.ref[e] = hr
def dfs_embedding(self, v):
"""Completes the embedding."""
# the recursion stack
dfs_stack = [v]
# index of next edge to handle in adjacency list of each node
ind = defaultdict(lambda: 0)
while dfs_stack:
v = dfs_stack.pop()
for w in self.ordered_adjs[v][ind[v]:]:
ind[v] += 1
ei = (v, w)
if ei == self.parent_edge[w]: # tree edge
self.embedding.add_half_edge_first(w, v)
self.left_ref[v] = w
self.right_ref[v] = w
dfs_stack.append(v) # revisit v after finishing w
dfs_stack.append(w) # visit w next
break # handle next node in dfs_stack (i.e. w)
else: # back edge
if self.side[ei] == 1:
self.embedding.add_half_edge_cw(w, v,
self.right_ref[w])
else:
self.embedding.add_half_edge_ccw(w, v,
self.left_ref[w])
self.left_ref[w] = v
def dfs_embedding_recursive(self, v):
"""Recursive version of :meth:`dfs_embedding`."""
for w in self.ordered_adjs[v]:
ei = (v, w)
if ei == self.parent_edge[w]: # tree edge
self.embedding.add_half_edge_first(w, v)
self.left_ref[v] = w
self.right_ref[v] = w
self.dfs_embedding_recursive(w)
else: # back edge
if self.side[ei] == 1:
# place v directly after right_ref[w] in embed. list of w
self.embedding.add_half_edge_cw(w, v, self.right_ref[w])
else:
# place v directly before left_ref[w] in embed. list of w
self.embedding.add_half_edge_ccw(w, v, self.left_ref[w])
self.left_ref[w] = v
def sign(self, e):
"""Resolve the relative side of an edge to the absolute side."""
# the recursion stack
dfs_stack = [e]
# dict to remember reference edges
old_ref = defaultdict(lambda: None)
while dfs_stack:
e = dfs_stack.pop()
if self.ref[e] is not None:
dfs_stack.append(e) # revisit e after finishing self.ref[e]
dfs_stack.append(self.ref[e]) # visit self.ref[e] next
old_ref[e] = self.ref[e] # remember value of self.ref[e]
self.ref[e] = None
else:
self.side[e] *= self.side[old_ref[e]]
return self.side[e]
def sign_recursive(self, e):
"""Recursive version of :meth:`sign`."""
if self.ref[e] is not None:
self.side[e] = self.side[e] * self.sign_recursive(self.ref[e])
self.ref[e] = None
return self.side[e]
class PlanarEmbedding(nx.DiGraph):
"""Represents a planar graph with its planar embedding.
The planar embedding is given by a `combinatorial embedding
<https://en.wikipedia.org/wiki/Graph_embedding#Combinatorial_embedding>`_.
**Neighbor ordering:**
In comparison to a usual graph structure, the embedding also stores the
order of all neighbors for every vertex.
The order of the neighbors can be given in clockwise (cw) direction or
counterclockwise (ccw) direction. This order is stored as edge attributes
in the underlying directed graph. For the edge (u, v) the edge attribute
'cw' is set to the neighbor of u that follows immediately after v in
clockwise direction.
In order for a PlanarEmbedding to be valid it must fulfill multiple
conditions. It is possible to check if these conditions are fulfilled with
the method :meth:`check_structure`.
The conditions are:
* Edges must go in both directions (because the edge attributes differ)
* Every edge must have a 'cw' and 'ccw' attribute which corresponds to a
correct planar embedding.
* A node with non zero degree must have a node attribute 'first_nbr'.
As long as a PlanarEmbedding is invalid only the following methods should
be called:
* :meth:`add_half_edge_ccw`
* :meth:`add_half_edge_cw`
* :meth:`connect_components`
* :meth:`add_half_edge_first`
Even though the graph is a subclass of nx.DiGraph, it can still be used
for algorithms that require undirected graphs, because the method
:meth:`is_directed` is overridden. This is possible because a valid
PlanarEmbedding must have edges in both directions.
**Half edges:**
In methods like `add_half_edge_ccw` the term "half-edge" is used, which is
a term that is used in `doubly connected edge lists
<https://en.wikipedia.org/wiki/Doubly_connected_edge_list>`_. It is used
to emphasize that the edge is only in one direction and there exists
another half-edge in the opposite direction.
While conventional edges always have two faces (including outer face) next
to them, it is possible to assign each half-edge *exactly one* face.
For a half-edge (u, v) that is orientated such that u is below v then the
face that belongs to (u, v) is to the right of this half-edge.
Examples
--------
Create an embedding of a star graph (compare `nx.star_graph(3)`):
>>> G = nx.PlanarEmbedding()
>>> G.add_half_edge_cw(0, 1, None)
>>> G.add_half_edge_cw(0, 2, 1)
>>> G.add_half_edge_cw(0, 3, 2)
>>> G.add_half_edge_cw(1, 0, None)
>>> G.add_half_edge_cw(2, 0, None)
>>> G.add_half_edge_cw(3, 0, None)
Alternatively the same embedding can also be defined in counterclockwise
orientation. The following results in exactly the same PlanarEmbedding:
>>> G = nx.PlanarEmbedding()
>>> G.add_half_edge_ccw(0, 1, None)
>>> G.add_half_edge_ccw(0, 3, 1)
>>> G.add_half_edge_ccw(0, 2, 3)
>>> G.add_half_edge_ccw(1, 0, None)
>>> G.add_half_edge_ccw(2, 0, None)
>>> G.add_half_edge_ccw(3, 0, None)
After creating a graph, it is possible to validate that the PlanarEmbedding
object is correct:
>>> G.check_structure()
"""
def get_data(self):
"""Converts the adjacency structure into a better readable structure.
Returns
-------
embedding : dict
A dict mapping all nodes to a list of neighbors sorted in
clockwise order.
"""
embedding = dict()
for v in self:
embedding[v] = list(self.neighbors_cw_order(v))
return embedding
def neighbors_cw_order(self, v):
"""Generator for the neighbors of v in clockwise order.
Parameters
----------
v : node
Yields
------
node
"""
if len(self[v]) == 0:
# v has no neighbors
return
start_node = self.nodes[v]['first_nbr']
yield start_node
current_node = self[v][start_node]['cw']
while start_node != current_node:
yield current_node
current_node = self[v][current_node]['cw']
def check_structure(self):
"""Runs without exceptions if this object is valid.
Checks that the following properties are fulfilled:
* Edges go in both directions (because the edge attributes differ).
* Every edge has a 'cw' and 'ccw' attribute which corresponds to a
correct planar embedding.
* A node with a degree larger than 0 has a node attribute 'first_nbr'.
Running this method without an exception verifies that the underlying graph is planar.
Raises
------
nx.NetworkXException
This exception is raised with a short explanation if the
PlanarEmbedding is invalid.
"""
# Check fundamental structure
for v in self:
try:
sorted_nbrs = set(self.neighbors_cw_order(v))
except KeyError:
msg = "Bad embedding. " \
"Missing orientation for a neighbor of {}".format(v)
raise nx.NetworkXException(msg)
unsorted_nbrs = set(self[v])
if sorted_nbrs != unsorted_nbrs:
msg = "Bad embedding. Edge orientations not set correctly."
raise nx.NetworkXException(msg)
for w in self[v]:
# Check if opposite half-edge exists
if not self.has_edge(w, v):
msg = "Bad embedding. Opposite half-edge is missing."
raise nx.NetworkXException(msg)
# Check planarity
counted_half_edges = set()
for component in nx.connected_components(self):
if len(component) == 1:
# Don't need to check single node component
continue
num_nodes = len(component)
num_half_edges = 0
num_faces = 0
for v in component:
for w in self.neighbors_cw_order(v):
num_half_edges += 1
if (v, w) not in counted_half_edges:
# We encountered a new face
num_faces += 1
# Mark all half-edges belonging to this face
self.traverse_face(v, w, counted_half_edges)
num_edges = num_half_edges // 2 # num_half_edges is even
if num_nodes - num_edges + num_faces != 2:
# The result does not match Euler's formula
msg = "Bad embedding. The graph does not match Euler's formula"
raise nx.NetworkXException(msg)
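# Worked example for the Euler check above (illustrative): a triangle has
# num_nodes = 3, num_half_edges = 6 (so num_edges = 3) and num_faces = 2
# (the inner face plus the outer face), giving 3 - 3 + 2 = 2 as required
# by Euler's formula for connected planar graphs.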
def add_half_edge_ccw(self, start_node, end_node, reference_neighbor):
"""Adds a half-edge from start_node to end_node.
The half-edge is added counter clockwise next to the existing half-edge
(start_node, reference_neighbor).
Parameters
----------
start_node : node
Start node of inserted edge.
end_node : node
End node of inserted edge.
reference_neighbor: node
End node of reference edge.
Raises
------
nx.NetworkXException
If the reference_neighbor does not exist.
See Also
--------
add_half_edge_cw
connect_components
add_half_edge_first
"""
if reference_neighbor is None:
# The start node has no neighbors
self.add_edge(start_node, end_node) # Add edge to graph
self[start_node][end_node]['cw'] = end_node
self[start_node][end_node]['ccw'] = end_node
self.nodes[start_node]['first_nbr'] = end_node
else:
ccw_reference = self[start_node][reference_neighbor]['ccw']
self.add_half_edge_cw(start_node, end_node, ccw_reference)
if reference_neighbor == self.nodes[start_node].get('first_nbr',
None):
# Update first neighbor
self.nodes[start_node]['first_nbr'] = end_node
def add_half_edge_cw(self, start_node, end_node, reference_neighbor):
"""Adds a half-edge from start_node to end_node.
The half-edge is added clockwise next to the existing half-edge
(start_node, reference_neighbor).
Parameters
----------
start_node : node
Start node of inserted edge.
end_node : node
End node of inserted edge.
reference_neighbor: node
End node of reference edge.
Raises
------
nx.NetworkXException
If the reference_neighbor does not exist.
See Also
--------
add_half_edge_ccw
connect_components
add_half_edge_first
"""
self.add_edge(start_node, end_node) # Add edge to graph
if reference_neighbor is None:
# The start node has no neighbors
self[start_node][end_node]['cw'] = end_node
self[start_node][end_node]['ccw'] = end_node
self.nodes[start_node]['first_nbr'] = end_node
return
if reference_neighbor not in self[start_node]:
raise nx.NetworkXException(
"Cannot add edge. Reference neighbor does not exist")
# Get half-edge at the other side
cw_reference = self[start_node][reference_neighbor]['cw']
# Alter half-edge data structures
self[start_node][reference_neighbor]['cw'] = end_node
self[start_node][end_node]['cw'] = cw_reference
self[start_node][cw_reference]['ccw'] = end_node
self[start_node][end_node]['ccw'] = reference_neighbor
def connect_components(self, v, w):
"""Adds half-edges for (v, w) and (w, v) at some position.
This method should only be called if v and w are in different
components, or it might break the embedding.
This especially means that if `connect_components(v, w)`
is called it is not allowed to call `connect_components(w, v)`
afterwards. The neighbor orientations in both directions are
all set correctly after the first call.
Parameters
----------
v : node
w : node
See Also
--------
add_half_edge_ccw
add_half_edge_cw
add_half_edge_first
"""
self.add_half_edge_first(v, w)
self.add_half_edge_first(w, v)
def add_half_edge_first(self, start_node, end_node):
"""The added half-edge is inserted at the first position in the order.
Parameters
----------
start_node : node
end_node : node
See Also
--------
add_half_edge_ccw
add_half_edge_cw
connect_components
"""
if start_node in self and 'first_nbr' in self.nodes[start_node]:
reference = self.nodes[start_node]['first_nbr']
else:
reference = None
self.add_half_edge_ccw(start_node, end_node, reference)
def next_face_half_edge(self, v, w):
"""Returns the following half-edge left of a face.
Parameters
----------
v : node
w : node
Returns
-------
half-edge : tuple
"""
new_node = self[w][v]['ccw']
return w, new_node
def traverse_face(self, v, w, mark_half_edges=None):
"""Returns nodes on the face that belong to the half-edge (v, w).
The face that is traversed lies to the right of the half-edge (in an
orientation where v is below w).
Optionally it is possible to pass a set to which all encountered half
edges are added. Before calling this method, this set must not include
any half-edges that belong to the face.
Parameters
----------
v : node
Start node of half-edge.
w : node
End node of half-edge.
mark_half_edges: set, optional
Set to which all encountered half-edges are added.
Returns
-------
face : list
A list of nodes that lie on this face.
"""
if mark_half_edges is None:
mark_half_edges = set()
face_nodes = [v]
mark_half_edges.add((v, w))
prev_node = v
cur_node = w
# Last half-edge is (incoming_node, v)
incoming_node = self[v][w]['cw']
while cur_node != v or prev_node != incoming_node:
face_nodes.append(cur_node)
prev_node, cur_node = self.next_face_half_edge(prev_node, cur_node)
if (prev_node, cur_node) in mark_half_edges:
raise nx.NetworkXException(
"Bad planar embedding. Impossible face.")
mark_half_edges.add((prev_node, cur_node))
return face_nodes
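# Illustrative example (assuming the star-graph embedding from the class
# docstring, with the neighbors of node 0 in clockwise order 1, 2, 3): the
# tree has a single face, and traverse_face(1, 0) walks it, returning
# [1, 0, 3, 0, 2, 0]; with 6 half-edges, 4 nodes and 1 face this again
# satisfies Euler's formula, 4 - 3 + 1 = 2.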
def is_directed(self):
"""A valid PlanarEmbedding is undirected.
All reverse edges are contained, i.e. for every existing
half-edge (v, w) the half-edge in the opposite direction (w, v) is also
contained.
"""
return False
| kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/networkx/algorithms/planarity.py | Python | gpl-3.0 | 37,593 | [
"VisIt"
] | e41f2882609408b9295f92df213ce00849585c4f9de38ff8a6336ef10e933092 |
#!/usr/bin/env python
#ryan g. coleman ryan.g.coleman ATSYMBOL gmail.com ryangc ATSYMBOL mail.med.upenn.edu
#kim sharp lab http://crystal.med.upenn.edu
#finds all inter-atom distances
import sys
import geometry
import pdb  # local PDB-file parser module, not the stdlib debugger
if "pdbDistances.py" in sys.argv[0]:
try:
for pdbName in sys.argv[1:]:
pdbD = pdb.pdbData(pdbName)
outputName = pdbName.replace("pdb", "").replace(".", "")
longestDist, meanDist = geometry.longestAndMeanDist(
pdbD.getHeavyAtomXYZ())
print(outputName, "\t", longestDist, "\t", meanDist)
except IndexError:
print "pdbDistances.py pdbName [list of more pdbs]"
print "outputs to standard out"
sys.exit(1)
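# Example invocation (illustrative file names):
# python pdbDistances.py first.pdb second.pdb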
| ryancoleman/traveldistance | src/pdbDistances.py | Python | gpl-2.0 | 710 | [
"CRYSTAL"
] | fdf0ffc537d731692c8435201a83a2d8a1cd145fc84bb294f8763b38af4e7e1f |
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import difflib
import glob
import json
import mmap
import os
import re
import sys
from datetime import date
parser = argparse.ArgumentParser()
parser.add_argument(
"filenames",
help="list of files to check, all files if unspecified",
nargs='*')
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument(
"--rootdir", default=rootdir, help="root directory to examine")
default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument(
"--boilerplate-dir", default=default_boilerplate_dir)
parser.add_argument(
"-v", "--verbose",
help="give verbose output regarding why a file does not pass",
action="store_true")
args = parser.parse_args()
verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
def get_refs():
refs = {}
for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
extension = os.path.basename(path).split(".")[1]
ref_file = open(path, 'r')
ref = ref_file.read().splitlines()
ref_file.close()
refs[extension] = ref
return refs
def file_passes(filename, refs, regexs):
try:
f = open(filename, 'r')
except Exception as exc:
print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
return False
data = f.read()
f.close()
basename = os.path.basename(filename)
extension = file_extension(filename)
if extension != "":
ref = refs[extension]
else:
ref = refs[basename]
# remove build tags from the top of Go files
if extension == "go":
p = regexs["go_build_constraints"]
(data, found) = p.subn("", data, 1)
# remove shebang from the top of shell files
if extension == "sh":
p = regexs["shebang"]
(data, found) = p.subn("", data, 1)
data = data.splitlines()
# if our test file is smaller than the reference it surely fails!
if len(ref) > len(data):
print('File %s smaller than reference (%d < %d)' %
(filename, len(data), len(ref)),
file=verbose_out)
return False
# trim our file to the same number of lines as the reference file
data = data[:len(ref)]
p = regexs["year"]
for d in data:
if p.search(d):
print('File %s is missing the year' % filename, file=verbose_out)
return False
# Replace all occurrences of the regex "CURRENT_YEAR|...|2016|2015|2014" with "YEAR"
p = regexs["date"]
for i, d in enumerate(data):
(data[i], found) = p.subn('YEAR', d)
if found != 0:
break
# if we don't match the reference at this point, fail
if ref != data:
print("Header in %s does not match reference, diff:" % filename, file=verbose_out)
if args.verbose:
print(file=verbose_out)
for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
print(line, file=verbose_out)
print(file=verbose_out)
return False
return True
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'cluster/env.sh',
"vendor", "test/e2e/generated/bindata.go", "hack/boilerplate/test"]
# Skip custom-metrics-stackdriver-adapter, which is not authored by Google Inc.
# (use list.append here: += with a string would add each character separately)
skipped_dirs.append("custom-metrics-stackdriver-adapter")
# Skip event-adapter, which is not authored by Google Inc.
skipped_dirs.append("event-adapter")
def normalize_files(files):
newfiles = []
for pathname in files:
if any(x in pathname for x in skipped_dirs):
continue
newfiles.append(pathname)
for i, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
newfiles[i] = os.path.join(args.rootdir, pathname)
return newfiles
def get_files(extensions):
files = []
if len(args.filenames) > 0:
files = args.filenames
else:
for root, dirs, walkfiles in os.walk(args.rootdir):
# don't visit certain dirs. This is just a performance improvement
# as we would prune these later in normalize_files(). But doing it
# cuts down the amount of filesystem walking we do and cuts down
# the size of the file list
for d in skipped_dirs:
if d in dirs:
dirs.remove(d)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
basename = os.path.basename(pathname)
extension = file_extension(pathname)
if extension in extensions or basename in extensions:
outfiles.append(pathname)
return outfiles
def get_regexs():
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
regexs["year"] = re.compile( 'YEAR' )
# dates can be 2014, 2015, 2016, ..., CURRENT_YEAR, company holder names can be anything
years = range(2014, date.today().year + 1)
regexs["date"] = re.compile( '(%s)' % "|".join(map(lambda l: str(l), years)) )
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
# strip #!.* from shell scripts
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
return regexs
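# Illustrative behavior of the patterns above (example only, not part of the tool):
#   regexs = get_regexs()
#   regexs["date"].subn("YEAR", "Copyright 2017 Google Inc.")
#   # -> ('Copyright YEAR Google Inc.', 1), i.e. the line now matches the
#   #    reference boilerplate, which carries the literal token YEAR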
def main():
regexs = get_regexs()
refs = get_refs()
filenames = get_files(refs.keys())
for filename in filenames:
if not file_passes(filename, refs, regexs):
print(filename, file=sys.stdout)
return 0
if __name__ == "__main__":
sys.exit(main())
| kawych/k8s-stackdriver | hack/boilerplate/boilerplate.py | Python | apache-2.0 | 6,502 | [
"VisIt"
] | d1f9274d60afcd2b37fbe283f13cc0f08b19a9c8da985770e3b297abe96e2a47 |
import math
import numpy as np
try:
from tcpb.tcpb import TCProtobufClient
except ImportError:
pass
import os
#################################################
### electronic structure routines go here #######
#################################################
# each electronic structure method requires at least these three routines:
# 1) compute_elec_struct_, which computes energies, forces, and wfs
# 2) init_h5_datasets_, which defines the datasets to be output to hdf5
# 3) potential_specific_traj_copy, which copies data that is potential specific
#    from one traj data structure to another. This is used when new
#    trajectories and centroids are spawned.
# other ancillary routines may be included as well
def compute_elec_struct(self, zbackprop):
"""Subroutine that calls electronic structure calculation in Terachem
through tcpb interface. This version is compatible with tcpb-0.5.0
When running multiple job on the same server we need to make sure we use
different ports for Terachem server. Every trajectory or centroid
has a port variable, which is passed along to children.
Needs to be provided at input in start file as a traj_param"""
if not zbackprop:
cbackprop = ""
else:
cbackprop = "backprop_"
istate = self.get_istate()
nstates = self.get_numstates()
# initialize electronic_phases if not present
if not hasattr(self, 'electronic_phases'):
self.electronic_phases = np.ones(nstates)
if not hasattr(self, 'backprop_electronic_phases'):
self.backprop_electronic_phases = np.ones(nstates)
exec("pos = self.get_" + cbackprop + "positions()")
pos_list = pos.tolist()
TC = TCProtobufClient(host='localhost', port=self.tc_port)
base_options = self.get_tc_options()
options = base_options
options["castarget"] = istate
# TC.update_options(**base_options)
TC.connect()
# Check if the server is available
avail = TC.is_available()
# print "TCPB Server available: {}".format(avail)
# Write CI vectors and orbitals for initial guess and overlaps
cwd = os.getcwd()
if hasattr(self, 'civecs'):
civecout = os.path.join(cwd, "CIvecs.Singlet.old")
orbout = os.path.join(cwd, "c0.old")
orbout_t = os.path.join(cwd, "c0_t.old")
eval("self.get_" + cbackprop + "civecs()").tofile(civecout)
eval("self.get_" + cbackprop + "orbs()").tofile(orbout)
n = int(math.floor(math.sqrt(self.get_norbs())))
((np.resize(eval("self.get_" + cbackprop + "orbs()"),
(n, n)).T).flatten()).tofile(orbout_t)
# print "old civecs", eval("self.get_" + cbackprop + "civecs()")
# print "old orbs", eval("self.get_" + cbackprop + "orbs()")
zolaps = True
if ("casscf" in self.tc_options):
if (self.tc_options["casscf"] == "yes"):
options["caswritevecs"] = "yes"
options["casguess"] = orbout_t
else:
options["caswritevecs"] = "yes"
options["guess"] = orbout
else:
options["caswritevecs"] = "yes"
options["guess"] = orbout
else:
zolaps = False
options["caswritevecs"] = "yes"
# Gradient calculation
# here we call TC once for energies and once for the gradient
# will eventually be replaced by a more efficient interface
results = TC.compute_job_sync("energy", pos_list, "bohr", **options)
# print results
e = np.zeros(nstates)
e = results['energy']
# e[:] = results['energy'][:]
results = TC.compute_job_sync("gradient", pos_list, "bohr", **options)
# print results
civecfilename = os.path.join(results['job_scr_dir'], "CIvecs.Singlet.dat")
exec("self.set_" + cbackprop + "civecs(np.fromfile(civecfilename))")
# print "new civecs", self.civecs
# orbfilename = os.path.join(results['job_scr_dir'], "c0")
orbfilename = results['orbfile']
exec("self.set_" + cbackprop + "orbs((np.fromfile(orbfilename)).flatten())")
self.set_norbs(self.get_orbs().size)
# BGL transpose hack is temporary
n = int(math.floor(math.sqrt(self.get_norbs())))
clastchar = orbfilename.strip()[-1]
# print "n", n
# print "clastchar", clastchar
if clastchar != '0':
tmporbs = eval("self.get_" + cbackprop + "orbs()")
exec("self.set_" + cbackprop +
"orbs(((tmporbs.reshape((n,n))).T).flatten())")
# end transpose hack
# print "new orbs", eval("self.get_" + cbackprop + "orbs()")
orbout2 = os.path.join(cwd, "c0.new")
eval("self.get_" + cbackprop + "orbs()").tofile(orbout2)
self.set_ncivecs(self.get_civecs().size)
f = np.zeros((nstates, self.numdims))
# print "results['gradient'] ", results['gradient']
# print "results['gradient'].flatten() ", results['gradient'].flatten()
f[self.istate, :] = -1.0 * results['gradient'].flatten()
exec("self.set_" + cbackprop + "energies(e)")
exec("self.set_" + cbackprop + "forces(f)")
# if False:
if zolaps:
exec("pos2 = self.get_" + cbackprop + "prev_wf_positions_in_angstrom()")
# print 'pos2.tolist()', pos2.tolist()
# print 'civecfilename', civecfilename
# print 'civecout', civecout
# print 'orbfilename', orbfilename
# print 'orbout2', orbout2
# print 'orbout', orbout
options = base_options
options["geom2"] = pos2.tolist()
options["cvec1file"] = civecfilename
options["cvec2file"] = civecout
options["orb1afile"] = orbout2
options["orb2afile"] = orbout
# print 'pos_list', pos_list
results2 = TC.compute_job_sync("ci_vec_overlap", pos_list,
"bohr", **options)
# print "results2", results2
S = results2['ci_overlap']
# print "S before phasing ", S
# phasing electronic overlaps
for jstate in range(nstates):
S[:, jstate] *= eval("self.get_" + cbackprop +
"electronic_phases()[jstate]")
S[jstate, :] *= eval("self.get_" + cbackprop +
"electronic_phases()[jstate]")
for jstate in range(nstates):
if S[jstate, jstate] < 0.0:
ep = eval("self.get_" + cbackprop + "electronic_phases()")
ep[jstate] *= -1.0
exec("self.set_" + cbackprop + "electronic_phases(ep)")
# I'm not sure if this line is right, but it seems to be working
S[jstate, :] *= -1.0
# print "S", S
exec("self.set_" + cbackprop + "S_elec_flat(S.flatten())")
W = np.zeros((2, 2))
W[0, 0] = S[istate, istate]
tdc = np.zeros(nstates)
for jstate in range(nstates):
if istate == jstate:
tdc[jstate] = 0.0
else:
W[1,0] = S[jstate,istate]
W[0,1] = S[istate,jstate]
W[1,1] = S[jstate,jstate]
tdc[jstate] = self.compute_tdc(W)
# print "tdc", tdc[jstate]
# tmp=self.compute_tdc(W)
# tdc = np.zeros(self.numstates)
# if self.istate == 1:
# jstate = 0
# else:
# jstate = 1
# tdc[jstate] = tmp
#
# print "tdc2 ", tdc
exec("self.set_" + cbackprop + "timederivcoups(tdc)")
else:
exec("self.set_" + cbackprop +
"timederivcoups(np.zeros(self.numstates))")
exec("self.set_" + cbackprop + "prev_wf_positions(pos)")
def compute_electronic_overlap(self, pos1, civec1, orbs1, pos2, civec2, orbs2):
    cwd = os.getcwd()
    orbout1 = os.path.join(cwd, "c0.1")
    orbs1.tofile(orbout1)
    orbout2 = os.path.join(cwd, "c0.2")
    orbs2.tofile(orbout2)
    civecout1 = os.path.join(cwd, "civec.1")
    civec1.tofile(civecout1)
    civecout2 = os.path.join(cwd, "civec.2")
    civec2.tofile(civecout2)
    TC = TCProtobufClient(host='localhost', port=self.tc_port)
    options = self.get_tc_options()
    # TC.update_options(**base_options)
    TC.connect()
    # Check if the server is available
    avail = TC.is_available()
    options["geom2"] = (0.529177 * pos2).tolist()
    # point the overlap job at the files written above; the original referenced
    # undefined names (cwd, civecfilename, civecout, orbout) left over from
    # compute_elec_struct's scope
    options["cvec1file"] = civecout1
    options["cvec2file"] = civecout2
    options["orb1afile"] = orbout1
    options["orb2afile"] = orbout2
results2 = TC.compute_job_sync("ci_vec_overlap", pos1.tolist(),
"bohr", **options)
S = results2['ci_overlap']
return S
def init_h5_datasets(self):
self.h5_datasets["time"] = 1
self.h5_datasets["energies"] = self.numstates
self.h5_datasets["positions"] = self.numdims
self.h5_datasets["momenta"] = self.numdims
self.h5_datasets["forces_i"] = self.numdims
self.h5_datasets["civecs"] = self.ncivecs
self.h5_datasets["orbs"] = self.norbs
self.h5_datasets_half_step["time_half_step"] = 1
self.h5_datasets_half_step["timederivcoups"] = self.numstates
self.h5_datasets_half_step["S_elec_flat"] = self.numstates*self.numstates
def potential_specific_traj_copy(self, from_traj):
self.set_tc_options(from_traj.get_tc_options())
return
def get_wf0(self):
return self.wf[0, :].copy()
def get_wf1(self):
return self.wf[1, :].copy()
def get_backprop_wf0(self):
return self.backprop_wf[0, :].copy()
def get_backprop_wf1(self):
return self.backprop_wf[1, :].copy()
def set_tc_options(self, tco):
self.tc_options = tco.copy()
def get_tc_options(self):
return self.tc_options.copy()
def get_prev_wf_positions(self):
return self.prev_wf_positions.copy()
def get_backprop_prev_wf_positions(self):
return self.backprop_prev_wf_positions.copy()
def get_prev_wf_positions_in_angstrom(self):
return 0.529177*self.prev_wf_positions
def get_backprop_prev_wf_positions_in_angstrom(self):
return 0.529177*self.backprop_prev_wf_positions
def set_prev_wf_positions(self,pos):
self.prev_wf_positions = pos.copy()
def set_backprop_prev_wf_positions(self,pos):
self.backprop_prev_wf_positions = pos.copy()
def get_civecs(self):
return self.civecs.copy()
def set_civecs(self, v):
self.civecs = v.copy()
def get_ncivecs(self):
return self.ncivecs
def set_ncivecs(self, n):
self.ncivecs = n
def get_orbs(self):
return self.orbs.copy()
def set_orbs(self, v):
self.orbs = v.copy()
def get_norbs(self):
return self.norbs
def set_norbs(self, n):
self.norbs = n
def get_backprop_civecs(self):
return self.backprop_civecs.copy()
def set_backprop_civecs(self, v):
self.backprop_civecs = v.copy()
def get_backprop_orbs(self):
return self.backprop_orbs.copy()
def set_backprop_orbs(self, v):
self.backprop_orbs = v.copy()
def get_electronic_phases(self):
return self.electronic_phases.copy()
def set_electronic_phases(self, v):
self.electronic_phases = v.copy()
def get_backprop_electronic_phases(self):
return self.backprop_electronic_phases.copy()
def set_backprop_electronic_phases(self, v):
self.backprop_electronic_phases = v.copy()
###end terachem_cas electronic structure section###
| blevine37/pySpawn17 | pyspawn/potential/terachem_cas.py | Python | mit | 11,239 | [
"TeraChem"
] | aa2a294f1f2342090cc945fa5cf79796f9f1bb30a82e3ee63ec66dc4beb2a65c |
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''compare neurom.fst features with values dumped from the original
neurom._point_neurite.features'''
import json
import os
import numpy as np
from itertools import chain
from nose import tools as nt
import neurom as nm
from neurom.core.types import NeuriteType
from neurom import fst
from neurom.fst import _neuronfunc as _nrn
from neurom.fst import _neuritefunc as _nrt
from neurom.fst import sectionfunc as _sec
from neurom.fst import _bifurcationfunc as _bf
from neurom.core import Tree
from utils import _close, _equal
_PWD = os.path.dirname(os.path.abspath(__file__))
SWC_DATA_PATH = os.path.join(_PWD, '../../../test_data/swc')
H5V1_DATA_PATH = os.path.join(_PWD, '../../../test_data/h5/v1')
H5V2_DATA_PATH = os.path.join(_PWD, '../../../test_data/h5/v2')
MORPH_FILENAME = 'Neuron.h5'
SWC_MORPH_FILENAME = 'Neuron.swc'
REF_NEURITE_TYPES = [NeuriteType.apical_dendrite, NeuriteType.basal_dendrite,
NeuriteType.basal_dendrite, NeuriteType.axon]
with open(os.path.join(
        _PWD, '../../../test_data/dataset/point_neuron_feature_values.json')) as _fp:
    json_data = json.load(_fp)
def get(feat, neurite_format, **kwargs):
'''using the values captured from the old point_neurite system'''
neurite_type = str(kwargs.get('neurite_type', ''))
return json_data[neurite_format][feat][neurite_type]
def i_chain2(trees, iterator_type=Tree.ipreorder, mapping=None, tree_filter=None):
'''Returns a mapped iterator to a collection of trees
Provides access to all the elements of all the trees
in one iteration sequence.
Parameters:
trees: iterator or iterable of tree objects
iterator_type: type of the iteration (segment, section, triplet...)
mapping: optional function to apply to the iterator's target.
tree_filter: optional top level filter on properties of tree objects.
'''
nrt = (trees if tree_filter is None
else filter(tree_filter, trees))
chain_it = chain.from_iterable(map(iterator_type, nrt))
return chain_it if mapping is None else map(mapping, chain_it)
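# Example (mirrors test_get_section_path_distances below): collect one value
# per section across all neurite trees in a single pass:
#   trees = [n.root_node for n in nm.load_neuron('Neuron.swc').neurites]
#   path_lengths = [_sec.section_path_length(s) for s in i_chain2(trees)]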
class SectionTreeBase(object):
'''Base class for section tree tests'''
def setUp(self):
self.ref_nrn = 'h5'
self.ref_types = REF_NEURITE_TYPES
def test_neurite_type(self):
neurite_types = [n0.type for n0 in self.sec_nrn.neurites]
nt.assert_equal(neurite_types, self.ref_types)
def test_get_n_sections(self):
nt.assert_equal(_nrt.n_sections(self.sec_nrn),
get('number_of_sections', self.ref_nrn)[0])
for t in NeuriteType:
nt.assert_equal(_nrt.n_sections(self.sec_nrn, neurite_type=t),
get('number_of_sections', self.ref_nrn, neurite_type=t)[0])
def test_get_number_of_sections_per_neurite(self):
_equal(_nrt.number_of_sections_per_neurite(self.sec_nrn),
get('number_of_sections_per_neurite', self.ref_nrn))
for t in NeuriteType:
_equal(_nrt.number_of_sections_per_neurite(self.sec_nrn, neurite_type=t),
get('number_of_sections_per_neurite', self.ref_nrn, neurite_type=t))
def test_get_n_segments(self):
nt.assert_equal(_nrt.n_segments(self.sec_nrn), get('number_of_segments', self.ref_nrn)[0])
for t in NeuriteType:
nt.assert_equal(_nrt.n_segments(self.sec_nrn, neurite_type=t),
get('number_of_segments', self.ref_nrn, neurite_type=t)[0])
def test_get_number_of_neurites(self):
nt.assert_equal(_nrt.n_neurites(self.sec_nrn), get('number_of_neurites', self.ref_nrn)[0])
for t in NeuriteType:
nt.assert_equal(_nrt.n_neurites(self.sec_nrn, neurite_type=t),
get('number_of_neurites', self.ref_nrn, neurite_type=t)[0])
def test_get_section_path_distances(self):
_close(_nrt.section_path_lengths(self.sec_nrn), get('section_path_distances', self.ref_nrn))
for t in NeuriteType:
_close(_nrt.section_path_lengths(self.sec_nrn, neurite_type=t),
get('section_path_distances', self.ref_nrn, neurite_type=t))
pl = [_sec.section_path_length(s) for s in i_chain2(self.sec_nrn_trees)]
_close(pl, get('section_path_distances', self.ref_nrn))
def test_get_soma_radius(self):
nt.assert_equal(self.sec_nrn.soma.radius, get('soma_radii', self.ref_nrn)[0])
def test_get_soma_surface_area(self):
nt.assert_equal(fst._nrn.soma_surface_area(self.sec_nrn), get('soma_surface_areas', self.ref_nrn)[0])
def test_get_local_bifurcation_angles(self):
_close(_nrt.local_bifurcation_angles(self.sec_nrn),
get('local_bifurcation_angles', self.ref_nrn))
for t in NeuriteType:
_close(_nrt.local_bifurcation_angles(self.sec_nrn, neurite_type=t),
get('local_bifurcation_angles', self.ref_nrn, neurite_type=t))
ba = [_bf.local_bifurcation_angle(b)
for b in i_chain2(self.sec_nrn_trees, iterator_type=Tree.ibifurcation_point)]
_close(ba, get('local_bifurcation_angles', self.ref_nrn))
def test_get_remote_bifurcation_angles(self):
_close(_nrt.remote_bifurcation_angles(self.sec_nrn),
get('remote_bifurcation_angles', self.ref_nrn))
for t in NeuriteType:
_close(_nrt.remote_bifurcation_angles(self.sec_nrn, neurite_type=t),
get('remote_bifurcation_angles', self.ref_nrn, neurite_type=t))
ba = [_bf.remote_bifurcation_angle(b)
for b in i_chain2(self.sec_nrn_trees, iterator_type=Tree.ibifurcation_point)]
_close(ba, get('remote_bifurcation_angles', self.ref_nrn))
def test_get_section_radial_distances(self):
_close(_nrt.section_radial_distances(self.sec_nrn),
get('section_radial_distances', self.ref_nrn))
for t in NeuriteType:
_close(_nrt.section_radial_distances(self.sec_nrn, neurite_type=t),
get('section_radial_distances', self.ref_nrn, neurite_type=t))
def test_get_trunk_origin_radii(self):
_equal(fst._nrn.trunk_origin_radii(self.sec_nrn), get('trunk_origin_radii', self.ref_nrn))
for t in NeuriteType:
_equal(_nrn.trunk_origin_radii(self.sec_nrn, neurite_type=t),
get('trunk_origin_radii', self.ref_nrn, neurite_type=t))
def test_get_trunk_section_lengths(self):
_close(_nrn.trunk_section_lengths(self.sec_nrn), get('trunk_section_lengths', self.ref_nrn))
for t in NeuriteType:
_close(_nrn.trunk_section_lengths(self.sec_nrn, neurite_type=t),
get('trunk_section_lengths', self.ref_nrn, neurite_type=t))
class TestH5V1(SectionTreeBase):
def setUp(self):
super(TestH5V1, self).setUp()
self.sec_nrn = nm.load_neuron(os.path.join(H5V1_DATA_PATH, MORPH_FILENAME))
self.sec_nrn_trees = [n.root_node for n in self.sec_nrn.neurites]
    # Override the soma values: the same soma points have different meanings in
    # SWC and ASC, and hence lead to different values.
def test_get_soma_radius(self):
nt.assert_equal(self.sec_nrn.soma.radius, 0.09249506049313666)
def test_get_soma_surface_area(self):
nt.assert_equal(fst._nrn.soma_surface_area(self.sec_nrn),
0.1075095256160432)
class TestH5V2(SectionTreeBase):
def setUp(self):
super(TestH5V2, self).setUp()
self.sec_nrn = nm.load_neuron(os.path.join(H5V2_DATA_PATH, MORPH_FILENAME))
self.sec_nrn_trees = [n.root_node for n in self.sec_nrn.neurites]
    # Override the soma values: the same soma points have different meanings in
    # SWC and ASC, and hence lead to different values.
def test_get_soma_radius(self):
nt.assert_equal(self.sec_nrn.soma.radius, 0.09249506049313666)
def test_get_soma_surface_area(self):
nt.assert_equal(fst._nrn.soma_surface_area(self.sec_nrn),
0.1075095256160432)
class TestSWC(SectionTreeBase):
def setUp(self):
self.ref_nrn = 'swc'
self.sec_nrn = nm.load_neuron(os.path.join(SWC_DATA_PATH, SWC_MORPH_FILENAME))
self.sec_nrn_trees = [n.root_node for n in self.sec_nrn.neurites]
self.ref_types = [NeuriteType.axon,
NeuriteType.basal_dendrite,
NeuriteType.basal_dendrite,
NeuriteType.apical_dendrite,
]
| lidakanari/NeuroM | neurom/fst/tests/test_feature_compat.py | Python | bsd-3-clause | 10,193 | [
"NEURON"
] | 0052091b3f19a9b8e93779ab4245a27b6a034e026885b54aa1da05377fb8e72e |
# -*- encoding:ascii -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 6
_modified_time = 1417089926.323608
_template_filename=u'templates/webapps/galaxy/dataset/display.mako'
_template_uri=u'/dataset/display.mako'
_template_cache=cache.Cache(__name__, _modified_time)
_source_encoding='ascii'
_exports = ['title', 'center_panel', 'right_panel', 'render_item', 'init', 'render_item_links', 'render_deleted_data_message', 'javascripts']
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
# SOURCE LINE 3
ns = runtime.TemplateNamespace('__anon_0x7f313c53ca10', context._clean_inheritance_tokens(), templateuri=u'/display_common.mako', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, '__anon_0x7f313c53ca10')] = ns
# SOURCE LINE 4
ns = runtime.TemplateNamespace('__anon_0x7f313c53c5d0', context._clean_inheritance_tokens(), templateuri=u'/tagging_common.mako', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, '__anon_0x7f313c53c5d0')] = ns
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, u'/display_base.mako', _template_uri)
def render_body(context,**pageargs):
context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
_import_ns = {}
_mako_get_namespace(context, '__anon_0x7f313c53ca10')._populate(_import_ns, [u'*'])
_mako_get_namespace(context, '__anon_0x7f313c53c5d0')._populate(_import_ns, [u'render_individual_tagging_element', u'render_community_tagging_element'])
__M_writer = context.writer()
# SOURCE LINE 2
__M_writer(u'\n')
# SOURCE LINE 3
__M_writer(u'\n')
# SOURCE LINE 4
__M_writer(u'\n\n')
# SOURCE LINE 42
__M_writer(u'\n\n')
# SOURCE LINE 52
__M_writer(u'\n\n')
# SOURCE LINE 56
__M_writer(u'\n\n')
# SOURCE LINE 65
__M_writer(u'\n\n')
# SOURCE LINE 82
__M_writer(u'\n\n')
# SOURCE LINE 94
__M_writer(u'\n\n')
# SOURCE LINE 118
__M_writer(u'\n\n')
# SOURCE LINE 162
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_title(context):
context.caller_stack._push_frame()
try:
_import_ns = {}
_mako_get_namespace(context, '__anon_0x7f313c53ca10')._populate(_import_ns, [u'*'])
_mako_get_namespace(context, '__anon_0x7f313c53c5d0')._populate(_import_ns, [u'render_individual_tagging_element', u'render_community_tagging_element'])
item = _import_ns.get('item', context.get('item', UNDEFINED))
get_class_display_name = _import_ns.get('get_class_display_name', context.get('get_class_display_name', UNDEFINED))
get_item_name = _import_ns.get('get_item_name', context.get('get_item_name', UNDEFINED))
__M_writer = context.writer()
# SOURCE LINE 54
__M_writer(u'\n Galaxy | ')
# SOURCE LINE 55
__M_writer(unicode(get_class_display_name( item.__class__ )))
__M_writer(u' | ')
__M_writer(filters.html_escape(unicode(get_item_name( item ) )))
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_center_panel(context):
context.caller_stack._push_frame()
try:
_import_ns = {}
_mako_get_namespace(context, '__anon_0x7f313c53ca10')._populate(_import_ns, [u'*'])
_mako_get_namespace(context, '__anon_0x7f313c53c5d0')._populate(_import_ns, [u'render_individual_tagging_element', u'render_community_tagging_element'])
item_data = _import_ns.get('item_data', context.get('item_data', UNDEFINED))
get_class_display_name = _import_ns.get('get_class_display_name', context.get('get_class_display_name', UNDEFINED))
self = _import_ns.get('self', context.get('self', UNDEFINED))
item = _import_ns.get('item', context.get('item', UNDEFINED))
get_item_name = _import_ns.get('get_item_name', context.get('get_item_name', UNDEFINED))
__M_writer = context.writer()
# SOURCE LINE 96
__M_writer(u'\n <div class="unified-panel-header" unselectable="on">\n <div class="unified-panel-header-inner">\n ')
# SOURCE LINE 99
__M_writer(unicode(get_class_display_name( item.__class__ )))
__M_writer(u'\n | ')
# SOURCE LINE 100
__M_writer(filters.html_escape(unicode(get_item_name( item ) )))
__M_writer(u'\n </div>\n </div>\n\n <div class="unified-panel-body">\n <div style="overflow: auto; height: 100%;">\n <div class="page-body">\n <div style="float: right">\n ')
# SOURCE LINE 108
__M_writer(unicode(self.render_item_links( item )))
__M_writer(u'\n </div>\n <div>\n ')
# SOURCE LINE 111
__M_writer(unicode(self.render_item_header( item )))
__M_writer(u'\n </div>\n\n ')
# SOURCE LINE 114
__M_writer(unicode(self.render_item( item, item_data )))
__M_writer(u'\n </div>\n </div>\n </div>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_right_panel(context):
context.caller_stack._push_frame()
try:
_import_ns = {}
_mako_get_namespace(context, '__anon_0x7f313c53ca10')._populate(_import_ns, [u'*'])
_mako_get_namespace(context, '__anon_0x7f313c53c5d0')._populate(_import_ns, [u'render_individual_tagging_element', u'render_community_tagging_element'])
h = _import_ns.get('h', context.get('h', UNDEFINED))
len = _import_ns.get('len', context.get('len', UNDEFINED))
render_community_tagging_element = _import_ns.get('render_community_tagging_element', context.get('render_community_tagging_element', UNDEFINED))
item = _import_ns.get('item', context.get('item', UNDEFINED))
get_class_display_name = _import_ns.get('get_class_display_name', context.get('get_class_display_name', UNDEFINED))
render_individual_tagging_element = _import_ns.get('render_individual_tagging_element', context.get('render_individual_tagging_element', UNDEFINED))
trans = _import_ns.get('trans', context.get('trans', UNDEFINED))
__M_writer = context.writer()
# SOURCE LINE 120
__M_writer(u'\n <div class="unified-panel-header" unselectable="on">\n <div class="unified-panel-header-inner">\n About this ')
# SOURCE LINE 123
__M_writer(unicode(get_class_display_name( item.__class__ )))
__M_writer(u'\n </div>\n </div>\n\n <div class="unified-panel-body">\n <div style="overflow: auto; height: 100%;">\n <div style="padding: 10px;">\n <h4>Author</h4>\n\n <p>')
# SOURCE LINE 132
__M_writer(filters.html_escape(unicode(item.history.user.username )))
__M_writer(u'</p>\n\n <div><img src="https://secure.gravatar.com/avatar/')
# SOURCE LINE 134
__M_writer(unicode(h.md5(item.history.user.email)))
__M_writer(u'?d=identicon&s=150"></div>\n\n')
# SOURCE LINE 137
__M_writer(u'\n')
# SOURCE LINE 139
__M_writer(u'\n')
# SOURCE LINE 141
__M_writer(u' <p>\n <h4>Tags</h4>\n <p>\n')
# SOURCE LINE 145
__M_writer(u' <div>\n Community:\n ')
# SOURCE LINE 147
__M_writer(unicode(render_community_tagging_element( tagged_item=item, tag_click_fn='community_tag_click', use_toggle_link=False )))
__M_writer(u'\n')
# SOURCE LINE 148
if len ( item.tags ) == 0:
# SOURCE LINE 149
__M_writer(u' none\n')
pass
# SOURCE LINE 151
__M_writer(u' </div>\n')
# SOURCE LINE 153
__M_writer(u' <p>\n <div>\n Yours:\n ')
# SOURCE LINE 156
__M_writer(unicode(render_individual_tagging_element( user=trans.get_user(), tagged_item=item, elt_context='view.mako', use_toggle_link=False, tag_click_fn='community_tag_click' )))
__M_writer(u'\n </div>\n </div>\n </div>\n </div>\n\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_render_item(context,data,data_to_render):
context.caller_stack._push_frame()
try:
_import_ns = {}
_mako_get_namespace(context, '__anon_0x7f313c53ca10')._populate(_import_ns, [u'*'])
_mako_get_namespace(context, '__anon_0x7f313c53c5d0')._populate(_import_ns, [u'render_individual_tagging_element', u'render_community_tagging_element'])
h = _import_ns.get('h', context.get('h', UNDEFINED))
trans = _import_ns.get('trans', context.get('trans', UNDEFINED))
def render_deleted_data_message(data):
return render_render_deleted_data_message(context,data)
truncated = _import_ns.get('truncated', context.get('truncated', UNDEFINED))
__M_writer = context.writer()
# SOURCE LINE 68
__M_writer(u'\n ')
# SOURCE LINE 69
__M_writer(unicode( render_deleted_data_message( data ) ))
__M_writer(u'\n')
# SOURCE LINE 70
if data_to_render:
# SOURCE LINE 71
if truncated:
# SOURCE LINE 72
__M_writer(u' <div class="warningmessagelarge">\n This dataset is large and only the first megabyte is shown below. |\n <a href="')
# SOURCE LINE 74
__M_writer(unicode(h.url_for( controller='dataset', action='display_by_username_and_slug', username=data.history.user.username, slug=trans.security.encode_id( data.id ), preview=False )))
__M_writer(u'">Show all</a>\n </div>\n')
pass
# SOURCE LINE 78
__M_writer(u' <pre style="font-size: 135%">')
__M_writer(filters.html_escape(unicode( data_to_render )))
__M_writer(u'</pre>\n')
# SOURCE LINE 79
else:
# SOURCE LINE 80
__M_writer(u" <p align='center'>Cannot show dataset content</p>\n")
pass
return ''
finally:
context.caller_stack._pop_frame()
def render_init(context):
context.caller_stack._push_frame()
try:
_import_ns = {}
_mako_get_namespace(context, '__anon_0x7f313c53ca10')._populate(_import_ns, [u'*'])
_mako_get_namespace(context, '__anon_0x7f313c53c5d0')._populate(_import_ns, [u'render_individual_tagging_element', u'render_community_tagging_element'])
self = _import_ns.get('self', context.get('self', UNDEFINED))
__M_writer = context.writer()
# SOURCE LINE 44
__M_writer(u'\n')
# SOURCE LINE 45
self.has_left_panel=False
self.has_right_panel=True
self.message_box_visible=False
self.active_view="user"
self.overlay_visible=False
# SOURCE LINE 51
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_render_item_links(context,data):
context.caller_stack._push_frame()
try:
_import_ns = {}
_mako_get_namespace(context, '__anon_0x7f313c53ca10')._populate(_import_ns, [u'*'])
_mako_get_namespace(context, '__anon_0x7f313c53c5d0')._populate(_import_ns, [u'render_individual_tagging_element', u'render_community_tagging_element'])
h = _import_ns.get('h', context.get('h', UNDEFINED))
trans = _import_ns.get('trans', context.get('trans', UNDEFINED))
__M_writer = context.writer()
# SOURCE LINE 58
__M_writer(u'\n')
# SOURCE LINE 60
__M_writer(u' <a href="')
__M_writer(unicode(h.url_for( controller='/dataset', action='display', dataset_id=trans.security.encode_id( data.id ), to_ext=data.ext )))
__M_writer(u'" class="icon-button disk" title="Save dataset"></a>\n <a\n href="')
# SOURCE LINE 62
__M_writer(unicode(h.url_for( controller='/dataset', action='imp', dataset_id=trans.security.encode_id( data.id ) )))
__M_writer(u'"\n class="icon-button import"\n title="Import dataset"></a>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_render_deleted_data_message(context,data):
context.caller_stack._push_frame()
try:
_import_ns = {}
_mako_get_namespace(context, '__anon_0x7f313c53ca10')._populate(_import_ns, [u'*'])
_mako_get_namespace(context, '__anon_0x7f313c53c5d0')._populate(_import_ns, [u'render_individual_tagging_element', u'render_community_tagging_element'])
h = _import_ns.get('h', context.get('h', UNDEFINED))
trans = _import_ns.get('trans', context.get('trans', UNDEFINED))
__M_writer = context.writer()
# SOURCE LINE 84
__M_writer(u'\n')
# SOURCE LINE 85
if data.deleted:
# SOURCE LINE 86
__M_writer(u' <div class="errormessagelarge" id="deleted-data-message">\n You are viewing a deleted dataset.\n')
# SOURCE LINE 88
if data.history and data.history.user == trans.get_user():
# SOURCE LINE 89
__M_writer(u' <br />\n <a href="#" onclick="$.ajax( {type: \'GET\', cache: false, url: \'')
# SOURCE LINE 90
__M_writer(unicode(h.url_for( controller='dataset', action='undelete_async', dataset_id=trans.security.encode_id( data.id ) )))
__M_writer(u'\', dataType: \'text\', contentType: \'text/html\', success: function( data, textStatus, jqXHR ){ if (data == \'OK\' ){ $( \'#deleted-data-message\' ).slideUp( \'slow\' ) } else { alert( \'Undelete failed.\' ) } }, error: function( data, textStatus, jqXHR ){ alert( \'Undelete failed.\' ); } } );">Undelete</a>\n')
pass
# SOURCE LINE 92
__M_writer(u' </div>\n')
pass
return ''
finally:
context.caller_stack._pop_frame()
def render_javascripts(context):
context.caller_stack._push_frame()
try:
_import_ns = {}
_mako_get_namespace(context, '__anon_0x7f313c53ca10')._populate(_import_ns, [u'*'])
_mako_get_namespace(context, '__anon_0x7f313c53c5d0')._populate(_import_ns, [u'render_individual_tagging_element', u'render_community_tagging_element'])
item = _import_ns.get('item', context.get('item', UNDEFINED))
first_chunk = _import_ns.get('first_chunk', context.get('first_chunk', UNDEFINED))
trans = _import_ns.get('trans', context.get('trans', UNDEFINED))
parent = _import_ns.get('parent', context.get('parent', UNDEFINED))
h = _import_ns.get('h', context.get('h', UNDEFINED))
__M_writer = context.writer()
# SOURCE LINE 6
__M_writer(u'\n ')
# SOURCE LINE 7
__M_writer(unicode(parent.javascripts()))
__M_writer(u'\n')
# SOURCE LINE 9
if item.datatype.CHUNKABLE:
# SOURCE LINE 10
__M_writer(u'\n <script type="text/javascript">\n require.config({\n baseUrl: "')
# SOURCE LINE 13
__M_writer(unicode(h.url_for('/static/scripts')))
__M_writer(u'",\n shim: {\n "libs/backbone/backbone": { exports: "Backbone" },\n }\n });\n\n require([\'mvc/data\'], function(data) {\n //\n // Use tabular data display progressively by deleting data from page body\n // and then showing dataset view.\n //\n $(\'.page-body\').children().remove();\n\n data.createTabularDatasetChunkedView({\n // TODO: encode id.\n dataset_config:\n _.extend( ')
# SOURCE LINE 29
__M_writer(unicode(h.dumps( item.to_dict() )))
__M_writer(u',\n {\n chunk_url: "')
# SOURCE LINE 31
__M_writer(unicode(h.url_for( controller='/dataset', action='display',
dataset_id=trans.security.encode_id( item.id ))))
# SOURCE LINE 32
__M_writer(u'",\n first_data_chunk: ')
# SOURCE LINE 33
__M_writer(unicode(first_chunk))
__M_writer(u"\n }\n ),\n parent_elt: $('.page-body')\n });\n });\n </script>\n\n")
pass
return ''
finally:
context.caller_stack._pop_frame()
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/database/compiled_templates/dataset/display.mako.py | Python | gpl-3.0 | 17,557 | [
"Galaxy"
] | e8230d29125c4ff8c711f66e82dc9cbb86ffcbc198a9b07e637eda7862163e68 |
from __future__ import print_function
import os
import vtk
from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
from PyQt4 import QtCore, QtGui, uic
class GlyphViewerApp(QtGui.QMainWindow):
def __init__(self, data_dir):
#Parent constructor
super(GlyphViewerApp,self).__init__()
self.vtk_widget = None
self.ui = None
self.setup(data_dir)
def setup(self, data_dir):
import glyph_ui
self.ui = glyph_ui.Ui_MainWindow()
self.ui.setupUi(self)
self.vtk_widget = QGlyphViewer(self.ui.vtk_panel, data_dir)
self.ui.vtk_layout = QtGui.QHBoxLayout()
self.ui.vtk_layout.addWidget(self.vtk_widget)
self.ui.vtk_layout.setContentsMargins(0,0,0,0)
self.ui.vtk_panel.setLayout(self.ui.vtk_layout)
def initialize(self):
self.vtk_widget.start()
class QGlyphViewer(QtGui.QFrame):
def __init__(self, parent, data_dir):
super(QGlyphViewer,self).__init__(parent)
        # Make the actual QtWidget a child so that it can be re-parented
interactor = QVTKRenderWindowInteractor(self)
self.layout = QtGui.QHBoxLayout()
self.layout.addWidget(interactor)
self.layout.setContentsMargins(0,0,0,0)
self.setLayout(self.layout)
# Read the data
xyx_file = os.path.join(data_dir, "combxyz.bin")
q_file = os.path.join(data_dir, "combq.bin")
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName(xyx_file)
pl3d.SetQFileName(q_file)
pl3d.SetScalarFunctionNumber(100)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
blocks = pl3d.GetOutput()
b0 = blocks.GetBlock(0)
# Setup VTK environment
renderer = vtk.vtkRenderer()
render_window = interactor.GetRenderWindow()
render_window.AddRenderer(renderer)
interactor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
render_window.SetInteractor(interactor)
renderer.SetBackground(0.2,0.2,0.2)
# Draw Outline
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputData(b0)
outline_mapper = vtk.vtkPolyDataMapper()
outline_mapper.SetInputConnection(outline.GetOutputPort())
outline_actor = vtk.vtkActor()
outline_actor.SetMapper(outline_mapper)
outline_actor.GetProperty().SetColor(1,1,1)
renderer.AddActor(outline_actor)
renderer.ResetCamera()
# Threshold points
threshold = vtk.vtkThresholdPoints()
threshold.SetInputData(b0)
threshold.ThresholdByUpper(0.5)
# Draw arrows
arrow = vtk.vtkArrowSource()
        glyphs = vtk.vtkGlyph3D()
        glyphs.SetSourceConnection(arrow.GetOutputPort())
        # the thresholded points feed the glyph filter; the original also called
        # SetInputData(b0), which SetInputConnection overrides
        glyphs.SetInputConnection(threshold.GetOutputPort())
glyphs.SetVectorModeToUseVector()
glyphs.SetScaleModeToScaleByVector()
glyphs.SetScaleFactor(0.005)
glyphs.SetColorModeToColorByVector()
# Mapper
glyph_mapper = vtk.vtkPolyDataMapper()
glyph_mapper.SetInputConnection(glyphs.GetOutputPort())
glyph_actor = vtk.vtkActor()
glyph_actor.SetMapper(glyph_mapper)
glyph_mapper.UseLookupTableScalarRangeOn()
renderer.AddActor(glyph_actor)
# Set color lookuptable
glyphs.Update()
s0,sf = glyphs.GetOutput().GetScalarRange()
lut = vtk.vtkColorTransferFunction()
lut.AddRGBPoint(s0, 1,0,0)
lut.AddRGBPoint(sf, 0,1,0)
glyph_mapper.SetLookupTable(lut)
self.b0 = b0
self.renderer = renderer
self.interactor = interactor
self.threshold = threshold
def start(self):
self.interactor.Initialize()
self.interactor.Start()
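# Minimal standalone usage, mirroring the __main__ block below ("volume" must
# contain the PLOT3D files combxyz.bin and combq.bin):
#   app = QtGui.QApplication([])
#   window = GlyphViewerApp("volume")
#   window.show()
#   window.initialize()
#   app.exec_()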
if __name__ == "__main__":
os.chdir(os.path.dirname(__file__))
# Recompile ui
with open("glyph_view.ui") as ui_file:
with open("glyph_ui.py","w") as py_ui_file:
uic.compileUi(ui_file,py_ui_file)
app = QtGui.QApplication([])
main_window = GlyphViewerApp("volume")
main_window.show()
main_window.initialize()
app.exec_()
| diego0020/tutorial-vtk-pyqt | 02_embed_in_qt.py | Python | mit | 4,664 | [
"VTK"
] | fad487e9937b2d1443ab10bf78cb88a4cc8849451ce7c22391b1fdabbad09ab9 |
# -*- coding: utf-8 -*-
#
# multimeter_file.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
This file illustrates recording from an iaf_cond_alpha neuron
using a multimeter and writing data to a file.
'''
import nest
import numpy as np
import pylab as pl
nest.ResetKernel()
nest.SetKernelStatus({'overwrite_files': True, # set to True to permit overwriting
'data_path': '', # path to all data files, from working dir
'data_prefix': ''}) # prefix for all data files
# display recordables for illustration
print('iaf_cond_alpha recordables: {0}'.format(nest.GetDefaults('iaf_cond_alpha')['recordables']))
# create neuron and multimeter
n = nest.Create('iaf_cond_alpha',
params = {'tau_syn_ex': 1.0, 'V_reset': -70.0})
m = nest.Create('multimeter',
params = {'withtime': True, # store time for each data point
'withgid': True, # store gid for each data point
'to_file': True, # write data to file
'label': 'my_multimeter', # part of file name
'interval': 0.1,
'record_from': ['V_m', 'g_ex', 'g_in']})
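# With 'to_file': True the samples are also written to a plain-text file in
# data_path; in NEST 2.x the file name is derived from the label (typically
# 'my_multimeter-<gid>-<vp>.dat'), one row per sample: gid, time, then the
# recorded quantities in 'record_from' order.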
# Create spike generators and connect
gex = nest.Create('spike_generator',
params = {'spike_times': np.array([10.0, 20.0, 50.0])})
gin = nest.Create('spike_generator',
params = {'spike_times': np.array([15.0, 25.0, 55.0])})
nest.Connect(gex, n, syn_spec={'weight': 40.0}) # excitatory
nest.Connect(gin, n, syn_spec={'weight': -20.0}) # inhibitory
nest.Connect(m, n)
# simulate
nest.Simulate(100)
# obtain and display data
events = nest.GetStatus(m)[0]['events']
t = events['times']
pl.clf()
pl.subplot(211)
pl.plot(t, events['V_m'])
pl.axis([0, 100, -75, -53])
pl.ylabel('Membrane potential [mV]')
pl.subplot(212)
pl.plot(t, events['g_ex'], t, events['g_in'])
pl.axis([0, 100, 0, 45])
pl.xlabel('Time [ms]')
pl.ylabel('Synaptic conductance [nS]')
pl.legend(('g_exc', 'g_inh'))
pl.show()
| synergetics/nest | pynest/examples/multimeter_file.py | Python | gpl-2.0 | 2,713 | [
"NEURON"
] | d12cd957141b64b8b56d1eeb35acd32dfe1b305728e745cea925191c67942381 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
.. versionadded:: 1.9.0
This module implements the abstract base class for PMGSONable pymatgen objects,
i.e., objects that can be converted to a json representation. MSON stands for
materials json.
It also implements general JSON encoders and decoders for pymatgen. Only
supports pymatgen objects version >= 1.9.0.
Current support for all core objects that obey the as_dict/from_dict API,
including Site, PeriodicSite, Structure, Specie, Dos, Lattice, etc. and all
Entry and all Transformations. Note that nested lists and dicts of these
objects are supported as well.
.. note::
The decoder depends on finding a "@module" and "@class" key in the dict in
order to decode the necessary python object. All as_dict() properties must
therefore have the module name and class embedded. In general, the
MontyEncoder will add these keys if they are not present, but for better
long term stability, the easiest way is to add the following to any as_dict()
property::
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Apr 30, 2012"
import six
import json
import functools
from abc import ABCMeta
from monty.io import zopen
from monty.json import MSONable, MontyEncoder, MontyDecoder, MSONError
from monty.dev import deprecated
def pmg_serialize(method):
"""
Decorator for methods that add MSON serializations keys
to the dictionary. See documentation of MSON for more details
"""
@functools.wraps(method)
def wrapper(*args, **kwargs):
self = args[0]
d = method(*args, **kwargs)
# Add @module and @class
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
return wrapper
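# A minimal usage sketch (the Point class is hypothetical, not part of pymatgen):
#
#   class Point(object):
#       def __init__(self, x):
#           self.x = x
#
#       @pmg_serialize
#       def as_dict(self):
#           return {"init_args": {"x": self.x}}
#
# Point(1).as_dict() then also carries the "@module" and "@class" keys; if Point
# additionally subclassed PMGSONable, the default from_dict below would rebuild
# it from the "init_args" entry.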
class PMGSONable(six.with_metaclass(ABCMeta, MSONable)):
"""
This is an abstract base class specifying an API for MSONable objects.
MSON is Pymatgen JSON. Essentially, PMGSONable objects must
implement an as_dict() method and a from_dict static method.
"""
@property
@deprecated(
message="All to_dict properties have been deprecated. They will be "
"removed from v3.1. Use the as_dict() method instead.")
def to_dict(self):
"""
A JSON serializable dict representation of an object.
"""
return self.as_dict()
@classmethod
def from_dict(cls, d):
"""
This implements a default from_dict method which supports all
classes that simply saves all init arguments in a "init_args"
key. Otherwise, the PMGSONable class must override this class method.
"""
if "init_args" in d:
return cls(**d['init_args'])
        raise MSONError("Invalid dict for default from_dict. Please "
                        "override from_dict for {}".format(cls))
def json_pretty_dump(obj, filename):
"""
Serialize obj as a JSON formatted stream to the given filename (
pretty printing version)
"""
with open(filename, "w") as fh:
        json.dump(obj, fh, indent=4, sort_keys=True)
def pmg_load(filename, **kwargs):
"""
Loads a json file and deserialize it with MontyDecoder.
Args:
filename (str): Filename of file to open. Can be gzipped or bzipped.
\*\*kwargs: Any of the keyword arguments supported by the json.load
method.
Returns:
Deserialized pymatgen object. Note that these objects can be lists,
dicts or otherwise nested pymatgen objects that support the as_dict()
and from_dict PMGSONable protocol.
"""
return json.load(zopen(filename, "rt"), cls=MontyDecoder, **kwargs)
def pmg_dump(obj, filename, **kwargs):
"""
Dump an object to a json file using MontyEncoder. Note that these
objects can be lists, dicts or otherwise nested pymatgen objects that
support the as_dict() and from_dict PMGSONable protocol.
Args:
obj (object): Object to dump.
filename (str): Filename of file to open. Can be gzipped or bzipped.
\*\*kwargs: Any of the keyword arguments supported by the json.dump
method.
"""
return json.dump(obj, zopen(filename, "wb"), cls=MontyEncoder, **kwargs)
| sonium0/pymatgen | pymatgen/serializers/json_coders.py | Python | mit | 4,577 | [
"pymatgen"
] | 1fb9e7a538c6260caccdc19e6b45d46885475bbbc6e7241781465fa0e3094368 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
from io import StringIO
from itertools import chain, groupby, product, repeat
import numpy as np
from numpy import pi
from numpy.linalg import inv, norm
from .species_data import get_property
__version__ = '0.1.0'
__all__ = ['Geometry', 'loads', 'readfile']
class Geometry(object):
"""
Represents a single molecule or a crystal.
:param list species: list of element symbols
:param list coords: list of atomic coordinates in angstroms (as 3-tuples)
    :param list lattice: list of lattice vectors (:data:`None` for a molecule)
Iterating over a geometry yields 2-tuples of symbols and coordinates.
:func:`len` returns the number of atoms in a geometry. The class supports
:func:`format` with the same available formats as :meth:`dump`.
"""
def __init__(self, species, coords, lattice=None):
self.species = species
self.coords = np.array(coords)
self.lattice = np.array(lattice) if lattice is not None else None
@classmethod
def from_atoms(cls, atoms, lattice=None, unit=1.0):
"""Alternative contructor.
:param list atoms: list of 2-tuples with an elemnt symbol and
a coordinate
:param float unit: value to multiple atomic coordiantes with
:param list lattice: list of lattice vectors (:data:`None` for a moleucle)
"""
species = [sp for sp, _ in atoms]
coords = [np.array(coord, dtype=float) * unit for _, coord in atoms]
return cls(species, coords, lattice)
def __repr__(self):
s = repr(self.formula)
if self.lattice is not None:
s += ' in a lattice'
return '<{} {}>'.format(self.__class__.__name__, s)
def __iter__(self):
for specie, coord in zip(self.species, self.coords):
yield specie, coord
def __len__(self):
return len(self.species)
@property
def formula(self):
"""Chemical formula of the molecule or a unit cell."""
composition = sorted(
(sp, len(list(g))) for sp, g in groupby(sorted(self.species))
)
return ''.join('{}{}'.format(sp, n if n > 1 else '') for sp, n in composition)
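    # e.g. Geometry(['O', 'H', 'H'], [(0, 0, 0)] * 3).formula == 'H2O'
    # (the coordinates are irrelevant here; species are sorted and counted)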
def __format__(self, fmt):
"""Return the geometry represented as a string, delegates to :meth:`dump`."""
fp = StringIO()
self.dump(fp, fmt)
return fp.getvalue()
dumps = __format__
def dump(self, f, fmt):
"""Save the geometry into a file.
:param file f: file object
:param str fmt: geometry format, one of ``""``, ``"xyz"``, ``"aims"``,
``"mopac"``.
"""
if fmt == '':
f.write(repr(self))
elif fmt == 'xyz':
f.write('{}\n'.format(len(self)))
f.write('Formula: {}\n'.format(self.formula))
for specie, coord in self:
f.write(
'{:>2} {}\n'.format(
specie, ' '.join('{:15.8}'.format(x) for x in coord)
)
)
elif fmt == 'aims':
f.write('# Formula: {}\n'.format(self.formula))
for specie, coord in self:
f.write(
'atom {} {:>2}\n'.format(
' '.join('{:15.8}'.format(x) for x in coord), specie
)
)
elif fmt == 'mopac':
f.write('* Formula: {}\n'.format(self.formula))
for specie, coord in self:
f.write(
'{:>2} {}\n'.format(
specie, ' '.join('{:15.8} 1'.format(x) for x in coord)
)
)
else:
raise ValueError('Unknown format: "{}"'.format(fmt))
def copy(self):
"""Make a copy of the geometry."""
return Geometry(
list(self.species),
self.coords.copy(),
self.lattice.copy() if self.lattice is not None else None,
)
def write(self, filename):
"""
Write the geometry into a file, delegates to :meth:`dump`.
:param str filename: path that will be overwritten
"""
ext = os.path.splitext(filename)[1]
if ext == '.xyz':
fmt = 'xyz'
elif ext == '.aims' or os.path.basename(filename) == 'geometry.in':
fmt = 'aims'
elif ext == '.mopac':
fmt = 'mopac'
else:
raise ValueError('Unknown file extension')
with open(filename, 'w') as f:
self.dump(f, fmt)
def super_circum(self, radius):
"""
        Supercell dimensions such that the supercell circumscribes a sphere.
:param float radius: circumscribed radius in angstroms
Returns :data:`None` when geometry is not a crystal.
"""
if self.lattice is None:
return
rec_lattice = 2 * pi * inv(self.lattice.T)
layer_sep = np.array(
[
sum(vec * rvec / norm(rvec))
for vec, rvec in zip(self.lattice, rec_lattice)
]
)
return np.array(np.ceil(radius / layer_sep + 0.5), dtype=int)
def supercell(self, ranges=((-1, 1), (-1, 1), (-1, 1)), cutoff=None):
"""
Create a crystal supercell.
:param list ranges: list of 2-tuples specifying the range of multiples
of the unit-cell vectors
:param float cutoff: if given, the ranges are determined such that
the supercell contains a sphere with the radius qual to the cutoff
Returns a copy of itself when geometry is not a crystal.
"""
if self.lattice is None:
return self.copy()
if cutoff:
ranges = [(-r, r) for r in self.super_circum(cutoff)]
latt_vectors = np.array(
[(0, 0, 0)]
+ [
sum(k * vec for k, vec in zip(shift, self.lattice))
for shift in product(*[range(a, b + 1) for a, b in ranges])
if shift != (0, 0, 0)
]
)
species = list(chain.from_iterable(repeat(self.species, len(latt_vectors))))
coords = (self.coords[None, :, :] + latt_vectors[:, None, :]).reshape((-1, 3))
lattice = self.lattice * np.array([b - a for a, b in ranges])[:, None]
return Geometry(species, coords, lattice)
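    # Example (illustrative 3-angstrom cubic lattice): replicate a one-atom
    # cell until the supercell encloses a sphere of radius 5 angstroms:
    #   g = Geometry(['C'], [[0., 0., 0.]], lattice=3.0 * np.eye(3))
    #   big = g.supercell(cutoff=5.0)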
def dist_diff(self, other=None):
r"""
Calculate distances and vectors between atoms.
Args:
other (:class:`~berny.Geometry`): calculate distances between two
geometries if given or within a geometry if not
Returns:
:math:`R_{ij}:=|\mathbf R_i-\mathbf R_j|` and
:math:`R_{ij\alpha}:=(\mathbf R_i)_\alpha-(\mathbf R_j)_\alpha`.
"""
if other is None:
other = self
diff = self.coords[:, None, :] - other.coords[None, :, :]
dist = np.sqrt(np.sum(diff ** 2, 2))
dist[np.diag_indices(len(self))] = np.inf
return dist, diff
def dist(self, other=None):
"""Alias for the first element of :meth:`dist_diff`."""
return self.dist_diff(other)[0]
def bondmatrix(self, scale=1.3):
r"""
Calculate the covalent connectedness matrix.
:param float scale: threshold for accepting a distance as a covalent bond
Returns:
:math:`b_{ij}:=R_{ij}<\text{scale}\times (R_i^\text{cov}+R_j^\text{cov})`.
"""
dist = self.dist(self)
radii = np.array([get_property(sp, 'covalent_radius') for sp in self.species])
return dist < scale * (radii[None, :] + radii[:, None])
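    # Example (approximate water geometry, angstroms):
    #   g = Geometry(['O', 'H', 'H'],
    #                [[0., 0., 0.], [0.96, 0., 0.], [-0.24, 0.93, 0.]])
    #   g.bondmatrix()  # True exactly for the two O-H pairs, False elsewhere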
def rho(self):
r"""
Calculate a measure of covalentness.
Returns:
:math:`\rho_{ij}:=\exp\big(-R_{ij}/(R_i^\text{cov}+R_j^\text{cov})\big)`.
"""
geom = self.supercell()
dist = geom.dist(geom)
radii = np.array([get_property(sp, 'covalent_radius') for sp in geom.species])
return np.exp(-dist / (radii[None, :] + radii[:, None]) + 1)
@property
def masses(self):
"""Numpy array of atomic masses."""
return np.array([get_property(sp, 'mass') for sp in self.species])
@property
def cms(self):
r"""Calculate the center of mass, :math:`\mathbf R_\text{CMS}`."""
masses = self.masses
return np.sum(masses[:, None] * self.coords, 0) / masses.sum()
@property
def inertia(self):
r"""Calculate the moment of inertia.
.. math::
I_{\alpha\beta}:=
\sum_im_i\big(r_i^2\delta_{\alpha\beta}-(\mathbf r_i)_\alpha(\mathbf
r_i)_\beta\big),\qquad
\mathbf r_i=\mathbf R_i-\mathbf R_\text{CMS}
"""
coords_w = np.sqrt(self.masses)[:, None] * (self.coords - self.cms)
A = np.array([np.diag(np.full(3, r)) for r in np.sum(coords_w ** 2, 1)])
B = coords_w[:, :, None] * coords_w[:, None, :]
return np.sum(A - B, 0)
def load(fp, fmt):
"""
Read a geometry from a file object.
:param file fp: file object
:param str fmt: the format of the geometry file, can be one of ``"xyz"``,
``"aims"``
Returns :class:`~berny.Geometry`.
"""
if fmt == 'xyz':
n = int(fp.readline())
fp.readline()
species = []
coords = []
for _ in range(n):
l = fp.readline().split()
species.append(l[0])
coords.append([float(x) for x in l[1:4]])
return Geometry(species, coords)
if fmt == 'aims':
species = []
coords = []
lattice = []
while True:
l = fp.readline()
if l == '':
break
l = l.strip()
if not l or l.startswith('#'):
continue
l = l.split()
what = l[0]
if what == 'atom':
species.append(l[4])
coords.append([float(x) for x in l[1:4]])
elif what == 'lattice_vector':
lattice.append([float(x) for x in l[1:4]])
if lattice:
assert len(lattice) == 3
return Geometry(species, coords, lattice)
else:
return Geometry(species, coords)
def loads(s, fmt):
"""
Read a geometry from a string, delegates to :func:`load`.
:param str s: string with geometry
"""
fp = StringIO(s)
return load(fp, fmt)
def readfile(path, fmt=None):
"""
Read a geometry from a file path, delegates to :func:`load`.
:param str path: path to a geometry file
:param str fmt: if not given, the format is given from the file extension
"""
if not fmt:
ext = os.path.splitext(path)[1]
if ext == '.xyz':
fmt = 'xyz'
if ext == '.aims' or os.path.basename(path) == 'geometry.in':
fmt = 'aims'
with open(path) as f:
return load(f, fmt)
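# Example round-trip using the xyz branch of load():
#   s = '3\nwater\nO 0. 0. 0.\nH 0.96 0. 0.\nH -0.24 0.93 0.\n'
#   geom = loads(s, 'xyz')
#   geom.formula  # 'H2O'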
| azag0/pyberny | src/berny/geomlib.py | Python | mpl-2.0 | 11,144 | [
"CRYSTAL",
"MOPAC"
] | 04437b1f458341e33d4bc93b8e0f35f0582c0547023d1c7c001c7000a865f0ce |
import sys
import numpy as np
import pandas as pd
from astropy import units as u  # explicit, in case ctc_observ's star import changes
from ctc_observ import *
from ctc_arrays import *
from scipy.interpolate import pchip
from statsmodels.nonparametric.smoothers_lowess import lowess
# load HSC catalog first
# hsc = pd.read_csv('/cuc36/xxl/multiwavelength/HSC/wide.csv')
def pdf_sep_gen(sep_arcsec, xposerr, opterr, pdf='Rayleigh'):
'''
    PDF of the angular separation between an X-ray object (positional error
    xposerr) and a source in the input optical catalog (positional error opterr)
'''
if pdf == 'Gaussian':
# that was 2d-normal
poserr = 2 * (opterr ** 2 + xposerr ** 2) # this is 2*sigma^2
return np.exp(-sep_arcsec ** 2 / poserr) / (np.pi * poserr)
    else:
        # Rayleigh profile: the radial marginal of the 2-d normal above.
        # Note the 2*poserr in the exponent; the original used poserr alone,
        # which integrates to 1/2 rather than 1 over 0 <= r < inf.
        poserr = (opterr ** 2 + xposerr ** 2)
        return (sep_arcsec / poserr) * np.exp(-sep_arcsec ** 2 / (2 * poserr))
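# Sanity check for the Rayleigh branch (illustrative error values): the
# profile should integrate to ~1 over 0 <= r < inf,
#   r = np.linspace(0., 30., 3001)
#   np.trapz(pdf_sep_gen(r, xposerr=1.0, opterr=0.5), r)  # ~= 1.0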
def getbkgcat(xcat, catopt, optdf_in, r_in=7., r_out=35., magonly=False,
              nmagbin=15, magname='imag_psf', ora='ra', odec='dec',
              corr_glob=False, globonly=False):
    '''
    Takes in xcat and catopt,
    find optical sources with separation from any x-ray sources
    between r_in and r_out (in arcsec),
    and derive the magnitude dependence of these background sources
    '''
    # this sanity check had been swallowed by the docstring in the original;
    # restored here (and the parameter renamed to optdf_in to match it)
    optdf = optdf_in.copy()
    optdf.reset_index(inplace=True)
    if len(catopt) != len(optdf):
        print("catopt should be the astropy coordinate object computed from optdf!")
        sys.exit(1)
idhsc,idxmm,d2d,d3d=xcat.search_around_sky(catopt,r_in*u.arcsec)
#Excluding each optical source with an x-ray source within r_in
itmp=np.arange(len(catopt))
itmp[np.unique(idhsc)]=-1
#indices for optical sources with **NO** X-ray counterparts within r_in
idhsc_ext=np.where(np.equal(optdf.index.values, itmp))[0]
#Now search for X-ray and optical matches within r_out
idhsc_in,idxmm,d2d,d3d=xcat.search_around_sky(catopt,r_out*u.arcsec)
idhsc_in = np.unique(idhsc_in)
#Cross-correlate the ``no r_in`` list and the ``r_out`` list
#This will create a list of ``background optical sources''
idhsc_bkgd=np.intersect1d(idhsc_ext,idhsc_in)
hsc_bkgd=optdf.loc[idhsc_bkgd].copy()
hsc_bkgd.reset_index(inplace=True)
if magonly:
return hsc_bkgd[magname].values
else:
out,rmagbin=pd.cut(hsc_bkgd[magname].values,bins=nmagbin,retbins=True)
groups=hsc_bkgd.groupby(out)
#number density = total number of sources divided by the area of annulus
N_xmm=len(xcat) #number of unique XMM sources
N_bkgd=len(hsc_bkgd)
nm=groups[ora].count().values/(np.pi*(r_out**2-r_in**2)*N_xmm)
if corr_glob or globonly:
#According to Brusa et al. 2007, at faint magnitudes
#nm is not correct and should use a global one.
out,rmagbin_global=pd.cut(optdf[magname].values,bins=nmagbin,retbins=True)
groups=optdf.groupby(out)
rmag_global = binvalue(rmagbin_global)
area = \
(optdf[ora].max() - optdf[ora].min())*(optdf[odec].max() - optdf[odec].min())*3600**2
nm_global = groups[ora].count().values/area
iglobal = np.where(rmagbin > 23.)[0][:-1]
if corr_glob:
nm[iglobal] = nm_global[iglobal]
elif globonly:
return nm_global, rmagbin
return nm,rmagbin
#def getqm(match,rmagbin, Q, NX, nm, r0=2.5):
def getqm(match,rmagbin, Q, nm, NX, r0=3.0):
'''
Estimate q(m) -- the expected magnitude distribution of the true
optical counterparts at magnitude m
'''
grp=match.groupby(pd.cut(match['rmag'].values,bins=rmagbin))
real_m=grp.rax.count().values# - np.pi*r0**2*NX*nm
real_m[np.where(real_m < 0.)] = \
0.1*nm[np.where(real_m < 0.)]*np.pi*NX*r0**2
qm = real_m*Q/np.sum(real_m)
rmagarr = np.array([])
qmarr = np.array([])
nmarr = np.array([])
for index, i in enumerate(rmagbin[:-1]):
rmagarr = np.hstack((rmagarr,np.linspace(i, rmagbin[index+1], 5)))
qmarr = np.hstack((qmarr, np.zeros(5) + qm[index]))
result = lowess(qmarr,rmagarr,frac=0.2)
x_smooth = result[:,0]
y_smooth = result[:,1]
return x_smooth, y_smooth, Q, qm#, real_m
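# Background note (a brief summary, not part of the original source): the
# quantities above implement the likelihood-ratio method of Sutherland &
# Saunders (1992), which scores each candidate counterpart as
#
#     LR(r, m) = q(m) * f(r) / n(m)
#
# with f(r) the positional PDF (pdf_sep_gen), q(m) the counterpart
# magnitude distribution (getqm) and n(m) the background surface density
# (getbkgcat). The reliability of candidate i among the candidates of a
# single X-ray source is then
#
#     Rc_i = LR_i / (sum_j LR_j + (1 - Q))
#
# which is what calc_RCMAX and calc_RC compute below.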
def calc_RCMAX(match, quntarr, Q,NX,LRfrac=0.2,first=False):
'''
R and C for a single LRthreshold value
'''
NX = float(NX)
LRth = quntarr
tmp = match[match.LR > LRth].copy().reset_index().drop('index',axis=1)
grp = tmp.groupby('xid')
#select sources with only one match
onematch = grp.filter(lambda x: len(x) == 1).copy()
onematch['Rc'] = onematch.LR.values/(onematch.LR.values + 1 - Q)
#these are sources with multiple matches
multimatch = tmp.loc[np.delete(tmp.index.values, onematch.index.values),:].reset_index().drop('index',axis=1)
onematch.reset_index(inplace=True)
nmx = tmp.xid.nunique() - onematch.xid.nunique()
if nmx == 0:
allmatch = onematch
elif nmx == 1:
multimatch['Rc'] = multimatch.LR/(multimatch.LR.sum() + (1-Q))
allmatch = pd.concat([onematch,multimatch],ignore_index=False)
else:
#regroup, and for each group only keep sources with LR larger than LRfrac*max(LR)
grp = multimatch.groupby('xid')
igood = grp.apply(lambda df:df.LR/df.LR.max() >= LRfrac).values
multimatch = multimatch[igood].reset_index().drop('index',axis=1)
grp = multimatch.groupby('xid')
multiRc = grp.apply(lambda df: df.LR/(df.LR.sum()+(1-Q))).values
multimatch['Rc'] = multiRc
allmatch = pd.concat([onematch,multimatch],ignore_index=False)
R = allmatch.Rc.mean()
C = allmatch.Rc.sum()/NX
return allmatch, R, C, LRth
def calc_RC(match, quntarr, Q,NX,LRfrac=0.2,first=False):
'''
R and C for an array of LR-threshold values.
If first==True, quntarr should contain quantiles between 0 and 1,
and the LR thresholds looped over are match.LR.quantile(quntarr);
otherwise quntarr is interpreted as the LR thresholds themselves.
This subroutine is used to find the LR threshold that maximizes R + C.
'''
NX = float(NX)
if first:
#if it's the first time, loop through the LR values in quantile arrays
#return R, C, LRth
LRth = match.LR.quantile(quntarr).values
print('first -- ', 'min/max LRth are ', np.min(LRth), np.max(LRth))
else:
LRth = quntarr
R = np.zeros(len(quntarr))
C = np.zeros(len(quntarr))
for index, lrthiter in enumerate(LRth):
tmp = match[match.LR > lrthiter].copy().reset_index().drop('index',axis=1)
grp = tmp.groupby('xid')
onematch = grp.filter(lambda x: len(x) == 1).copy() #select sources with only one match
#onematch.reset_index(inplace=True)
onematch['Rc'] = onematch.LR.values/(onematch.LR.values + 1 - Q)
#these are sources with multiple matches
multimatch = tmp.loc[np.delete(tmp.index.values, onematch.index.values),:].reset_index().drop('index',axis=1)
onematch.reset_index(inplace=True)
nmx = tmp.xid.nunique() - onematch.xid.nunique()
if nmx == 0:
#no x-ray sources have multiple good counterparts
allmatch = onematch
elif nmx == 1:
#only one x-ray sources have multiple good counterparts
multimatch['Rc'] = multimatch.LR/(multimatch.LR.sum() + (1-Q))
allmatch = pd.concat([onematch,multimatch],ignore_index=True)
else:
grp = multimatch.groupby('xid')
igood = grp.apply(lambda df:df.LR/df.LR.max() >= LRfrac).values
#dropping sources with LR < LRfrac*LRmax
multimatch = multimatch[igood].reset_index().drop('index',axis=1)
#regroup
grp = multimatch.groupby('xid')
multiRc = grp.apply(lambda df: df.LR/(df.LR.sum()+(1-Q))).values
multimatch['Rc'] = multiRc
allmatch = pd.concat([onematch,multimatch],ignore_index=False)
R[index] = allmatch.Rc.mean()
C[index] = allmatch.Rc.sum()/NX
return R, C, LRth
def calc_LR(xdf, xcat, optdf,catopt,nm, qm, Q, rmag, NX,rsearch=5.0,\
lth = None, LRfrac=0.2,lrmax=None,\
magname = 'imag_psf',xerrname='xposerr',
xra = 'RA', xdec = 'DEC', ora = 'ra', odec = 'dec',
opticalid = 'hscid',opterr = 0.1,pdf='Rayleigh',first=False):
'''
Compute LR = f(r)*q(m)/n(m) for every optical source within rsearch
arcsec of each X-ray source, then threshold on LR to select reliable
counterparts. See the function signature for the input parameters and
their defaults.
'''
if first:
print('first calc_LR')
idxmm, idhsc, d2d , d3d=catopt.search_around_sky(xcat,rsearch*u.arcsec)
match = pd.DataFrame({'xid':idxmm,'optid':idhsc,'dist':d2d.arcsec,\
'rmag':optdf.loc[idhsc,magname].values,'xposerr':xdf.loc[idxmm,xerrname],\
'raopt':optdf.loc[idhsc,ora].values,'decopt':optdf.loc[idhsc,odec].values,\
'rax':xdf.loc[idxmm,xra].values,'decx':xdf.loc[idxmm,xdec].values,\
'optname':optdf.loc[idhsc,opticalid].values})
#print('match len = ',len(match), 'xid nunique = ', match.xid.nunique())
fr = pdf_sep_gen(match.dist.values,match.xposerr.values,opterr,pdf=pdf)
n_m = pchip(rmag, nm)#, bounds_error=False,fill_value='extrapolate')
q_m = pchip(rmag, qm)#, bounds_error=False,fill_value='extrapolate')
fnm = n_m(match.rmag.values)
fqm = q_m(match.rmag.values)
fqm[np.where(fqm < 0.)] = 1e-8
fnm[np.where(fnm < 0.)] = 1e-8
LR = fr*fqm/fnm
match['LR'] = pd.Series(LR, index=match.index)
match['matchid'] = pd.Series(range(len(match)),index=match.index)
match['raoff'] = pd.Series((match.rax - match.raopt)*3600., index=match.index)
match['decoff'] = pd.Series((match.decx - match.decopt)*3600., index=match.index)
#several situations :
#1. all matches are unique, no further action is required.
if match.xid.nunique() - len(match) == 0:
# Every X-ray source has exactly one candidate. Callers unpack six
# values, so return the minimum LR as both lthmax and LRth.
lrmin = match.LR.min()
return match, match, 1.0, 1.0, lrmin, lrmin
else:
if lth is None:
#If the array of lth values is not provided,
#guess it by assuming that only NX sources would be reliable,
#so loop through the LR values around that LR quantile
#qcenter = match.LR.quantile(float(NX)/len(match))
qcenter = 1. - 1.5*float(NX)/len(match)
if qcenter < 0.:
qcenter = 0.1
lth = np.linspace(0.5*qcenter,
min([2.0*qcenter, 0.95]), 30)
#print(lth)
if lrmax is None:
#first
R, C, LRth = calc_RC(match, lth, Q, NX,LRfrac=LRfrac,first=first)
lthmax = LRth[np.argmax((R+C))]
if not np.isscalar(lthmax):
if len(lthmax) >= 1:
lthmax = lthmax[0]
goodmatch, R, C, LRth = calc_RCMAX(match,lthmax, Q, len(xcat),LRfrac=LRfrac)
return match, goodmatch, R, C, lthmax, LRth
else:
goodmatch, R, C, LRth = calc_RCMAX(match,lrmax, Q, len(xcat),LRfrac=LRfrac)
return match, goodmatch, R, C, lrmax, LRth
def likmatch(xdf, xcat, optdf_in, catopt, radecerr = False, r0=2.5,rsearch=5.0, \
r_in = 7., r_out=35., lth = None,LRfrac=0.5,lrmax=None,\
nmagbin=15, niter=10,numid='numid',magname = 'imag_psf',xerrname='xposerr',\
xra = 'RA', xdec = 'DEC', ora = 'ra', odec = 'dec',\
opticalid = 'hscid',opterr=0.1,pdf='Rayleigh',verbose=True):
'''
Likelihood-ratio based source matching.
Currently based on HSC public data release 1
(wide survey) in the XMM-LSS region.
Input: source-list data frames and the astropy coordinate objects
computed from them.
See the input parameters for default column names.
***Note that ``opticalid`` should be provided for each unique
optical source.
Default: xdf is in XMM SRCLIST format,
optdf is for HSC.
Input parameters:
r0 - radius used for defining q(m)
r_in and r_out - radii used for selecting background sources
(optical sources whose distance from every X-ray source is larger than
r_in and smaller than r_out are defined as background sources.)
'''
if (len(catopt) != len(optdf_in)) or (len(xcat) != len(xdf)):
raise ValueError(
'x/opt catalogs should be the astropy coordinate objects '
'computed from the dataframes!')
optdf = optdf_in.copy(deep=True)
optdf.set_index(numid,inplace=True)
#making a copy for output
dfout = xdf.copy(deep=True)
dfout.reset_index(inplace=True)
#Background number surface density
nm, rmagbin = getbkgcat(xcat,catopt,optdf,r_in = r_in, r_out=r_out,
nmagbin=nmagbin, magname = magname,ora=ora,odec=odec)
if verbose:print('Calculating background mag. distribution, nm')
#nm = nm/np.sum(nm)
#find the number of X-ray sources with at least one match within 1' (sample completeness)
idopt_r0,d2d,d3d=xcat.match_to_catalog_sky(catopt)#,1.0*u.arcmin)
NX = sum(d2d.arcmin <= 1.)
idopt_r0,idxmm,d2d,d3d=xcat.search_around_sky(catopt,r0*u.arcsec)
N1 = float(len(np.unique(idopt_r0)))
Q = N1/NX
print('Q = ', Q, ', N1 = ',N1, ' NX = ', NX)
if (N1 != float(len(idopt_r0))):
print('duplicated optical sources in qm calculation')
opt_qm = optdf.loc[idopt_r0,:]
grp=opt_qm.groupby(pd.cut(opt_qm[magname].values,bins=rmagbin))
total_m=grp[ora].count().values
real_m0=total_m-np.pi*r0**2*NX*nm
real_m0[np.where(real_m0 < 0.)] = 0.1*nm[np.where(real_m0 < 0.)]*np.pi*NX*r0**2
qm0 = real_m0*(Q/np.sum(real_m0))
rmagarr = np.array([])
qmarr = np.array([])
nmarr = np.array([])
for index, i in enumerate(rmagbin[:-1]):
rmagarr = np.hstack((rmagarr,np.linspace(i, rmagbin[index+1], 5)))
qmarr = np.hstack((qmarr, np.zeros(5) + qm0[index]))
nmarr = np.hstack((nmarr, np.zeros(5) + nm[index]))
result = lowess(qmarr,rmagarr,frac=0.2)
rmagsmooth = result[:,0]
qmsmooth = result[:,1]
result = lowess(nmarr,rmagarr,frac=0.2)
#rmagsmooth = result[:,0]
nmsmooth = result[:,1]
#for unrealistical qm values (<0), assuming the real counterpart distribution is the same
#as the background
#qm0[np.where(qm0 < 0.)] = nm[np.where(qm0 < 0.)]
rmag = rmagsmooth#binvalue(rmagbin)
if verbose:print('Calculating initial counterpart mag. dist., qm')
if verbose:print('Calculating background mag. distribution, rmag')
density_raw = pd.DataFrame({
'rmag':binvalue(rmagbin),
'qm0':qm0,
'nm':nm
}
)
density = pd.DataFrame({'rmag':rmag,'qm0':qmsmooth,'qms'+str(np.round(Q,2)):qmsmooth,'nm':nmsmooth})#,'real_ms':real_m0})
#With qm, nm, and Q, calculate the first match
if verbose:print('First LR matching')
match, goodmatch, R, C, lthmax, LRth = \
calc_LR(xdf, xcat, optdf,catopt,nmsmooth, qmsmooth, Q, rmag, NX, rsearch=rsearch,LRfrac=LRfrac,\
lth = lth,lrmax=lrmax, magname = magname,xerrname=xerrname,
xra = xra, xdec = xdec, ora = ora, odec = odec,
opticalid = opticalid,opterr=opterr,pdf=pdf,first=True)
if verbose:print('Q0='+str(Q), 'R0='+str(R),'C0='+str(C), len(goodmatch), lthmax)
#With the new ``matched sources'', recalculate qm again until C and R converges
if lrmax is None:
for i in range(niter):
if len(goodmatch) == 0:
print('No goodmatches (LRthreshold = ',lthmax,'), resetting to 0.4')
lthmax = 0.4
lth = np.sort(np.hstack((match.LR.quantile([0.1, 0.25, 0.5, 0.75, 0.9]).values, \
np.linspace(lthmax*0.5,lthmax*1.5,5))))
lthmax0 = lthmax * 1.
x_smooth, qm, Q, qmraw = getqm(goodmatch,rmagbin, C, nm, NX, r0 = r0)#, NX, nm)
match, goodmatch, R, C, lthmax, LRth = \
calc_LR(xdf, xcat, optdf,catopt,nmsmooth, qm, Q, rmag, NX, rsearch=rsearch,LRfrac=LRfrac,\
lth = lth, lrmax=lrmax , magname = magname,xerrname=xerrname,\
xra = xra, xdec = xdec, ora = ora, odec = odec,\
opticalid = opticalid,opterr=opterr,pdf=pdf, first=False)
density['qm'+str(i)+'_'+str(np.round(Q,2))] = pd.Series(qm,index=density.index)
density_raw['qm'+str(i)+'_'+str(np.round(Q,2))] = pd.Series(qmraw,index=density_raw.index)
#density['real_m'+str(i)] = pd.Series(real_m,index=density.index)
if verbose:print('R, C, len(goodmatch), LRth:' ,R, C, len(goodmatch),lthmax)
if verbose:print('Iter',i, 'new LRth = ', lthmax, 'old LRth =', lthmax0 )
if (np.abs(lthmax0 - lthmax) < 0.01) & (lthmax > 0.1) & (i >= 4):
if verbose:print('LR threshold converges, breaking now')
density['qmfinal'] = pd.Series(qm,index=density.index)
density_raw['qmfinal'] = pd.Series(qmraw,index=density_raw.index)
break
elif i == max(range(niter)):
density['qmfinal'] = pd.Series(qm,index=density.index)
density_raw['qmfinal'] = pd.Series(qmraw,index=density_raw.index)
return match,goodmatch, R, C, density, density_raw, lthmax, rmagbin
else:
match, goodmatch, R, C, lthmax, LRth = \
calc_LR(xdf, xcat, optdf,catopt,nmsmooth, qmsmooth, Q, rmag, NX, rsearch=rsearch,LRfrac=LRfrac,\
lth = lth,lrmax=lrmax, magname = magname,xerrname=xerrname,
xra = xra, xdec = xdec, ora = ora, odec = odec,
opticalid = opticalid,opterr=opterr,pdf=pdf)
x_smooth, qm, Q, qmraw = getqm(goodmatch,rmagbin, C, nm, NX, r0 = r0)
density['qmfinal'] = pd.Series(qm,index=density.index)
density_raw['qmfinal'] = pd.Series(qmraw,index=density_raw.index)
return match,goodmatch, R, C, density, density_raw, lthmax, rmagbin
def likmatch_rerun(xdf, xcat, optdf_in, catopt, density, radecerr = False, r0=2.5,rsearch=5.0, \
r_in = 7., r_out=35., lth = np.linspace(0.05,0.9,10),LRfrac=0.2,lrmax=None,\
nmagbin=15, niter=10,numid='numid',magname = 'imag_psf',xerrname='xposerr',\
xra = 'RA', xdec = 'DEC', ora = 'ra', odec = 'dec',\
opticalid = 'hscid',opterr=0.1,pdf='Rayleigh',verbose=True,rc=False):
'''
similar to likmatch, but requires the density output from likmatch
useful for shift-and-rematch simulations
'''
optdf = optdf_in.copy(deep=True)
optdf.set_index(numid,inplace=True)
NX = float(len(xcat))
idopt_r0,idxmm,d2d,d3d=xcat.search_around_sky(catopt,r0*u.arcsec)
N1 = float(len(np.unique(idopt_r0)))
Q = N1/NX
nm = density.nm.values
qm = density.qmfinal.values
rmag = density.rmag.values
match, goodmatch, R, C, lthmax, LRth = \
calc_LR(xdf, xcat, optdf,catopt,nm, qm, Q, rmag, NX, rsearch=rsearch,LRfrac=LRfrac,\
lth = lth,lrmax=lrmax, magname = magname,xerrname=xerrname,
xra = xra, xdec = xdec, ora = ora, odec = odec,
opticalid = opticalid,opterr=opterr,pdf=pdf)
if rc:
return match,goodmatch, R, C
else:
return match,goodmatch
def likmatch_ext(
xdf, xcat, optdf_in, catopt, density, r0=3.0, rsearch=10.0, \
r_in = 10., r_out=50., \
lth = None, LRfrac=0.5, lrmax=None, \
nmagbin=15, niter=10, numid='numid', magname = 'imag_psf',
xerrname='xposerr', xra = 'RA', xdec = 'DEC', \
ora = 'ra', odec = 'dec', opticalid = 'hscid',opterr=0.1, \
pdf='Rayleigh',verbose=True):
'''
Likelihood ratio based source matching.
different from the original likmatch function,
this one requires an input array true-counterpart mag, which will be used
to calculate q(m) using kernel density estimation
The background mag. distribution nm is optional
'''
optdf = optdf_in.copy(deep=True)
optdf.set_index(numid,inplace=True)
NX = float(len(xcat))
idopt_r0,idxmm,d2d,d3d=xcat.search_around_sky(catopt,r0*u.arcsec)
N1 = float(len(np.unique(idopt_r0)))
Q = N1/NX
nm = density.nm.values
qm = density.qmfinal.values
rmag = density.rmag.values
match, goodmatch, R, C, lthmax, LRth = \
calc_LR(xdf, xcat, optdf,catopt,nm, qm, Q, rmag, NX, rsearch=rsearch,LRfrac=LRfrac,\
lth = lth,lrmax=lrmax, magname = magname,xerrname=xerrname,
xra = xra, xdec = xdec, ora = ora, odec = odec,
opticalid = opticalid,opterr=opterr,pdf=pdf,first=True)
if verbose:print('Q0='+str(Q), 'R0='+str(R),'C0='+str(C), len(goodmatch), lthmax)
return match,goodmatch, R, C, lthmax
'''
#With the new ``matched sources'', recalculate qm again until C and R converges
if lrmax is None:
for i in range(niter):
if len(goodmatch) == 0:
print('No goodmatches (LRthreshold = ',lthmax,'), resetting to 0.4')
lthmax = 0.4
lth = np.sort(np.hstack((match.LR.quantile([0.1, 0.25, 0.5, 0.75, 0.9]).values, \
np.linspace(lthmax*0.5,lthmax*1.5,5))))
lthmax0 = lthmax * 1.
#qm, Q, real_m = getqm(goodmatch,rmagbin, C, nm, NX, r0 = r0)#, NX, nm)
match, goodmatch, R, C, lthmax, LRth = \
calc_LR(xdf, xcat, optdf,catopt,nm, qm, Q, rmag, rsearch=rsearch,LRfrac=LRfrac,\
lth = lth, lrmax=lrmax , magname = magname,xerrname=xerrname,\
xra = xra, xdec = xdec, ora = ora, odec = odec,\
opticalid = opticalid,opterr=opterr,pdf=pdf, first=False)
#density['qm'+str(i)+'_'+str(np.round(Q,2))] = pd.Series(qm,index=density.index)
#density['real_m'+str(i)] = pd.Series(real_m,index=density.index)
if verbose:print(R, C, len(goodmatch),lthmax)
if verbose:print('Iter',i, 'new LRth = ', lthmax, 'old LRth =', lthmax0 )
if (np.abs(lthmax0 - lthmax) < 0.01) & (lthmax > 0.1) & (i >= 4):
if verbose:print('LR threshold converges, breaking now')
#density['qmfinal'] = pd.Series(qm,index=density.index)
break
elif i == max(range(niter)):
print('max niter reached, should check convergence')
#density['qmfinal'] = pd.Series(qm,index=density.index)
return match,goodmatch, R, C, lthmax
else:
match, goodmatch, R, C, lthmax, LRth = \
calc_LR(xdf, xcat, optdf,catopt,nm, qm, Q, rmag, rsearch=rsearch,LRfrac=LRfrac,\
lth = lth,lrmax=lrmax, magname = magname,xerrname=xerrname,
xra = xra, xdec = xdec, ora = ora, odec = odec,
opticalid = opticalid,opterr=opterr,pdf=pdf)
#qm, Q, real_m = getqm(goodmatch,rmagbin, C, nm, NX, r0 = r0)
#density['qmfinal'] = pd.Series(qm,index=density.index)
return match,goodmatch, R, C, lthmax
'''
def finalmatch(match,goodmatch):
match.set_index(match.matchid.values,inplace=True)
mid_all = np.arange(len(match))
mid_all[goodmatch.matchid.values] = -1
badmatch = match.loc[mid_all[mid_all > -1],:]
#if an xid already has a counterpart in goodmatch, drop it.
badmatch = badmatch[np.in1d(badmatch.xid.values, goodmatch.xid.unique(),invert=True)].copy()
badmatch.reset_index(inplace=True)
bad_ok = badmatch.drop_duplicates('xid',keep=False)
ibad = np.arange(len(badmatch))
ibad[bad_ok.index.values] = -1
bad_bad = badmatch.loc[np.where(ibad > -1)[0],:]
bad_bad.drop('index',axis=1,inplace=True)
okmatch = pd.concat([goodmatch, bad_ok])
return okmatch, bad_bad
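# Hedged end-to-end sketch (variable and column names are illustrative;
# optdf must carry a unique 'numid' and 'hscid' per optical source):
#
#     from astropy.coordinates import SkyCoord
#     import astropy.units as u
#     xcat = SkyCoord(xdf['RA'].values*u.deg, xdf['DEC'].values*u.deg)
#     catopt = SkyCoord(optdf['ra'].values*u.deg, optdf['dec'].values*u.deg)
#     match, good, R, C, density, density_raw, lthmax, rmagbin = \
#         likmatch(xdf, xcat, optdf, catopt)
#     okmatch, ambiguous = finalmatch(match, good)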
| CTJChen/ctc_astropylib | mlematch.py | Python | apache-2.0 | 23,653 | [
"Gaussian"
] | 1a07d2f0cdaa6650ef81eae31625ab2f8bb61bf0914c6fa9741d3a73d8d1d472 |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1447321436.160346
__CHEETAH_genTimestamp__ = 'Thu Nov 12 18:43:56 2015'
__CHEETAH_src__ = '/home/knuth/openpli-oe-core/build/tmp/work/fusionhd-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+5837c87afc-r0/git/plugin/controllers/views/web/serviceplayable.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Nov 12 18:43:41 2015'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class serviceplayable(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(serviceplayable, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_11093939 = _filter
filterName = u'WebSafe'
if filterName in self._CHEETAH__filters:
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2serviceplayable>
\t<e2servicereference>''')
_v = VFFSL(SL,"service.servicereference",True) # u'$service.servicereference' on line 4, col 22
if _v is not None: write(_filter(_v, rawExpr=u'$service.servicereference')) # from line 4, col 22.
write(u'''</e2servicereference>
\t<e2isplayable>''')
_v = VFFSL(SL,"str",False)(VFFSL(SL,"service.isplayable",True)) # u'$str($service.isplayable)' on line 5, col 16
if _v is not None: write(_filter(_v, rawExpr=u'$str($service.isplayable)')) # from line 5, col 16.
write(u'''</e2isplayable>
</e2serviceplayable>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_11093939
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_serviceplayable= 'respond'
## END CLASS DEFINITION
if not hasattr(serviceplayable, '_initCheetahAttributes'):
templateAPIClass = getattr(serviceplayable, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(serviceplayable)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=serviceplayable()).run()
| pli3/e2-openwbif | plugin/controllers/views/web/serviceplayable.py | Python | gpl-2.0 | 5,360 | [
"VisIt"
] | b54f428d706f63fb2adc9bf13e7c03dd7880c6c3ff37a9609b332b9bee1f7a5f |
from ...tests.instance import gaussian_instance
import numpy as np
from regreg.atoms.slope import slope as slope_atom
import regreg.api as rr
from ..slope import slope
from ..lasso import full_targets, selected_targets
from ...tests.decorators import rpy_test_safe
try:
from rpy2.robjects.packages import importr
from rpy2 import robjects
import rpy2.robjects.numpy2ri
rpy_loaded = True
except ImportError:
rpy_loaded = False
if rpy_loaded:
def slope_R(X, Y, W = None, normalize = True, choice_weights = "gaussian", sigma = None):
rpy2.robjects.numpy2ri.activate()
robjects.r('''
slope = function(X, Y, W , normalize, choice_weights, sigma, fdr = NA){
if(is.na(sigma)){
sigma=NULL} else{
sigma = as.matrix(sigma)[1,1]}
if(is.na(fdr)){
fdr = 0.1 }
if(normalize=="TRUE"){
normalize = TRUE} else{
normalize = FALSE}
if(is.na(W))
{
if(choice_weights == "gaussian"){
lambda = "gaussian"} else{
lambda = "bhq"}
result = SLOPE(X, Y, fdr = fdr, lambda = lambda, normalize = normalize, sigma = sigma)
} else{
result = SLOPE(X, Y, fdr = fdr, lambda = W, normalize = normalize, sigma = sigma)
}
return(list(beta = result$beta, E = result$selected, lambda_seq = result$lambda, sigma = result$sigma))
}''')
r_slope = robjects.globalenv['slope']
n, p = X.shape
r_X = robjects.r.matrix(X, nrow=n, ncol=p)
r_Y = robjects.r.matrix(Y, nrow=n, ncol=1)
if normalize is True:
# StrVector takes a sequence; passing a bare string would split it
# into single characters. The R side compares against "TRUE".
r_normalize = robjects.StrVector(['TRUE'])
else:
r_normalize = robjects.StrVector(['FALSE'])
if W is None:
r_W = robjects.NA_Logical
if choice_weights == "gaussian":
r_choice_weights = robjects.StrVector(['gaussian'])
elif choice_weights == "bhq":
r_choice_weights = robjects.StrVector(['bhq'])
else:
r_W = robjects.r.matrix(W, nrow=p, ncol=1)
if sigma is None:
r_sigma = robjects.NA_Logical
else:
r_sigma = robjects.r.matrix(sigma, nrow=1, ncol=1)
result = r_slope(r_X, r_Y, r_W, r_normalize, r_choice_weights, r_sigma)
result = np.asarray(result.rx2('beta')), np.asarray(result.rx2('E')), \
np.asarray(result.rx2('lambda_seq')), np.asscalar(np.array(result.rx2('sigma')))
rpy2.robjects.numpy2ri.deactivate()
return result
@rpy_test_safe(libraries=['SLOPE'])
def test_outputs_SLOPE_weights(n=500, p=100, signal_fac=1., s=5, sigma=3., rho=0.35):
inst = gaussian_instance
signal = np.sqrt(signal_fac * 2. * np.log(p))
X, Y, beta = inst(n=n,
p=p,
signal=signal,
s=s,
equicorrelated=False,
rho=rho,
sigma=sigma,
random_signs=True)[:3]
sigma_ = np.sqrt(np.linalg.norm(Y - X.dot(np.linalg.pinv(X).dot(Y))) ** 2 / (n - p))
r_beta, r_E, r_lambda_seq, r_sigma = slope_R(X,
Y,
W = None,
normalize = True,
choice_weights = "gaussian",
sigma = sigma_)
print("estimated sigma", sigma_, r_sigma)
print("weights output by R", r_lambda_seq)
print("output of est coefs R", r_beta)
pen = slope_atom(r_sigma * r_lambda_seq, lagrange=1.)
loss = rr.squared_error(X, Y)
problem = rr.simple_problem(loss, pen)
soln = problem.solve()
print("output of est coefs python", soln)
print("relative difference in solns", np.linalg.norm(soln-r_beta)/np.linalg.norm(r_beta))
@rpy_test_safe(libraries=['SLOPE'])
def test_randomized_slope(n=500, p=100, signal_fac=1.2, s=5, sigma=1., rho=0.35, randomizer_scale= np.sqrt(0.25),
target = "full", use_MLE=True):
while True:
inst = gaussian_instance
signal = np.sqrt(signal_fac * 2. * np.log(p))
X, Y, beta = inst(n=n,
p=p,
signal=signal,
s=s,
equicorrelated=False,
rho=rho,
sigma=sigma,
random_signs=True)[:3]
sigma_ = np.sqrt(np.linalg.norm(Y - X.dot(np.linalg.pinv(X).dot(Y))) ** 2 / (n - p))
r_beta, r_E, r_lambda_seq, r_sigma = slope_R(X,
Y,
W=None,
normalize=True,
choice_weights="gaussian", #put gaussian
sigma=sigma_)
conv = slope.gaussian(X,
Y,
r_sigma * r_lambda_seq,
randomizer_scale=randomizer_scale * sigma_)
signs = conv.fit()
nonzero = signs != 0
print("dimensions", n, p, nonzero.sum())
if nonzero.sum() > 0:
if target == 'full':
(observed_target,
cov_target,
cov_target_score,
alternatives) = full_targets(conv.loglike,
conv._W,
nonzero, dispersion=sigma_)
elif target == 'selected':
(observed_target,
cov_target,
cov_target_score,
alternatives) = selected_targets(conv.loglike,
conv._W,
nonzero, dispersion=sigma_)
if target == "selected":
beta_target = np.linalg.pinv(X[:, nonzero]).dot(X.dot(beta))
else:
beta_target = beta[nonzero]
if use_MLE:
estimate, _, _, pval, intervals, _ = conv.selective_MLE(observed_target,
cov_target,
cov_target_score)
else:
_, pval, intervals = conv.summary(observed_target,
cov_target,
cov_target_score,
alternatives,
compute_intervals=True)
coverage = (beta_target > intervals[:, 0]) * (beta_target < intervals[:, 1])
break
return pval[beta_target == 0], pval[beta_target != 0], coverage, intervals
def main(nsim=100):
P0, PA, cover, length_int = [], [], [], []
for i in range(nsim):
p0, pA, cover_, intervals = test_randomized_slope()
cover.extend(cover_)
P0.extend(p0)
PA.extend(pA)
print('coverage', np.mean(cover))
| selective-inference/selective-inference | selectinf/randomized/tests/test_slope.py | Python | bsd-3-clause | 7,374 | [
"Gaussian"
] | a2f84396cf7727a40528739bb6cf1e46a799fddb4d44a57f28a1ee40b054c302 |
# -*- coding: utf-8 -*-
# Copyright 2019 The GraphicsFuzz Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bug signature module.
Used to compute the "signature" of a bug, typically using the error message or the function name from the top of the
stack trace.
"""
import argparse
import re
import sys
from pathlib import Path
from typing import Callable, Match, Optional, Pattern
from gfauto import subprocess_util, util
from gfauto.gflogging import log
# .* does not match newlines
# (?: ) non-group parentheses
NO_SIGNATURE = "no_signature"
HEX_LIKE = r"(?:0x)?[0-9a-fA-F]"
# 06-15 21:17:00.039 7517 7517 F DEBUG : #00 pc 00000000009d9c34 /my/library.so ((anonymous namespace)::Bar::Baz(aaa::MyInstr*, void* (*)(unsigned int))+456)
# Another example of the function signature: /my/library.so (myFunction+372)
# Another example of the function signature: /my/library.so (myFunction(...)+372)
# Just look for anything that contains "word(" or "word+" from after the (hex-like) PC address.
PATTERN_ANDROID_BACKTRACE_FUNCTION = re.compile(
r"\n.*#00 pc " + HEX_LIKE + r"+ (.*[\w\d_]+[(+].*)"
)
ANDROID_BACKTRACE_COMMON_TEXT_TO_REMOVE = re.compile(
r"(?:"
r"vendor/"
r"|hw/"
r"|data/local/(?:tmp/)?"
r"|system/(?:lib(?:64)?/)?"
r"|lib(?:64)?/"
r"|apex/"
r"|bionic/"
r"|com.android.runtime(?:/lib(?:64)?)?/"
r"|anonymous namespace"
r"|\(BuildId: " + HEX_LIKE + r"+\)"
r"|.*\.apk!"
r"|offset"
r")"
)
PATTERN_CDB_CALL_SITE = re.compile(
fr"\n{HEX_LIKE}+`{HEX_LIKE}+ {HEX_LIKE}+`{HEX_LIKE}+ (.*)"
)
# E.g.
# 01-23 11:51:21.141 1814 1811 F DEBUG : #00 pc 00000000012313ec /vendor/lib64/egl/libGLES.so (BuildId: 123b5800abef5fc4b86c0032ddd223d5)
PATTERN_ANDROID_BACKTRACE_CATCHALL = re.compile(fr"\n.*#00 pc ({HEX_LIKE}+ .*)")
# E.g. ERROR: temp/.../variant/shader.frag:549: 'variable indexing fragment shader output array' : not supported with this profile: es
# variable indexing fragment shader output array <-- group 1
# E.g. ERROR: reports/.../part_1_preserve_semantics/reduction_work/variant/shader_reduced_0173/0_glsl/shader_reduced_0173.frag:456: '=' : cannot convert from ' const 3-component vector of bool' to ' temp bool'
PATTERN_GLSLANG_ERROR = re.compile(r"ERROR: .*?:\d+: (.*)")
# E.g.
# glslangValidator: ../glslang/MachineIndependent/ParseHelper.cpp:2212: void glslang::TParseContext::nonOpBuiltInCheck(const glslang::TSourceLoc&, const glslang::TFunction&, glslang::TIntermAggregate&): Assertion `PureOperatorBuiltins == false' failed.
PATTERN_ASSERTION_FAILURE = re.compile(r"\n.*?:\d+: (.*? [Aa]ssert(?:ion)?)")
# Only used if "0 pass, 1 fail" is found.
# E.g. /data/local/tmp/graphicsfuzz/test.amber: 256: probe ssbo format does not match buffer format
# probe ssbo format does not match buffer format
PATTERN_AMBER_ERROR = re.compile(r"\n.*?[.]amber: \d+: (.*)")
# E.g. error: line 0: Module contains unreachable blocks during merge return. Run dead branch elimination before merge return.
# error: line 0: Module contains unreachable blocks during merge return. Run dead branch elimination before merge return.
# Module contains unreachable blocks during merge return. Run dead branch elimination before merge return.
PATTERN_SPIRV_OPT_ERROR: Pattern[str] = re.compile(r"error: line \d+: (.*)")
# E.g.
# Backtrace:
# /data/git/graphicsfuzz/graphicsfuzz/target/graphicsfuzz/bin/Linux/spirv-opt(_ZN8spvtools3opt21StructuredCFGAnalysis16SwitchMergeBlockEj+0x369)[0x5bd6d9]
# |--- group 1 -------------------------------------------------------|
#
# E.g. Backtrace:
# /home/runner/work/gfbuild-llpc/gfbuild-llpc/vulkandriver/drivers/llvm-project/llvm/lib/CodeGen/LiveInterval.cpp:758(_ZN4llvm9LiveRange20MergeValueNumberIntoEPNS_6VNInfoES2_)[0x135342c]
# |--- group 1 ----------------------------------------------------------------|
#
# |--- group 1 ----|
# Using "c" "<>" "<cc...>" for chars c.*c(<ccc>*<><ccccc>+)<cc>
PATTERN_CATCHSEGV_STACK_FRAME = re.compile(r"/.*/([^/(]*\([^)+\[]+)[+)]")
# E.g.
# /data/git/graphicsfuzz/gfauto/temp/june_20/binaries/swiftshader_vulkan/Linux/libvk_swiftshader.so(+0x1d537d)[0x7f51ebd1237d]
# /data/git/graphicsfuzz/gfauto/temp/june_20/binaries/swiftshader_vulkan/Linux/libvk_swiftshader.so 0x1d537d
# ^ group 1 ^ group 2
PATTERN_CATCHSEGV_STACK_FRAME_ADDRESS = re.compile(r"(.*)\(\+([x\da-fA-F]+)+\)\[")
PATTERN_SWIFT_SHADER_ABORT = re.compile(r":\d+ ABORT:(.*)")
PATTERN_SWIFT_SHADER_WARNING = re.compile(r":\d+ WARNING:(.*)")
PATTERN_CATCH_ALL_ERROR = re.compile(r"\nERROR: (.*)", flags=re.IGNORECASE)
# [\s\S] matches anything, including newlines.
PATTERN_LLVM_FATAL_ERROR = re.compile(
r"LLVM FATAL ERROR:[ ]*Broken function found, compilation aborted![\s\S]*STDERR:\n(.*)"
)
PATTERN_LLVM_MACHINE_CODE_ERROR = re.compile(
r"ERROR: LLVM FATAL ERROR:[ ]*Found .* machine code error[\s\S]*Bad machine code: (.*)"
)
PATTERN_LLVM_ERROR_DIAGNOSIS = re.compile(r"ERROR: LLVM DIAGNOSIS INFO: (.*)")
PATTERN_ADDRESS_SANITIZER_ERROR = re.compile(
r"SUMMARY: AddressSanitizer: ([a-z\-]+) .* in (.*)"
)
PATTERN_MESA_NIR_VALIDATION_ERROR = re.compile(
r"error: (.*)\(\.\./src/compiler/nir/nir_validate\.c:\d+\)"
)
PATTERN_MESA_SPIRV_PARSE_ERROR = re.compile(
r"SPIR-V parsing FAILED:\s+In file.*\n\s+(.*)"
)
PATTERN_AMBER_TOLERANCE_ERROR = re.compile(
r"is greater th[ae]n tolerance|Buffers have different values"
)
PATTERN_MALI_ERROR = re.compile(r"E mali [\w.]+:(.*)")
BAD_IMAGE_SIGNATURE = "bad_image"
def remove_hex_like(string: str) -> str:
temp = string
# Remove hex like chunks of 4 or more.
temp = re.sub(HEX_LIKE + r"{4,}", "", temp)
return temp
def clean_up(string: str, remove_numbers: bool = True) -> str:
temp: str = string
# Remove numbers.
if remove_numbers:
temp = re.sub(r"\d+", "", temp)
# Replace spaces with _.
temp = re.sub(r" ", "_", temp)
# Remove non-word, non-_ characters.
temp = re.sub(r"[^\w_]", "", temp)
# Replace multiple _ with _.
temp = re.sub(r"__+", "_", temp)
# Strip _
temp = temp.strip("_")
return temp
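# Example of the pipeline above (verifiable by running the function):
#
#     clean_up("some error: 123 (foo)")  ->  "some_error_foo"
#
# digits are stripped, spaces become underscores, remaining punctuation is
# dropped and runs of underscores are collapsed.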
def reduce_length(string: str) -> str:
return string[:50]
def basic_match(pattern: Pattern[str], log_contents: str) -> Optional[str]:
match: Optional[Match[str]] = re.search(pattern, log_contents)
if not match:
return None
group = match.group(1)
group = clean_up(group)
group = reduce_length(group)
return group
def get_function_signature_from_address(
module: Path,
address: str,
addr2line_mock: Optional[Callable[[Path, str], str]] = None,
) -> Optional[str]:
# stdout result can be mocked for testing.
stdout: str
if addr2line_mock:
stdout = addr2line_mock(module, address)
else:
try:
address_tool = util.tool_on_path("addr2line")
except util.ToolNotOnPathError:
return None
result = subprocess_util.run(
[str(address_tool), "-e", str(module), address, "-f", "-C"],
check_exit_code=False,
verbose=True,
)
if result.returncode != 0:
return None
stdout = result.stdout
lines = stdout.splitlines()
if not lines:
return None
if lines[0].startswith("??"):
return None
return lines[0]
def get_signature_from_log_contents( # pylint: disable=too-many-return-statements, too-many-branches, too-many-statements;
log_contents: str, addr2line_mock: Optional[Callable[[Path, str], str]] = None,
) -> str:
# noinspection PyUnusedLocal
match: Optional[Match[str]]
# noinspection PyUnusedLocal
group: Optional[str]
# Mali error.
# Find the last match.
last_match = None
for match in re.finditer(PATTERN_MALI_ERROR, log_contents):
# Assign each time to avoid linter warning B007.
last_match = match
if last_match:
group = "mali_" + last_match.group(1)
group = clean_up(group, remove_numbers=False)
group = reduce_length(group)
return group
# LLVM FATAL ERROR (special override).
group = basic_match(PATTERN_LLVM_FATAL_ERROR, log_contents)
if group:
return group
# LLVM MACHINE CODE ERROR (special override).
group = basic_match(PATTERN_LLVM_MACHINE_CODE_ERROR, log_contents)
if group:
return group
# LLVM ERROR DIAGNOSIS: should come before PATTERN_ASSERTION_FAILURE.
group = basic_match(PATTERN_LLVM_ERROR_DIAGNOSIS, log_contents)
if group:
return group
# AddressSanitizer error.
match = re.search(PATTERN_ADDRESS_SANITIZER_ERROR, log_contents)
if match:
group = match.group(1) + "_" + match.group(2)
group = clean_up(group)
group = reduce_length(group)
return group
# Mesa NIR validation error.
if "NIR validation failed" in log_contents:
group = basic_match(PATTERN_MESA_NIR_VALIDATION_ERROR, log_contents)
if group:
return group
# Mesa SPIR-V parse failure.
if "SPIR-V parsing FAILED" in log_contents:
group = basic_match(PATTERN_MESA_SPIRV_PARSE_ERROR, log_contents)
if group:
return group
# glslang error.
group = basic_match(PATTERN_GLSLANG_ERROR, log_contents)
if group:
return group
# Assertion error pattern, used by glslang.
group = basic_match(PATTERN_ASSERTION_FAILURE, log_contents)
if group:
return group
# Spirv-opt error.
group = basic_match(PATTERN_SPIRV_OPT_ERROR, log_contents)
if group:
return group
# ABORT message from SwiftShader.
group = basic_match(PATTERN_SWIFT_SHADER_ABORT, log_contents)
if group:
return group
# WARNING message from SwiftShader.
group = basic_match(PATTERN_SWIFT_SHADER_WARNING, log_contents)
if group:
return group
match = re.search(PATTERN_AMBER_TOLERANCE_ERROR, log_contents)
if match:
return BAD_IMAGE_SIGNATURE
# Amber error.
if "0 pass, 1 fail" in log_contents:
group = basic_match(PATTERN_AMBER_ERROR, log_contents)
if group:
return group
# Cdb stack trace
cdb_call_site = re.search(
PATTERN_CDB_CALL_SITE, log_contents
) # type: Optional[Match[str]]
if cdb_call_site:
site = cdb_call_site.group(1)
if "!" in site:
# We probably have symbols, so remove the address and everything after e.g. "+0x111 [file/path @ 123]"
site = re.sub(rf"\+{HEX_LIKE}+.*", "", site)
site = clean_up(site)
else:
# We don't have symbols so we may as well keep offsets around; don't remove numbers.
site = clean_up(site, remove_numbers=False)
site = reduce_length(site)
return site
# Android stack traces.
if "#00 pc" in log_contents:
lines = log_contents.split("\n")
for line in lines:
pc_pos = line.find("#00 pc")
if pc_pos == -1:
continue
line = line[pc_pos:]
if "/amber_ndk" in line:
return "amber_ndk"
break
# Check for stack line with libc alloc.
if re.search(r"\n.*#\d+ pc .*libc\.so \(\w?alloc", log_contents):
# Find the first stack frame without libc.so and replace the log_contents with that frame.
# We do this because the error is better identified by this line and because out of memory errors
# often occur at a nondeterministic location within libc.
for line in lines:
if (
re.search(r" #\d+ pc ", line)
and "libc.so" not in line
and "operator new" not in line
):
# Replace the stack frame number so it looks like the 0th frame.
line = re.sub(r" #\d+ ", " #00 ", line)
log_contents = f"\n{line}\n"
break
match = re.search(PATTERN_ANDROID_BACKTRACE_FUNCTION, log_contents)
if match:
group = match.group(1)
# Remove common text.
group = re.sub(ANDROID_BACKTRACE_COMMON_TEXT_TO_REMOVE, "", group)
group = clean_up(group)
group = reduce_length(group)
return group
# TODO: Maybe more.
# If we get here, we found #00 pc, but nothing else.
# This regex essentially matches the entire line after the hex-like PC address.
match = re.search(PATTERN_ANDROID_BACKTRACE_CATCHALL, log_contents)
if match:
group = match.group(1)
# Remove common text.
group = re.sub(ANDROID_BACKTRACE_COMMON_TEXT_TO_REMOVE, "", group)
# Don't remove hex-like chunks, nor numbers because we want to fallback to any hex offsets in this case.
# group = remove_hex_like(group)
group = clean_up(group, remove_numbers=False)
group = reduce_length(group)
return group
if "\nBacktrace:\n" in log_contents:
result = get_signature_from_catchsegv_backtrace(log_contents, addr2line_mock)
if result:
return result
group = basic_match(PATTERN_CATCH_ALL_ERROR, log_contents)
if group:
return group
if "Shader compilation failed" in log_contents:
return "compile_error"
if "Failed to link shaders" in log_contents:
return "link_error"
if "Calling vkCreateGraphicsPipelines Fail" in log_contents:
return "pipeline_failure"
# TODO: Check for Amber fence failure.
if "Resource deadlock would occur" in log_contents:
return "Resource_deadlock_would_occur"
if "pure virtual method called" in log_contents:
return "pure_virtual_method_called"
return NO_SIGNATURE
def get_signature_from_catchsegv_backtrace(
log_contents: str, addr2line_mock: Optional[Callable[[Path, str], str]] = None,
) -> Optional[str]:
lines = log_contents.splitlines()
i = 0
# Skip to just after "Backtrace:".
while True:
if i >= len(lines):
return None
if lines[i] == "Backtrace:":
i += 1
break
i += 1
# Find the first stack frame line.
# It will normally be the first line.
# It should start with "/".
# We skip libc stack frames.
while True:
if i >= len(lines):
return None
if lines[i].startswith("/"):
# Skip frame if it is libc.
if "libc.so" in lines[i]:
i += 1
continue
break
i += 1
group = basic_match(PATTERN_CATCHSEGV_STACK_FRAME, lines[i])
if group:
return group
result = get_signature_from_catchsegv_frame_address(lines[i], addr2line_mock)
if result:
return result
return None
def get_signature_from_catchsegv_frame_address(
log_contents: str, addr2line_mock: Optional[Callable[[Path, str], str]] = None,
) -> Optional[str]:
match = re.search(PATTERN_CATCHSEGV_STACK_FRAME_ADDRESS, log_contents)
if not match:
return None
module = Path(match.group(1))
address = match.group(2)
function_signature = None
if module.exists():
function_signature = get_function_signature_from_address(
module, address, addr2line_mock
)
if not function_signature or "nvvm" in function_signature:
# The module does not exist or we could not get any symbols using addr2line.
# Or: the function name contains "nvvm", which is seen in NVIDIA drivers but
# leads to a poor signature.
# As a last resort, we can use the module name + offset as the signature.
return get_hex_signature_from_frame(module, address)
function_signature = clean_up(function_signature)
function_signature = reduce_length(function_signature)
return function_signature
def get_hex_signature_from_frame(module: Path, address: str) -> str:
signature = f"{module.name}+{address}"
signature = clean_up(signature, remove_numbers=False)
signature = reduce_length(signature)
return signature
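# Illustrative example (module path made up): a frame in
# libvk_swiftshader.so at offset 0x1d537d yields the signature
# 'libvk_swiftshaderso0x1d537d' -- clean_up drops the '.' and '+' but
# keeps the hex offset because remove_numbers=False.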
def main() -> None:
parser = argparse.ArgumentParser(
description="A tool for extracting a signature from a log file."
)
parser.add_argument(
"log_file", help="The log file from which a signature should be extracted.",
)
parsed_args = parser.parse_args(sys.argv[1:])
log_file: Path = Path(parsed_args.log_file)
log(get_signature_from_log_contents(util.file_read_text(log_file)))
if __name__ == "__main__":
main()
| google/graphicsfuzz | gfauto/gfauto/signature_util.py | Python | apache-2.0 | 17,596 | [
"Amber"
] | 4a12b538025e9cb2afbcf5d91ae9f7545bfa1685546b54f5b9bc2cb1e8dda2a0 |
#!/usr/bin/env python3
'''
Implementations of the various surrogate model options for use with Bayesian
optimisation. Currently, only different Gaussian process implementations are
available, however other machine learning models and variations of Gaussian
processes are possible so long as they can predict uncertainty as well as a mean
prediction.
'''
# python 2 compatibility
from __future__ import (absolute_import, division, print_function, unicode_literals)
from .py2 import *
try:
import sklearn.gaussian_process as sk_gp
except ImportError:
pass # not required if not used
try:
import GPy
except ImportError:
pass # not required if not used
# local imports
from .utils import *
#TODO: tests
class Surrogate(object):
'''
A wrapper around specific models or libraries suitable for being used as a
surrogate model for Bayesian optimisation.
'''
def fit(self, X, y, hyper_params=None, max_its=None):
'''
train the model to fit the given data set {X, y}. If hyperparameters are
not provided then they are obtained by optimising the data likelihood,
guided by the given gp_params. If max_its is provided, then it
overrides the default maximum-iterations parameter.
'''
raise NotImplementedError()
def predict(self, X, std_dev=False):
'''
return the mean y-prediction for the given Xs, and also the variance if
std_dev=True
'''
raise NotImplementedError()
def get_hyper_params(self):
'''
return the hyperparameters of the model in a format suitable for storage
and passing to train.
When training with the same hyperparameters and dataset, the resulting
model should be identical. Alternatively, training with the same
hyperparameters with a different dataset is also possible.
'''
raise NotImplementedError()
def sample(self, x, n):
'''
return n y-samples for given the input x
'''
raise NotImplementedError()
def get_training_set(self):
'''
return the X and y arrays that fit() was called with
'''
raise NotImplementedError()
class SciKitGPSurrogate(Surrogate):
@staticmethod
def Custom(gp_params):
'''
Specialise a SciKitGPSurrogate with the given parameters which are passed to
the scikit GaussianProcessRegressor constructor
gp_params: a dictionary of parameters
'''
class SciKitSurrogate_Specialised(SciKitGPSurrogate):
def __init__(self, optimiser):
super().__init__(optimiser, gp_params)
return SciKitSurrogate_Specialised
def __init__(self, optimiser, gp_params=None):
if gp_params is None:
self.gp_params = dict(
alpha = 1e-10, # larger => more noise. Default = 1e-10
# nu=1.5 assumes the target function is once-differentiable
kernel = 1.0 * sk_gp.kernels.Matern(nu=1.5) + sk_gp.kernels.WhiteKernel(),
#kernel = 1.0 * sk_gp.kernels.RBF(),
n_restarts_optimizer = 10,
# make the mean 0 (theoretically a bad thing, see docs, but can help)
# with the constant offset in the kernel this shouldn't be required
# this may be a dangerous option, seems to make worse predictions
#normalize_y = True,
copy_X_train = True # whether to make a copy of the training data (in-case it is modified)
)
else:
self.gp_params = gp_params
self.log_warning = lambda warn: optimiser._log('GP warning: {}'.format(warn))
def fit(self, X, y, hyper_params=None, max_its=None):
# max_its overwrites the default parameters
if max_its is None:
gp_params = self.gp_params
else:
gp_params = self.gp_params.copy()
gp_params['n_restarts_optimizer'] = max_its
self.model = sk_gp.GaussianProcessRegressor(**gp_params)
with WarningCatcher(self.log_warning):
if hyper_params is None:
self.model.fit(X, y)
else:
# gp_params may not have everything defined
p = self.model.get_params()
kernel = p['kernel']
trained_kernel = kernel.clone_with_theta(np.array(hyper_params))
opt = p['optimizer']
self.model.set_params(optimizer=None)
# don't want to modify the kernel which is part of gp_params, so modify a clone
self.model.set_params(kernel=trained_kernel)
self.model.fit(X, y)
self.model.set_params(kernel=kernel, optimizer=opt)
def predict(self, X, std_dev=False):
with WarningCatcher(self.log_warning):
return self.model.predict(X, return_std=std_dev)
def get_hyper_params(self):
return np.copy(self.model.kernel_.theta)
def sample(self, x, n):
with WarningCatcher(self.log_warning):
# by default random_state uses a fixed seed! Setting to None uses the
# current numpy random state.
return self.model.sample_y(x, n, random_state=None)
def get_training_set(self):
return (self.model.X_train_, self.model.y_train_)
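# Minimal usage sketch (assumes scikit-learn is installed; the optimiser
# stub is illustrative -- the real optimiser class lives elsewhere in this
# package and only needs to provide _log here):
#
#     import numpy as np
#     class _StubOptimiser:
#         def _log(self, msg):
#             print(msg)
#     gp = SciKitGPSurrogate(_StubOptimiser())
#     X = np.linspace(0, 1, 20).reshape(-1, 1)
#     y = np.sin(4 * X).ravel()
#     gp.fit(X, y)
#     mean, std = gp.predict(X, std_dev=True)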
| mbway/Bayesian-Optimisation | old_library/surrogates.py | Python | gpl-3.0 | 5,378 | [
"Gaussian"
] | 4e6c527624e1b8de03c28e373873fb847e12f9a2eb11cfbe8db9323ac37aa15d |
# -----------------------------------------------------------------------------
# Name: iodefs.py (part of PyGMI)
#
# Author: Patrick Cole
# E-Mail: pcole@geoscience.org.za
#
# Copyright: (c) 2013 Council for Geoscience
# Licence: GPL-3.0
#
# This file is part of PyGMI
#
# PyGMI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyGMI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""Import raster data."""
import warnings
import os
import copy
from PyQt5 import QtWidgets, QtCore
import numpy as np
import rasterio
from rasterio.plot import plotting_extent
from rasterio.windows import Window
from rasterio.crs import CRS
from pygmi.raster.datatypes import Data
from pygmi.raster.dataprep import lstack
from pygmi.misc import ProgressBarText
class ComboBoxBasic(QtWidgets.QDialog):
"""
A combobox to select data bands.
Attributes
----------
parent : parent
reference to the parent routine
indata : dictionary
dictionary of input datasets
outdata : dictionary
dictionary of output datasets
"""
def __init__(self, parent=None):
super().__init__(parent)
self.parent = parent
self.indata = {}
self.outdata = {}
# create GUI
self.setWindowTitle('Band Selection')
self.vbox = QtWidgets.QVBoxLayout()
self.setLayout(self.vbox)
self.combo = QtWidgets.QListWidget()
self.combo.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
self.vbox.addWidget(self.combo)
self.buttonbox = QtWidgets.QDialogButtonBox()
self.buttonbox.setOrientation(QtCore.Qt.Horizontal)
self.buttonbox.setCenterButtons(True)
self.buttonbox.setStandardButtons(
QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)
self.vbox.addWidget(self.buttonbox)
self.buttonbox.accepted.connect(self.accept)
self.buttonbox.rejected.connect(self.reject)
def run(self):
"""
Run.
Returns
-------
bool
True if successful, False otherwise.
"""
self.parent.scene.selectedItems()[0].update_indata()
my_class = self.parent.scene.selectedItems()[0].my_class
data = my_class.indata.copy()
tmp = []
for i in data['Raster']:
tmp.append(i.dataid)
self.combo.addItems(tmp)
if not tmp:
return False
tmp = self.exec_()
if tmp != 1:
return False
atmp = [i.row() for i in self.combo.selectedIndexes()]
if atmp:
dtmp = []
for i in atmp:
dtmp.append(data['Raster'][i])
data['Raster'] = dtmp
my_class.indata = data
if hasattr(my_class, 'data_reset'):
my_class.data_reset()
if hasattr(my_class, 'data_init'):
my_class.data_init()
self.parent.scene.selected_item_info()
return True
class ImportData():
"""
Import Data - Interfaces with rasterio routines.
Attributes
----------
parent : parent
reference to the parent routine
outdata : dictionary
dictionary of output datasets
ifile : str
input file name. Used in main.py
"""
def __init__(self, parent=None):
self.ifile = ''
self.parent = parent
self.indata = {}
self.outdata = {}
self.filt = ''
if parent is None:
self.showprocesslog = print
self.piter = ProgressBarText().iter
else:
self.showprocesslog = parent.showprocesslog
self.piter = parent.pbar.iter
def settings(self, nodialog=False):
"""
Entry point into item.
Parameters
----------
nodialog : bool, optional
Run settings without a dialog. The default is False.
Returns
-------
bool
True if successful, False otherwise.
"""
if not nodialog:
ext = ('Common formats (*.ers *.hdr *.tif *.tiff *.sdat *.img '
'*.pix *.bil);;'
'ArcGIS BIL (*.bil);;'
'Arcinfo Binary Grid (hdr.adf);;'
'ASCII with .hdr header (*.asc);;'
'ASCII XYZ (*.xyz);;'
'ENVI (*.hdr);;'
'ESRI ASCII (*.asc);;'
'ERMapper (*.ers);;'
'ERDAS Imagine (*.img);;'
'GeoPak grid (*.grd);;'
'Geosoft UNCOMPRESSED grid (*.grd);;'
'Geosoft (*.gxf);;'
'GeoTiff (*.tif *.tiff);;'
'GMT netCDF grid (*.grd);;'
'PCI Geomatics Database File (*.pix);;'
'SAGA binary grid (*.sdat);;'
'Surfer grid (*.grd);;'
)
self.ifile, self.filt = QtWidgets.QFileDialog.getOpenFileName(
self.parent, 'Open File', '.', ext)
if self.ifile == '':
return False
os.chdir(os.path.dirname(self.ifile))
if self.filt == 'GeoPak grid (*.grd)':
dat = get_geopak(self.ifile)
elif self.filt == 'Geosoft UNCOMPRESSED grid (*.grd)':
dat = get_geosoft(self.ifile)
elif self.filt == 'ASCII with .hdr header (*.asc)':
dat = get_ascii(self.ifile)
elif self.filt == 'ESRI ASCII (*.asc)':
dat = get_ascii(self.ifile)
elif self.filt == 'ASCII XYZ (*.xyz)':
nval = 0.0
nval, ok = QtWidgets.QInputDialog.getDouble(self.parent,
'Null Value',
'Enter Null Value',
nval)
if not ok:
nval = 0.0
dat = get_raster(self.ifile, nval, piter=self.piter,
showprocesslog=self.showprocesslog)
else:
dat = get_raster(self.ifile, piter=self.piter,
showprocesslog=self.showprocesslog)
if dat is None:
if self.filt == 'Geosoft UNCOMPRESSED grid (*.grd)':
QtWidgets.QMessageBox.warning(self.parent, 'Error',
'Could not import the grid. '
'Please make sure it is a '
'Geosoft FLOAT grid, and not a '
'compressed grid. You can '
'export your grid to '
'this format using the Geosoft '
'Viewer.',
QtWidgets.QMessageBox.Ok)
else:
QtWidgets.QMessageBox.warning(self.parent, 'Error',
'Could not import the grid.',
QtWidgets.QMessageBox.Ok)
return False
output_type = 'Raster'
if 'Cluster' in dat[0].dataid:
dat = clusterprep(dat)
output_type = 'Cluster'
self.outdata[output_type] = dat
if dat[0].crs is None:
self.showprocesslog('Warning: Your data has no projection. '
'Please add a projection in the Display/Edit '
'Metadata interface.')
return True
def loadproj(self, projdata):
"""
Load project data into class.
Parameters
----------
projdata : dictionary
Project data loaded from JSON project file.
Returns
-------
chk : bool
A check to see if settings was successfully run.
"""
self.ifile = projdata['ifile']
self.filt = projdata['filt']
chk = self.settings(True)
return chk
def saveproj(self):
"""
Save project data from class.
Returns
-------
projdata : dictionary
Project data to be saved to JSON project file.
"""
projdata = {}
projdata['ifile'] = self.ifile
projdata['filt'] = self.filt
return projdata
class ImportRGBData():
"""
Import RGB Image - Interfaces with rasterio routines.
Attributes
----------
parent : parent
reference to the parent routine
outdata : dictionary
dictionary of output datasets
ifile : str
input file name. Used in main.py
"""
def __init__(self, parent=None):
self.ifile = ''
self.parent = parent
self.indata = {}
self.outdata = {}
if parent is None:
self.showprocesslog = print
self.piter = ProgressBarText().iter
else:
self.showprocesslog = parent.showprocesslog
self.piter = parent.pbar.iter
def settings(self, nodialog=False):
"""
Entry point into item.
Parameters
----------
nodialog : bool, optional
Run settings without a dialog. The default is False.
Returns
-------
bool
True if successful, False otherwise.
"""
ext = 'GeoTiff (*.tif)'
if not nodialog:
self.ifile, _ = QtWidgets.QFileDialog.getOpenFileName(
self.parent, 'Open File', '.', ext)
if self.ifile == '':
return False
os.chdir(os.path.dirname(self.ifile))
dat = get_raster(self.ifile, piter=self.piter,
showprocesslog=self.showprocesslog)
if dat is None:
QtWidgets.QMessageBox.warning(self.parent, 'Error',
'Could not import the image.',
QtWidgets.QMessageBox.Ok)
return False
if len(dat) < 3:
QtWidgets.QMessageBox.warning(self.parent, 'Error',
'Not RGB Image, less than 3 bands.',
QtWidgets.QMessageBox.Ok)
return False
output_type = 'Raster'
if len(dat) == 4:
dat2 = np.ma.transpose([dat[0].data.T, dat[1].data.T,
dat[2].data.T, dat[3].data.T])
else:
dat2 = np.ma.transpose([dat[0].data.T, dat[1].data.T,
dat[2].data.T])
dat = [dat[0]]
dat[0].data = dat2
dat[0].isrgb = True
if dat[0].data.dtype == np.uint16:
iidat = np.iinfo(dat[0].data.dtype)
dat[0].data = dat[0].data.astype(float)
dat[0].data = (dat[0].data-iidat.min)/(iidat.max-iidat.min)
self.outdata[output_type] = dat
return True
def loadproj(self, projdata):
"""
Load project data into class.
Parameters
----------
projdata : dictionary
Project data loaded from JSON project file.
Returns
-------
chk : bool
            True if settings ran successfully, False otherwise.
"""
self.ifile = projdata['ifile']
chk = self.settings(True)
return chk
def saveproj(self):
"""
Save project data from class.
Returns
-------
projdata : dictionary
Project data to be saved to JSON project file.
"""
projdata = {}
projdata['ifile'] = self.ifile
return projdata
def clusterprep(dat):
"""
Prepare Cluster data from raster data.
Parameters
----------
dat : list
List of PyGMI datasets.
Returns
-------
dat2 : list
List of PyGMI datasets.
"""
dat2 = []
for i in dat:
if 'Cluster' in i.dataid and 'Membership' not in i.dataid:
numclus = int(i.data.max())
i.metadata['Cluster']['no_clusters'] = numclus
i.metadata['Cluster']['memdat'] = [[]] * numclus
for j in dat:
if 'Membership' in j.dataid and i.dataid in j.dataid:
cnt = int(j.dataid.split(':')[0].split()[-1])-1
i.metadata['Cluster']['memdat'][cnt] = j.data
dat2.append(i)
return dat2
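# A sketch of the dataid convention clusterprep relies on (hypothetical band
# name, for illustration only): membership bands are matched to their cluster
# band and indexed by the class number embedded in the dataid.
def _membership_index_example():
    """Show how clusterprep derives a memdat index from a membership dataid."""
    dataid = 'Membership of class 3: Cluster A'
    cnt = int(dataid.split(':')[0].split()[-1]) - 1
    return cnt  # 2 -> third slot in metadata['Cluster']['memdat']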
def get_ascii(ifile):
"""
    Import an ASCII raster dataset.
Parameters
----------
ifile : str
filename to import
Returns
-------
dat : PyGMI raster Data
dataset imported
"""
isESRI = False
with open(ifile, 'r', encoding='utf-8') as afile:
adata = afile.read()
adata = adata.split()
if adata[0] == 'ncols':
isESRI = True
if isESRI:
nbands = 1
ncols = int(adata[1])
nrows = int(adata[3])
xdim = float(adata[9])
ydim = float(adata[9])
nval = float(adata[11])
ulxmap = float(adata[5])
ulymap = float(adata[7])+ydim*nrows
if 'center' in adata[4].lower():
ulxmap = ulxmap - xdim/2
if 'center' in adata[6].lower():
ulymap = ulymap - ydim/2
adata = adata[12:]
else:
with open(ifile[:-3]+'hdr', 'r', encoding='utf-8') as hfile:
tmp = hfile.readlines()
xdim = float(tmp[0].split()[-1])
ydim = float(tmp[1].split()[-1])
ncols = int(tmp[2].split()[-1])
nrows = int(tmp[3].split()[-1])
nbands = int(tmp[4].split()[-1])
ulxmap = float(tmp[5].split()[-1])
ulymap = float(tmp[6].split()[-1])
nval = -9999.0
bandid = ifile[:-4].rsplit('/')[-1]
adata = np.array(adata, dtype=float)
adata.shape = (nrows, ncols)
if nbands > 1:
        warnings.warn('PyGMI only supports single band ASCII files. '
                      'Only the first band will be imported.')
dat = [Data()]
i = 0
dat[i].data = np.ma.masked_equal(adata, nval)
if dat[i].data.mask.size == 1:
dat[i].data.mask = (np.ma.make_mask_none(dat[i].data.shape) +
dat[i].data.mask)
dat[i].dataid = bandid
dat[i].nodata = nval
dat[i].filename = ifile
xmin = ulxmap
ymax = ulymap
dat[i].set_transform(xdim, xmin, ydim, ymax)
dat[i].crs = CRS.from_string('LOCAL_CS["Arbitrary",UNIT["metre",1,'
'AUTHORITY["EPSG","9001"]],'
'AXIS["Easting",EAST],'
'AXIS["Northing",NORTH]]')
return dat
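# For reference, the ESRI ASCII branch of get_ascii expects a header like the
# following hypothetical 2x2 grid (token order matches the adata indices used
# above; an xllcenter/yllcenter variant shifts the origin by half a cell):
#     ncols        2
#     nrows        2
#     xllcorner    0.0
#     yllcorner    0.0
#     cellsize     100.0
#     NODATA_value -9999
#     1 2
#     3 4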
def get_raster(ifile, nval=None, piter=None, showprocesslog=print,
iraster=None, driver=None):
"""
Get raster dataset.
    This function loads a raster dataset off the disk using the rasterio
    library. It returns the data in a PyGMI data object.
Parameters
----------
ifile : str
filename to import
nval : float, optional
No data/null value. The default is None.
piter : iterable from misc.ProgressBar or misc.ProgressBarText
progress bar iterable
showprocesslog : function, optional
Routine to show text messages. The default is print.
    iraster : None or tuple
        Incremental raster import, to import a section of a file. The tuple is
        (xoff, yoff, xsize, ysize)
    driver : str, optional
        Name of the rasterio driver to use. The default is None, which lets
        rasterio autodetect the format.
Returns
-------
dat : PyGMI raster Data
dataset imported
"""
if piter is None:
piter = ProgressBarText().iter
dat = []
bname = os.path.basename(ifile).rpartition('.')[0]
ext = ifile[-3:]
custom_wkt = ''
filename = ifile
# Envi Case
if ext == 'hdr':
ifile = ifile[:-4]
if os.path.exists(ifile+'.dat'):
ifile = ifile+'.dat'
elif os.path.exists(ifile+'.raw'):
ifile = ifile+'.raw'
elif os.path.exists(ifile+'.img'):
ifile = ifile+'.img'
elif not os.path.exists(ifile):
return None
if ext == 'ers':
with open(ifile, encoding='utf-8') as f:
metadata = f.read()
if 'STMLO' in metadata:
clong = metadata.split('STMLO')[1][:2]
if 'CAPE' in metadata:
custom_wkt = ('PROJCS["Cape / TM'+clong+'",'
'GEOGCS["Cape",'
'DATUM["Cape",'
'SPHEROID["Clarke 1880 (Arc)",'
'6378249.145,293.4663077,'
'AUTHORITY["EPSG","7013"]],'
'AUTHORITY["EPSG","6222"]],'
'PRIMEM["Greenwich",0,'
'AUTHORITY["EPSG","8901"]],'
'UNIT["degree",0.0174532925199433,'
'AUTHORITY["EPSG","9122"]],'
'AUTHORITY["EPSG","4222"]],'
'PROJECTION["Transverse_Mercator"],'
'PARAMETER["latitude_of_origin",0],'
'PARAMETER["central_meridian",'+clong+'],'
'PARAMETER["scale_factor",1],'
'PARAMETER["false_easting",0],'
'PARAMETER["false_northing",0],'
'UNIT["metre",1,AUTHORITY["EPSG","9001"]],'
'AXIS["Easting",EAST],'
'AXIS["Northing",NORTH]]')
elif 'WGS84' in metadata:
custom_wkt = ('PROJCS["Hartebeesthoek94 / TM'+clong+'",'
'GEOGCS["Hartebeesthoek94",'
'DATUM["Hartebeesthoek94",'
'SPHEROID["WGS 84",6378137,298.257223563,'
'AUTHORITY["EPSG","7030"]],'
'AUTHORITY["EPSG","6148"]],'
'PRIMEM["Greenwich",0,'
'AUTHORITY["EPSG","8901"]],'
'UNIT["degree",0.0174532925199433,'
'AUTHORITY["EPSG","9122"]],'
'AUTHORITY["EPSG","4148"]],'
'PROJECTION["Transverse_Mercator"],'
'PARAMETER["latitude_of_origin",0],'
'PARAMETER["central_meridian",'+clong+'],'
'PARAMETER["scale_factor",1],'
'PARAMETER["false_easting",0],'
'PARAMETER["false_northing",0],'
'UNIT["metre",1,AUTHORITY["EPSG","9001"]],'
'AXIS["Easting",EAST],'
'AXIS["Northing",NORTH]]')
dmeta = {}
with rasterio.open(ifile, driver=driver) as dataset:
if dataset is None:
return None
# allns = dataset.tag_namespaces()
gmeta = dataset.tags()
istruct = dataset.tags(ns='IMAGE_STRUCTURE')
driver = dataset.driver
if driver == 'ENVI':
dmeta = dataset.tags(ns='ENVI')
if custom_wkt == '' and dataset.crs is not None:
custom_wkt = dataset.crs.to_wkt()
cols = dataset.width
rows = dataset.height
bands = dataset.count
if nval is None:
nval = dataset.nodata
dtype = rasterio.band(dataset, 1).dtype
if custom_wkt != '':
crs = CRS.from_string(custom_wkt)
else:
showprocesslog('Warning: Your data does not have a projection. '
'Assigning local coordinate system.')
crs = CRS.from_string('LOCAL_CS["Arbitrary",UNIT["metre",1,'
'AUTHORITY["EPSG","9001"]],'
'AXIS["Easting",EAST],'
'AXIS["Northing",NORTH]]')
isbil = False
if 'INTERLEAVE' in istruct and driver in ['ENVI', 'ERS', 'EHdr']:
if istruct['INTERLEAVE'] == 'LINE' and iraster is None:
isbil = True
datin = get_bil(ifile, bands, cols, rows, dtype, piter)
with rasterio.open(ifile) as dataset:
for i in piter(range(dataset.count)):
index = dataset.indexes[i]
bandid = dataset.descriptions[i]
if bandid == '' or bandid is None:
bandid = 'Band '+str(index)+' '+bname
unit = dataset.units[i]
if unit is None:
unit = ''
            # Normalise the unit label here; dat[i] does not exist yet (the
            # Data object is only appended below).
            if unit.lower() == 'micrometers':
                unit = 'μm'
            elif unit.lower() == 'nanometers':
                unit = 'nm'
if nval is None:
nval = dataset.nodata
dat.append(Data())
if isbil is True:
dat[i].data = datin[i]
elif iraster is None:
dat[i].data = dataset.read(index)
else:
xoff, yoff, xsize, ysize = iraster
                dat[i].data = dataset.read(index,
                                           window=Window(xoff, yoff,
                                                         xsize, ysize))
if dat[i].data.dtype.kind == 'i':
if nval is None:
nval = 999999
showprocesslog('Adjusting null value to '+str(nval))
nval = int(nval)
elif dat[i].data.dtype.kind == 'u':
if nval is None:
nval = 0
showprocesslog('Adjusting null value to '+str(nval))
nval = int(nval)
else:
if nval is None:
nval = 1e+20
nval = float(nval)
if nval not in dat[i].data and np.isclose(dat[i].data.min(),
nval):
nval = dat[i].data.min()
showprocesslog('Adjusting null value to '+str(nval))
if nval not in dat[i].data and np.isclose(dat[i].data.max(),
nval):
nval = dat[i].data.max()
showprocesslog('Adjusting null value to '+str(nval))
            if ext == 'ers' and nval == -1.0e+32:
                # ERMapper nulls can lose precision on read; clamp anything
                # at or below the nominal null back to the null value.
                dat[i].data[dat[i].data <= nval] = -1.0e+32
# Note that because the data is stored in a masked array, the array ends up
# being double the size that it was on the disk.
dat[i].data = np.ma.masked_invalid(dat[i].data)
dat[i].data.mask = (np.ma.getmaskarray(dat[i].data) |
(dat[i].data == nval))
dat[i].extent = plotting_extent(dataset)
dat[i].bounds = dataset.bounds
dat[i].dataid = bandid
dat[i].nodata = nval
# dat[i].wkt = custom_wkt
dat[i].filename = filename
dat[i].units = unit
dat[i].transform = dataset.transform
if driver == 'netCDF' and dataset.crs is None:
if 'x#actual_range' in gmeta and 'y#actual_range' in gmeta:
xrng = gmeta['x#actual_range']
xrng = xrng.strip('}{').split(',')
xrng = [float(i) for i in xrng]
xmin = min(xrng)
xdim = (xrng[1]-xrng[0])/cols
yrng = gmeta['y#actual_range']
yrng = yrng.strip('}{').split(',')
yrng = [float(i) for i in yrng]
ymin = min(yrng)
ydim = (yrng[1]-yrng[0])/rows
dat[i].set_transform(xdim, xmin, ydim, ymin)
dat[i].crs = crs
dat[i].xdim, dat[i].ydim = dataset.res
dat[i].meta = dataset.meta
dest = dataset.tags(index)
for j in ['Wavelength', 'WAVELENGTH']:
if j in dest:
dest[j.lower()] = dest[j]
del dest[j]
if 'fwhm' in dmeta:
fwhm = [float(i) for i in dmeta['fwhm'][1:-1].split(',')]
dest['fwhm'] = fwhm[index-1]
if '.raw' in ifile:
dmeta['reflectance_scale_factor'] = 10000.
if 'reflectance scale factor' in dmeta:
dmeta['reflectance_scale_factor'] = dmeta['reflectance scale factor']
dat[i].metadata['Raster'] = {**dmeta, **dest}
return dat
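# Minimal usage sketch for get_raster (hypothetical filenames):
#     dat = get_raster('example.tif')
#     if dat is not None:
#         for band in dat:
#             print(band.dataid, band.data.shape, band.nodata)
# A windowed read of the top-left 512x512 block would be:
#     dat = get_raster('example.tif', iraster=(0, 0, 512, 512))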
def get_bil(ifile, bands, cols, rows, dtype, piter):
"""
Get BIL format file.
This routine is called from get_raster
Parameters
----------
ifile : str
filename to import
bands : int
Number of bands.
cols : int
Number of columns.
rows : int
Number of rows.
dtype : data type
Data type.
piter : iterable from misc.ProgressBar or misc.ProgressBarText
progress bar iterable
Returns
-------
datin : PyGMI raster Data
dataset imported
"""
dtype = np.dtype(dtype)
count = bands*cols*rows
offset = 0
icount = count//10
datin = []
dsize = dtype.itemsize
for _ in piter(range(0, 10)):
tmp = np.fromfile(ifile, dtype=dtype, sep='', count=icount,
offset=offset)
offset += icount*dsize
datin.append(tmp)
extra = int(count-offset/dsize)
if extra > 0:
tmp = np.fromfile(ifile, dtype=dtype, sep='', count=extra,
offset=offset)
datin.append(tmp)
datin = np.concatenate(datin)
datin.shape = (rows, bands, cols)
datin = np.swapaxes(datin, 0, 1)
return datin
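# The sketch below illustrates the band-interleaved-by-line ordering that
# get_bil unpacks: values are stored line by line with all bands of a line
# together, so reshaping to (rows, bands, cols) and swapping the first two
# axes yields the (bands, rows, cols) array returned above.
def _bil_order_example():
    """Tiny synthetic BIL layout: 2 rows x 2 bands x 3 cols."""
    raw = np.arange(12)                # values in on-disk order
    datin = raw.reshape(2, 2, 3)       # (rows, bands, cols)
    return np.swapaxes(datin, 0, 1)    # (bands, rows, cols)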
def get_geopak(hfile):
"""
Geopak Import.
Parameters
----------
hfile : str
filename to import
    Returns
    -------
    dat : PyGMI raster Data
        dataset imported
"""
with open(hfile, 'rb') as fin:
fall = fin.read()
off = 0
fnew = []
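    # Geopak grids are stored as length-prefixed records: each record starts
    # with a one-byte length, a length byte of 130 appears to mark the end of
    # the data, and 129 appears to denote a full 128-byte record with one
    # padding byte (assumptions inferred from the decoding below).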
while off < len(fall):
off += 1
breclen = np.frombuffer(fall, dtype=np.uint8, count=1, offset=off)[0]
if breclen == 130:
break
reclen = breclen
if breclen == 129:
reclen = 128
off += 1
fnew.append(fall[off:off+reclen])
off += reclen
fnew = b''.join(fnew)
header = np.frombuffer(fnew, dtype=np.float32, count=32, offset=0)
# Lines in grid 1
# Points per line 2
# Grid factor 3
# Grid base value 4
# Grid X origin 5
# Grid Y origin 6
# Grid rotation 7
# Grid dummy value 8
# Map scale 9
# Cell size (X) 10
# Cell size (Y) 11
# Inches/unit 12
# Grid X offset 13
# Grid Y offset 14
# Grid hdr version 15
#
# Lines in grid 17
# Points per line 18
# Grid factor 21
# Grid base value 22
# Z maximum 23
# Z minimum 24
#
# Grid dummy value 26
nrows = int(header[0])
ncols = int(header[1])
gfactor = header[2]
gbase = header[3]
x0 = header[4]
y0 = header[5]
# rotation = header[6]
nval = header[7]
# mapscale = header[8]
dx = header[9]
dy = header[10]
# inches_per_unit = header[11]
# xoffset = header[12]
# yoffset = header[13]
# hver = header[14]
# zmax = header[22]
# zmin = header[23]
data = np.frombuffer(fnew, dtype=np.int16, count=(nrows*ncols), offset=128)
data = np.ma.masked_equal(data, nval)
data = data/gfactor+gbase
data.shape = (nrows, ncols)
data = data[::-1]
dat = []
dat.append(Data())
i = 0
dat[i].data = data
dat[i].dataid = hfile[:-4]
dat[i].nodata = nval
xmin = x0
ymax = y0 + dy*nrows
dat[i].set_transform(dx, xmin, dy, ymax)
dat[i].filename = hfile
dat[i].crs = CRS.from_string('LOCAL_CS["Arbitrary",UNIT["metre",1,'
'AUTHORITY["EPSG","9001"]],'
'AXIS["Easting",EAST],'
'AXIS["Northing",NORTH]]')
return dat
def get_geosoft(hfile):
"""
Get Geosoft file.
Parameters
----------
    hfile : str
        filename to import
Returns
-------
dat : PyGMI Data
Dataset imported
"""
f = open(hfile, mode='rb')
es = np.fromfile(f, dtype=np.int32, count=1)[0] # 4
sf = np.fromfile(f, dtype=np.int32, count=1)[0] # signf
ncols = np.fromfile(f, dtype=np.int32, count=1)[0] # ncol
nrows = np.fromfile(f, dtype=np.int32, count=1)[0] # nrow
kx = np.fromfile(f, dtype=np.int32, count=1)[0] # 1
dx = np.fromfile(f, dtype=np.float64, count=1)[0] # dx
dy = np.fromfile(f, dtype=np.float64, count=1)[0] # dy
x0 = np.fromfile(f, dtype=np.float64, count=1)[0] # xllcor
y0 = np.fromfile(f, dtype=np.float64, count=1)[0] # yllcor
rot = np.fromfile(f, dtype=np.float64, count=1)[0] # rot
zbase = np.fromfile(f, dtype=np.float64, count=1)[0] # zbase
zmult = np.fromfile(f, dtype=np.float64, count=1)[0] # zmult
label = np.fromfile(f, dtype='a48', count=1)[0]
mapno = np.fromfile(f, dtype='a16', count=1)[0]
proj = np.fromfile(f, dtype=np.int32, count=1)[0]
unitx = np.fromfile(f, dtype=np.int32, count=1)[0]
unity = np.fromfile(f, dtype=np.int32, count=1)[0]
unitz = np.fromfile(f, dtype=np.int32, count=1)[0]
nvpts = np.fromfile(f, dtype=np.int32, count=1)[0]
izmin = np.fromfile(f, dtype=np.int32, count=1)[0]
izmax = np.fromfile(f, dtype=np.int32, count=1)[0]
izmed = np.fromfile(f, dtype=np.int32, count=1)[0]
izmea = np.fromfile(f, dtype=np.int32, count=1)[0]
zvar = np.fromfile(f, dtype=np.float64, count=1)[0]
prcs = np.fromfile(f, dtype=np.int32, count=1)[0]
temspc = np.fromfile(f, dtype='a324', count=1)[0]
if es == 2:
nval = -32767
data = np.fromfile(f, dtype=np.int16, count=nrows*ncols)
elif es == 4:
data = np.fromfile(f, dtype=np.float32, count=nrows*ncols)
nval = -1.0E+32
else:
return None
data = np.ma.masked_equal(data, nval)
data = data/zmult + zbase
data.shape = (nrows, ncols)
data = data[::-1]
f.close()
dat = []
dat.append(Data())
i = 0
dat[i].data = data
dat[i].dataid = hfile[:-4]
dat[i].nodata = nval
xmin = x0
ymax = y0 + dy*nrows
dat[i].set_transform(dx, xmin, dy, ymax)
dat[i].filename = hfile
dat[i].crs = CRS.from_string('LOCAL_CS["Arbitrary",UNIT["metre",1,'
'AUTHORITY["EPSG","9001"]],'
'AXIS["Easting",EAST],'
'AXIS["Northing",NORTH]]')
return dat
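# Both get_geopak and get_geosoft decode stored integers to physical values
# the same way: mask the null, then apply value/scale + base. A minimal
# sketch with hypothetical header values:
def _grid_decode_example():
    """Decode scaled int16 grid values the way get_geosoft does."""
    stored = np.ma.masked_equal(
        np.array([-32767, 0, 1000], dtype=np.int16), -32767)  # -32767 = null
    zbase, zmult = 100.0, 10.0           # hypothetical header fields
    return stored / zmult + zbase        # masked([--, 100.0, 200.0])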
class ExportData():
"""
Export Data.
Attributes
----------
parent : parent
reference to the parent routine
outdata : dictionary
dictionary of output datasets
ifile : str
input file name. Used in main.py
"""
def __init__(self, parent=None):
self.ifile = ''
if parent is None:
self.piter = ProgressBarText().iter
else:
self.piter = parent.pbar.iter
self.parent = parent
self.indata = {}
self.outdata = {}
if parent is None:
self.showprocesslog = print
else:
self.showprocesslog = parent.showprocesslog
def run(self):
"""
Run.
Returns
-------
bool
True if successful, False otherwise.
"""
self.parent.process_is_active(True)
if 'Cluster' in self.indata:
data = self.indata['Cluster']
newdat = copy.deepcopy(data)
for i in data:
if 'memdat' not in i.metadata['Cluster']:
continue
for j, val in enumerate(i.metadata['Cluster']['memdat']):
tmp = copy.deepcopy(i)
tmp.memdat = None
tmp.data = val
tmp.dataid = ('Membership of class ' + str(j+1)
+ ': '+tmp.dataid)
newdat.append(tmp)
data = newdat
elif 'Raster' in self.indata:
data = self.indata['Raster']
else:
self.showprocesslog('No raster data')
self.parent.process_is_active(False)
return False
ext = ('GeoTiff (*.tif);;'
'GeoTiff compressed using ZSTD (*.tif);;'
'ENVI (*.hdr);;'
'ERMapper (*.ers);;'
'Geosoft (*.gxf);;'
'ERDAS Imagine (*.img);;'
'SAGA binary grid (*.sdat);;'
'Surfer grid (*.grd);;'
'ArcInfo ASCII (*.asc);;'
'ASCII XYZ (*.xyz);;'
'ArcGIS BIL (*.bil)')
filename, filt = QtWidgets.QFileDialog.getSaveFileName(
self.parent, 'Save File', '.', ext)
if filename == '':
self.parent.process_is_active(False)
return False
os.chdir(os.path.dirname(filename))
self.ifile = str(filename)
self.showprocesslog('Export Data Busy...')
        # Dispatch on the file filter chosen in the save dialog
if filt == 'ArcInfo ASCII (*.asc)':
self.export_ascii(data)
if filt == 'ASCII XYZ (*.xyz)':
self.export_ascii_xyz(data)
if filt == 'Geosoft (*.gxf)':
self.export_gxf(data)
if filt == 'Surfer grid (*.grd)':
self.export_surfer(data)
if filt == 'ERDAS Imagine (*.img)':
export_raster(self.ifile, data, 'HFA', piter=self.piter)
if filt == 'ERMapper (*.ers)':
export_raster(self.ifile, data, 'ERS', piter=self.piter)
if filt == 'SAGA binary grid (*.sdat)':
if len(data) > 1:
for i, dat in enumerate(data):
file_out = self.get_filename(dat, 'sdat')
export_raster(file_out, [dat], 'SAGA', piter=self.piter)
else:
export_raster(self.ifile, data, 'SAGA', piter=self.piter)
if filt == 'GeoTiff (*.tif)':
export_raster(self.ifile, data, 'GTiff', piter=self.piter)
if filt == 'GeoTiff compressed using ZSTD (*.tif)':
export_raster(self.ifile, data, 'GTiff', piter=self.piter,
compression='ZSTD')
if filt == 'ENVI (*.hdr)':
export_raster(self.ifile, data, 'ENVI', piter=self.piter)
if filt == 'ArcGIS BIL (*.bil)':
export_raster(self.ifile, data, 'EHdr', piter=self.piter)
self.showprocesslog('Export Data Finished!')
self.parent.process_is_active(False)
return True
def export_gxf(self, data):
"""
Export GXF data.
Parameters
----------
data : PyGMI raster Data
dataset to export
Returns
-------
None.
"""
if len(data) > 1:
            self.showprocesslog('Band names will be appended to the output '
                                'filenames since you have a multi-band '
                                'image')
file_out = self.ifile.rpartition('.')[0]+'.gxf'
for k in data:
if len(data) > 1:
file_out = self.get_filename(k, 'gxf')
fno = open(file_out, 'w', encoding='utf-8')
xmin = k.extent[0]
ymin = k.extent[2]
krows, kcols = k.data.shape
fno.write('#TITLE\n')
fno.write('Export Data')
fno.write('\n#POINTS\n')
fno.write(str(kcols))
fno.write('\n#ROWS\n')
fno.write(str(krows))
fno.write('\n#PTSEPARATION\n')
fno.write(str(k.xdim))
fno.write('\n#RWSEPARATION\n')
fno.write(str(k.ydim))
fno.write('\n#XORIGIN\n')
fno.write(str(xmin))
fno.write('\n#YORIGIN\n')
fno.write(str(ymin))
fno.write('\n#SENSE\n')
fno.write('1')
fno.write('\n#DUMMY\n')
fno.write(str(k.nodata))
fno.write('\n#GRID\n')
tmp = k.data.filled(k.nodata)
for i in range(k.data.shape[0]-1, -1, -1):
kkk = 0
# write only 5 numbers in a row
for j in range(k.data.shape[1]):
if kkk == 5:
kkk = 0
if kkk == 0:
fno.write('\n')
fno.write(str(tmp[i, j]) + ' ')
kkk += 1
fno.close()
def export_surfer(self, data):
"""
Export a surfer binary grid.
Parameters
----------
data : PyGMI raster Data
dataset to export
Returns
-------
None.
"""
if len(data) > 1:
            self.showprocesslog('Band names will be appended to the output '
                                'filenames since you have a multi-band '
                                'image')
file_out = self.ifile.rpartition('.')[0] + '.grd'
for k0 in data:
k = copy.deepcopy(k0)
if len(data) > 1:
file_out = self.get_filename(k, 'grd')
k.data = k.data.filled(1.701410009187828e+38)
k.nodata = 1.701410009187828e+38
export_raster(file_out, [k], 'GS7BG', piter=self.piter)
def export_ascii(self, data):
"""
Export ASCII file.
Parameters
----------
data : PyGMI raster Data
dataset to export
Returns
-------
None.
"""
if len(data) > 1:
            self.showprocesslog('Band names will be appended to the output '
                                'filenames since you have a multi-band '
                                'image')
file_out = self.ifile.rpartition('.')[0]+'.asc'
for k in data:
if len(data) > 1:
file_out = self.get_filename(k, 'asc')
fno = open(file_out, 'w', encoding='utf-8')
extent = k.extent
xmin = extent[0]
ymin = extent[2]
krows, kcols = k.data.shape
fno.write('ncols \t\t\t' + str(kcols))
fno.write('\nnrows \t\t\t' + str(krows))
fno.write('\nxllcorner \t\t\t' + str(xmin))
fno.write('\nyllcorner \t\t\t' + str(ymin))
fno.write('\ncellsize \t\t\t' + str(k.xdim))
            fno.write('\nNODATA_value \t\t' + str(k.nodata))
tmp = k.data.filled(k.nodata)
krows, kcols = k.data.shape
for j in range(krows):
fno.write('\n')
for i in range(kcols):
fno.write(str(tmp[j, i]) + ' ')
fno.close()
def export_ascii_xyz(self, data):
"""
        Export an ASCII XYZ file.
Parameters
----------
data : PyGMI raster Data
dataset to export
Returns
-------
None.
"""
if len(data) > 1:
            self.showprocesslog('Band names will be appended to the output '
                                'filenames since you have a multi-band '
                                'image')
file_out = self.ifile.rpartition('.')[0]+'.xyz'
for k in data:
if len(data) > 1:
file_out = self.get_filename(k, 'xyz')
fno = open(file_out, 'w', encoding='utf-8')
tmp = k.data.filled(k.nodata)
xmin = k.extent[0]
ymax = k.extent[-1]
krows, kcols = k.data.shape
for j in range(krows):
for i in range(kcols):
fno.write(str(xmin+i*k.xdim) + ' ' +
str(ymax-j*k.ydim) + ' ' +
str(tmp[j, i]) + '\n')
fno.close()
def get_filename(self, data, ext):
"""
Get a valid filename in the case of multi band image.
Parameters
----------
data : PyGMI raster Data
dataset to get filename from
ext : str
filename extension to use
Returns
-------
file_out : str
Output filename.
"""
file_band = data.dataid.strip('"')
file_band = file_band.replace('/', '')
file_band = file_band.replace(':', '')
file_out = self.ifile.rpartition('.')[0]+'_'+file_band+'.'+ext
return file_out
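# get_filename sketch (hypothetical values): with self.ifile set to
# '/tmp/out.tif' and a band whose dataid is '"mag: total"', the band name is
# sanitised to 'mag total' and the method returns '/tmp/out_mag total.tif'.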
def export_raster(ofile, dat, drv, envimeta='', piter=None,
compression='NONE'):
"""
Export to rasterio format.
Parameters
----------
ofile : str
Output file name.
    dat : list or dictionary of PyGMI raster Data
        dataset(s) to export
drv : str
name of the rasterio driver to use
envimeta : str, optional
ENVI metadata. The default is ''.
    piter : ProgressBar.iter/ProgressBarText.iter, optional
        Progressbar iterable from misc. The default is None.
    compression : str, optional
        GeoTIFF compression type, e.g. 'NONE', 'ZSTD' or 'DEFLATE'. The
        default is 'NONE'.
Returns
-------
None.
"""
if piter is None:
piter = ProgressBarText().iter
if isinstance(dat, dict):
dat2 = []
for i in dat:
dat2.append(dat[i])
else:
dat2 = dat
data = lstack(dat2, piter)
dtype = data[0].data.dtype
    nodata = dat2[0].nodata
    trans = dat2[0].transform
    crs = dat2[0].crs
try:
nodata = dtype.type(nodata)
except OverflowError:
print('Invalid nodata for dtype, resetting to 0')
nodata = 0
    if trans is None:
        trans = rasterio.transform.from_origin(dat2[0].extent[0],
                                               dat2[0].extent[3],
                                               dat2[0].xdim, dat2[0].ydim)
tmp = ofile.rpartition('.')
if drv == 'GTiff':
tmpfile = tmp[0] + '.tif'
elif drv == 'EHdr':
dtype = np.float32
tmpfile = tmp[0] + '.bil'
elif drv == 'GSBG':
tmpfile = tmp[0]+'.grd'
dtype = np.float32
elif drv == 'SAGA':
tmpfile = tmp[0]+'.sdat'
data[0].nodata = -99999.0
elif drv == 'HFA':
tmpfile = tmp[0]+'.img'
elif drv == 'ENVI':
tmpfile = tmp[0]+'.dat'
elif drv == 'ERS': # ER Mapper
tmpfile = tmp[0]
else:
tmpfile = ofile
drows, dcols = data[0].data.shape
kwargs = {}
if drv == 'GTiff':
kwargs = {'COMPRESS': compression,
'ZLEVEL': '1',
'ZSTD_LEVEL': '1',
'BIGTIFF': 'YES',
'INTERLEAVE': 'BAND',
'TFW': 'YES',
'PROFILE': 'GeoTIFF'}
if dtype == np.float32 or dtype == np.float64:
kwargs['PREDICTOR'] = '3'
with rasterio.open(tmpfile, 'w', driver=drv,
width=int(dcols), height=int(drows), count=len(data),
dtype=dtype, transform=trans, crs=crs,
nodata=nodata, **kwargs) as out:
numbands = len(data)
wavelength = []
fwhm = []
# cov = []
# for idata in data:
# cov.append(idata.data.flatten())
# cov = np.ma.array(cov)
# cov = np.ma.cov(cov)
for i in piter(range(numbands)):
datai = data[i]
out.set_band_description(i+1, datai.dataid)
# rtmp.SetDescription(datai.dataid)
# rtmp.SetMetadataItem('BandName', datai.dataid)
dtmp = np.ma.array(datai.data)
dtmp.set_fill_value(datai.nodata)
dtmp = dtmp.filled()
# rtmp.GetStatistics(False, True)
out.write(dtmp, i+1)
# icov = str(cov[i])[1:-1].replace(' ', ', ')
# out.update_tags(i+1, STATISTICS_COVARIANCES=icov)
out.update_tags(i+1, STATISTICS_EXCLUDEDVALUES='')
out.update_tags(i+1, STATISTICS_MAXIMUM=datai.data.max())
out.update_tags(i+1, STATISTICS_MEAN=datai.data.mean())
# out.update_tags(i+1, STATISTICS_MEDIAN=np.ma.median(datai.data))
out.update_tags(i+1, STATISTICS_MINIMUM=datai.data.min())
out.update_tags(i+1, STATISTICS_SKIPFACTORX=1)
out.update_tags(i+1, STATISTICS_SKIPFACTORY=1)
out.update_tags(i+1, STATISTICS_STDDEV=datai.data.std())
if 'Raster' in datai.metadata:
if 'wavelength' in datai.metadata['Raster']:
out.update_tags(i+1, wavelength=str(datai.metadata['Raster']['wavelength']))
wavelength.append(datai.metadata['Raster']['wavelength'])
if 'fwhm' in datai.metadata['Raster']:
fwhm.append(datai.metadata['Raster']['fwhm'])
if 'reflectance_scale_factor' in datai.metadata['Raster']:
out.update_tags(i+1, reflectance_scale_factor=str(datai.metadata['Raster']['reflectance_scale_factor']))
if 'WavelengthMin' in datai.metadata:
out.update_tags(i+1, WavelengthMin=str(datai.metadata['WavelengthMin']))
out.update_tags(i+1, WavelengthMax=str(datai.metadata['WavelengthMax']))
if drv == 'ENVI':
wout = ''
if (wavelength and envimeta is not None and
'wavelength' not in envimeta):
wout = str(wavelength)
wout = wout.replace('[', '{')
wout = wout.replace(']', '}')
wout = wout.replace("'", '')
wout = 'wavelength = '+wout+'\n'
if fwhm:
fwhm = str(fwhm)
fwhm = fwhm.replace('[', '{')
fwhm = fwhm.replace(']', '}')
fwhm = fwhm.replace("'", '')
wout += 'fwhm = ' + fwhm+'\n'
        if ('Raster' in datai.metadata and
                'reflectance_scale_factor' in datai.metadata['Raster']):
            rsf = datai.metadata['Raster']['reflectance_scale_factor']
            wout += 'reflectance scale factor = ' + str(rsf) + '\n'
with open(tmpfile[:-4]+'.hdr', 'a', encoding='utf-8') as myfile:
myfile.write(wout)
myfile.write(envimeta)
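# Minimal usage sketch for export_raster (hypothetical filenames):
#     dat = get_raster('input.tif')
#     export_raster('output.tif', dat, 'GTiff', compression='ZSTD')
# drv is a rasterio/GDAL driver short name; the output extension is adjusted
# above to match the chosen driver.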
def _filespeedtest():
"""Test."""
import matplotlib.pyplot as plt
from pygmi.misc import getinfo
print('Starting')
pbar = ProgressBarText()
# ifile = r'd:\WorkData\Richtersveld\Reprocessed\RSarea_Hyper.dat'
# ifile = r'd:\WorkData\Hyperspectral\056_0818-1125_ref_rect.dat'
# ifile = r'd:\WorkData\Hyperspectral\056_0818-1125_ref_rect_BSQ.dat'
# ifile = r"d:\Workdata\testdata.hdr"
# ifile = r"d:\Workdata\raster\rad_3bands.ers"
# ofile = r"d:\Workdata\hope.tif"
# xoff = 0
# yoff = 0
# xsize = None
# ysize = 1000
# iraster = (xoff, yoff, xsize, ysize)
ifile = r"d:\Workdata\compress\New_max_22-55_iMNF15_ferriciron_UTM33s.tif"
ifile = r"d:\Downloads\caldefo_o_unwrap_goldstein64_OrbAdj_FlatEarth-defo_raw11_ref20210226_dep20210322.pix"
# ofile = r"d:\Workdata\compress\New_max_22-55_iMNF15_ferriciron_UTM33s_DEFLATE3ZL1.tif"
ifile = r"C:\WorkProjects\Script6c_disp\disp_data.tif"
# ifile = r'd:/Workdata/compress/017_0823-1146_ref_rect_BSQ_291div283_194div291_219div303.tif'
# ofile = ifile[:-4]+'_DEFLATE3.tiff'
# ifile = r"d:/Workdata/testdata.hdr"
# ofile = r'd:/Workdata/testdata.grd'
ifile = r"D:\Workdata\people\rahul\gravity_final.grd"
ifile = r"D:\Workdata\people\rahul\grav.grd"
iraster = None
getinfo('Start')
dataset = get_raster(ifile, iraster=iraster)
# ofile = ifile[:-4]+'_hope.tif'
# export_raster(ofile, dataset, 'GTiff')
# k = dataset[0]
# k.data = k.data.filled(1.701410009187828e+38)
# export_raster(ofile, [k], 'GS7BG')
# dataset = get_raster(ofile, iraster=iraster)
plt.figure(dpi=150)
plt.imshow(dataset[0].data, extent=dataset[0].extent)
plt.colorbar()
plt.show()
# for i in dataset:
# i.data = i.data*10000
# i.data = i.data.astype(np.int16)
# export_raster(ofile, dataset, 'GS7BG', piter=pbar.iter)
# export_raster(ofile, dataset, 'GTiff', compression='PACKBITS') # 182s
# export_raster(ofile, dataset, 'GTiff', compression='LZW') # 191, 140 with pred=3
# export_raster(ofile, dataset, 'GTiff', compression='LZMA') #
# export_raster(ifile[:-4]+'_DEFLATE3ZL1.tiff', dataset, 'GTiff', compression='DEFLATE') # 318, 277 PRED 3
# export_raster(ifile[:-4]+'_ZSTD3ZL1.tiff', dataset, 'GTiff', compression='ZSTD') # 241, 281 pred=3
# best is zstd pred 3 zlvl 1
    # then deflate pred 3 zlvl 1
getinfo('End')
if __name__ == "__main__":
_filespeedtest()
| Patrick-Cole/pygmi | pygmi/raster/iodefs.py | Python | gpl-3.0 | 49,696 | [
"ADF",
"NetCDF"
] | bee071ce4da88c26e348b184b24a5bb92c7c3c1a637f6ef55b8a6c4fe463b7b8 |
from packaging import version
import warnings
import sys
from matplotlib import pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import matplotlib as mpl
import matplotlib.colors as mplcolors
import numpy as np
import matplotlib.ticker as mtik
import types
try:
import scipy.ndimage
from scipy.stats import norm
haveScipy = True
except ImportError:
haveScipy = False
PYVER = sys.version_info[0]
MPLVER = version.parse(mpl.__version__)
if MPLVER >= version.parse('2.1'):
density_kw = {'density':True}
else:
warnings.warn("Future versions of pygtc will require matplotlib >= 2.1", DeprecationWarning)
density_kw = {'normed':True}
__all__ = ['plotGTC']
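# Minimal usage sketch (hypothetical data; plotGTC is defined below):
#     import numpy as np
#     chain = np.random.randn(10000, 2)
#     fig = plotGTC(chains=[chain], paramNames=['$x$', '$y$'])
#     fig.savefig('gtc.pdf')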
# Create a full GTC
def plotGTC(chains, **kwargs):
r"""Make a great looking Giant Triangle Confusogram (GTC) with one line of
code! A GTC is like a triangle (or corner) plot, but you get to put as
many sets of data, and overlay as many truths as you like. That's what can
make it so *confusing*!
Parameters
----------
chains : array-like[nSamples,nDims] or a
list[[nSamples1,nDims], [nSamples2,nDims], ...]
All chains (where a chain is [nSamples,nDims]) in the list must have
the same number of dimensions. Note: If you are using ``emcee``
(http://dan.iel.fm/emcee/current/) - and you should! - each element
of chains is an ``EnsembleSampler.flatchain`` object.
Keyword Arguments
-----------------
weights : array-like[nSamples] or a list[[nSamples1], ...]
Weights for the sample points. The number of 1d arrays passed must
correspond to the number of `chains`, and each `weights` array must
have the same length nSamples as its corresponding chain.
chainLabels : array-like[nChains]
A list of text labels describing each chain passed to chains.
len(chainLabels) must equal len(chains). chainLabels supports LaTex
commands enclosed in $..$. Additionally, you can pass None as a label.
Default is ``None``.
paramNames : list-like[nDims]
A list of text labels describing each dimension of chains.
len(paramNames) must equal nDims=chains[0].shape[1]. paramNames
        supports LaTeX commands enclosed in $..$. Additionally, you can pass
None as a label. Default is None, however if you pass a
``pandas.DataFrame`` object, `paramNames` defaults to the ``DataFrame``
column names.
truths : list-like[nDims] or [[nDims], ...]
A list of parameter values, one for each parameter in `chains` to
highlight in the GTC parameter space, or a list of lists of values to
highlight in the parameter space. For each set of truths passed to
`truths`, there must be a value corresponding to every dimension in
`chains`, although any value may be ``None``. Default is ``None``.
truthLabels : list-like[nTruths]
A list of labels, one for each list passed to truths. truthLabels
        supports LaTeX commands enclosed in $..$. Additionally, you can pass
``None`` as a label. Default is ``None``.
truthColors : list-like[nTruths]
User-defined colors for the truth lines, must be one per set of truths
passed to `truths`. Default color is gray ``#4d4d4d`` for up to three
lines.
truthLineStyles : list-like[nTruths]
User-defined line styles for the truth lines, must be one per set of
truths passed to `truths`. Default line styles are
``['--', ':', 'dashdot']``.
priors : list of tuples [(mu1, sigma1), ...]
Each tuple describes a Gaussian to be plotted over that parameter's
histogram. The number of priors must equal the number of dimensions in
`chains`. Default is ``None``.
plotName : string
A path to save the GTC to in pdf form. Default is ``None``.
nContourLevels : int
The number of contour levels to plot in the 2d histograms. May be 1, 2,
or 3. Default is 2.
sigmaContourLevels : bool
Whether you want 2d "sigma" contour levels (39%, 86%, 99%) instead of
the standard contour levels (68%, 95%, 99%). Default is ``False``.
nBins : int
An integer describing the number of bins used to compute the
histograms. Default is 30.
smoothingKernel : float
Size of the Gaussian smoothing kernel in bins. Default is 1. Set to 0
for no smoothing.
filledPlots : bool
Whether you want the 2d contours and the 1d histograms to be filled.
Default is ``True``.
plotDensity : bool
Whether you want to see the 2d density of points. Default is ``False``.
figureSize : float or string
A number in inches describing the length = width of the GTC, or a
string indicating a predefined journal setting and whether the figure
will span one column or the full page width. Default is 70/dpi where
``dpi = plt.rcParams['figure.dpi']``. Options to choose from are
``'APJ_column'``, ``'APJ_page'``, ``'MNRAS_column'``, ``'MNRAS_page'``,
``'AandA_column'``, ``'AandA_page'``.
panelSpacing : string
Options are ``'loose'`` or ``'tight'``. Determines whether there is
some space between the subplots of the GTC or not. Default is
``'tight'``.
legendMarker : string
Options are ``'All'``, ``'None'``, ``'Auto'``. ``'All'`` and ``'None'``
force-show or force-hide all label markers. ``'Auto'`` shows label
markers if two or more truths are plotted.
paramRanges : list of tuples [nDim]
Set the boundaries of each parameter range. Must provide a tuple for
each dimension of `chains`. If ``None`` is provided for a parameter,
the range defaults to the width of the histogram.
labelRotation : tuple [2]
Rotate the tick labels by 45 degrees for less overlap. Sets the x- and
y-axis separately. Options are ``(True,True)``, ``(True,False)``,
``(False,True)``, ``(False,False)``, ``None``. Using ``None`` sets to
default ``(True,True)``.
tickShifts : tuple [2]
Shift the x/y tick labels horizontally/vertically by a fraction of the
tick spacing. Example tickShifts = (0.1, 0.05) shifts the x-tick labels
right by ten percent of the tick spacing and shifts the y-tick labels
up by five percent of the tick spacing. Default is (0.1, 0.1). If tick
rotation is turned off for either axis, then the corresponding shift is
set to zero.
colorsOrder : list-like[nDims]
The color order for chains passed to `chains`. Default is ``['blues',
'oranges', 'greens', 'reds', 'purples', 'browns', 'pinks', 'grays',
'yellows', 'cyans']``. Currently, ``pygtc`` is limited to these color
values, so you can reorder them, but can't yet define your own colors.
If you really love the old colors, you can get at them by calling:
``['blues_old', 'greens_old', ...]``.
do1dPlots : bool
        Whether or not 1d histograms are plotted on the diagonal. Default is
``True``.
doOnly1dPlot : bool
Plot only ONE 1d histogram. If this is True, then chains must have
shape ``(samples,1)``. Default is ``False``.
mathTextFontSet : string
        Set font family for rendering LaTeX. Default is ``'stixsans'``. Set to
``None`` to use the default setting in your matplotlib rc. See Notes
for known issues regarding this keyword.
customLabelFont : ``matplotlib.fontdict``
Full customization of label fonts. See matplotlib for full
documentation. Default is ``{'family':'Arial', 'size':9}``.
customLegendFont : ``matplotlib.fontdict``
Full customization of legend fonts. See matplotlib for full
documentation. Default is ``{'family':'Arial', 'size':9}``.
customTickFont : ``matplotlib.fontdict``
Full customization of tick label fonts. See matplotlib for full
documentation. Default is ``{'family':'Arial', 'size':6}``. Attempting
to set the color will result in an error.
holdRC : bool
Whether or not to reset rcParams back to default. You may wish to set
this to ``True`` if you are working in interactive mode (ie with
IPython or in a JuPyter notebook) and you want the plots that display
to be identical to the plots that save in the pdf. See Notes below for
more information. Default is ``False``.
Returns
-------
fig : ``matplotlib.figure`` object
You can do all sorts of fun things with this in terms of customization
        after it gets returned. If you are using a ``Jupyter`` notebook with
inline plotting enabled, you should assign a variable to catch the
return or else the figure will plot twice.
Note
----
If you are calling ``plotGTC`` from within an interactive python session
    (ie via IPython or in a Jupyter notebook), the label font in the saved pdf
may differ from the plot that appears when calling
``matplotlib.pyplot.show()``.
This will happen if the mathTextFontSet keyword sets a value that is
different than the one stored in ``rcParams['mathtext.fontset']`` and you
are using equations in your labels by enclosing them in $..$. The output
pdf will display correctly, but the interactive plot will use whatever is
stored in the rcParams default to render the text that is inside the $..$.
Unfortunately, this is an oversight in matplotlib's design, which only
allows one global location for specifying this setting. As a workaround,
you can set ``holdRC = True`` when calling ``plotGTC`` and it will *not*
reset your rcParams back to their default state. Thus, when the figure
renders in interactive mode, it will match the saved pdf. If you wish to
reset your rcParams back to default at any point, you can call
``matplotlib.rcdefaults()``. However, if you are in a jupyter notebook and
have set ``%matplotlib inline``, then calling ``matplotlib.rcdefaults()``
may not set things back the way they were, but rerunning the line magic
will.
This is all due to a bug in matplotlib that is slated to be fixed in the
upcoming 2.0 release."""
# Figure setting
# Set up some colors
truthsDefaultColors = ['#4d4d4d', '#4d4d4d', '#4d4d4d']
truthsDefaultLS = ['--', ':', 'dashdot']
colorsDict = {
# Match pygtc up to v0.2.4
'blues_old': ('#4c72b0', '#7fa5e3', '#b2d8ff'),
'greens_old': ('#55a868', '#88db9b', '#bbffce'),
'yellows_old': ('#f5964f', '#ffc982', '#fffcb5'),
'reds_old': ('#c44e52', '#f78185', '#ffb4b8'),
        'purples_old': ('#8172b2', '#b4a5e5', '#e7d8ff'),
# New color scheme, dark colors match matplotlib v2
'blues': ('#1f77b4', '#52aae7', '#85ddff'),
'oranges': ('#ff7f0e', '#ffb241', '#ffe574'),
'greens': ('#2ca02c', '#5fd35f', '#92ff92'),
'reds': ('#d62728', '#ff5a5b', '#ff8d8e'),
'purples': ('#9467bd', '#c79af0', '#facdff'),
'browns': ('#8c564b', '#bf897e', '#f2bcb1'),
'pinks': ('#e377c2', '#ffaaf5', '#ffddff'),
'grays': ('#7f7f7f', '#b2b2b2', '#e5e5e5'),
'yellows': ('#bcbd22', '#eff055', '#ffff88'),
'cyans': ('#17becf', '#4af1ff', '#7dffff'),
}
defaultColorsOrder = ['blues', 'oranges', 'greens', 'reds', 'purples',
'browns', 'pinks', 'grays', 'yellows', 'cyans']
priorColor = '#333333'
# Angle of tick labels
tickAngle = 45
# Dictionary of size types or whatever:
mplPPI = plt.rcParams['figure.dpi'] # Matplotlib dots per inch
figSizeDict = {'APJ_column': 245.26653 / mplPPI,
'APJ_page': 513.11743 / mplPPI,
'MNRAS_column': 240. / mplPPI,
'MNRAS_page': 504. / mplPPI,
'AandA_column': 256.0748 / mplPPI,
'AandA_page': 523.5307 / mplPPI}
# Check the validity of the chains argument:
# Numpy really doesn't like lists of Pandas DataFrame objects
# So if it gets one, extract array vals and throw away the rest
dfColNames = None
try: # Not a list of DFs, but might be a single DF
try:
# Check if single numpy 2d chain
if chains.ndim == 2:
chains = [chains]
except Exception:
pass
# Read in column names from Pandas DataFrame if exists
# Also convert DataFrame to simple numpy array to avoid later conflicts
if hasattr(chains[0], 'columns'):
# Set default param names from DataFrame column names
dfColNames = list(chains[0].columns.values)
chains = [df.values for df in chains]
except ValueError: # Probably a list of pandas DFs
if hasattr(chains[0], 'columns') and hasattr(chains[0], 'values'):
dfColNames = list(chains[0].columns.values)
chains = [df.values for df in chains]
# Get number of chains
nChains = len(chains)
assert nChains <= len(defaultColorsOrder), \
"currently only supports up to "+str(len(defaultColorsOrder))+" chains"
# Check that each chain looks reasonable (2d shape)
for i in range(nChains):
        assert len(chains[i].shape) == 2, \
            "unexpected shape of chain %d" % i
# Number of dimensions (parameters), check all chains have same nDim
nDim = len(chains[0][0, :])
for i in range(nChains):
nDimi = len(chains[i][0, :])
assert nDimi == nDim, \
"chain %d has unexpected number of dimensions %d" % (i, nDimi)
# Labels for multiple chains, goes in plot legend
chainLabels = kwargs.pop('chainLabels', None)
if chainLabels is not None:
# Convert to list if only one label
if __isstr(chainLabels):
chainLabels = [chainLabels]
# Check that number of labels equals number of chains
assert len(chainLabels) == nChains, \
"chainLabels mismatch with number of chains"
# Check that it's a list of strings
assert all(__isstr(s) for s in chainLabels), \
"chainLabels must be list of strings"
# Label the x and y axes, supports latex
paramNames = kwargs.pop('paramNames', None)
if paramNames is not None:
# Convert to list if only one name
if __isstr(paramNames):
paramNames = [paramNames]
# Check that number of paramNames equals nDim
assert len(paramNames) == nDim, \
"paramNames mismatch with number of dimensions"
# Check that it's a list of strings
assert all(__isstr(s) for s in paramNames), \
"paramNames must be list of strings"
elif dfColNames is not None:
paramNames = dfColNames
# Custom parameter range
paramRanges = kwargs.pop('paramRanges', None)
if paramRanges is not None:
assert len(paramRanges) == nDim, \
"paramRanges must match number of parameters"
# Rotated tick labels
labelRotation = kwargs.pop('labelRotation', (True, True))
# Shifted tick labels, Default is nudge by 0.1 * tick spacing
shiftX, shiftY = kwargs.pop('tickShifts', (0.1, 0.1))
# If the rotation is turned off, then don't shift the labels
if not labelRotation[0]:
shiftX = 0
if not labelRotation[1]:
shiftY = 0
# User-defined color ordering
colorsOrder = kwargs.pop('colorsOrder', defaultColorsOrder)
# Convert to list if only one entry
if __isstr(colorsOrder):
colorsOrder = [colorsOrder]
if not all(color in colorsDict.keys() for color in colorsOrder):
raise ValueError("Bad color name in colorsOrder=%s, pick from %s" %
(colorsOrder, colorsDict.keys()))
colors = [colorsDict[cs] for cs in colorsOrder]
# Highlight a point (or several) in parameter space by lines
truthColors = kwargs.pop('truthColors', truthsDefaultColors)
truthLineStyles = kwargs.pop('truthLineStyles', truthsDefaultLS)
truths = kwargs.pop('truths', None)
if truths is not None:
# Convert to list if needed
if len(np.shape(truths)) == 1:
truths = [truths]
truths = np.array(truths)
        assert np.shape(truths)[0] <= len(truthColors), \
            ("More truths than available colors. " +
             "Set colors with truthColors = [colors...]")
        assert np.shape(truths)[0] <= len(truthLineStyles), \
            ("More truths than available line styles. " +
             "Set line styles with truthLineStyles = [ls...]")
assert np.shape(truths)[1] == nDim, \
"Each list of truths must match number of parameters"
# Labels for the different truth lines
truthLabels = kwargs.pop('truthLabels', None)
if truthLabels is not None:
# Convert to list if only one label
if __isstr(truthLabels):
truthLabels = [truthLabels]
# Check that it's a list of strings
assert all(__isstr(s) for s in truthLabels), \
"truthLabels must be list of strings"
assert len(truthLabels) == len(truths), \
"truthLabels mismatch with number of truths"
# Show Gaussian PDF on 1d plots (to show Gaussian priors)
priors = kwargs.pop('priors', None)
if priors is not None:
if haveScipy:
assert len(priors) == nDim, \
"List of priors must match number of parameters"
for i in range(nDim):
if priors[i]:
assert priors[i][1] > 0, "Prior width must be positive"
else:
warnings.warn("Gaussian priors requires scipy, ignoring priors.",
UserWarning)
priors = None
# Manage the sample point weights
weights = kwargs.pop('weights', None)
if weights is None:
# Set unit weights if no weights are provided
weights = [np.ones(len(chains[i])) for i in range(nChains)]
else:
if len(weights) == len(chains[0]):
weights = [weights]
for i in range(nChains):
            assert len(weights[i]) == len(chains[i]), \
                ("Mismatch in chain/weights #%d: len(chain) %d, "
                 "len(weights) %d" % (i, len(chains[i]), len(weights[i])))
# Set plotName to save the plot to plotName
plotName = kwargs.pop('plotName', None) # Um... the name of the plot?!
if plotName is not None:
assert __isstr(plotName), "plotName must be a string type"
# Which contour levels to show
nContourLevels = kwargs.pop('nContourLevels', 2)
assert nContourLevels in [1, 2, 3], "nContourLevels must be 1, 2, or 3"
# Maintain support for older naming convention.
# TODO: Remove in next major version
deprecated_nContourLevels = kwargs.pop('nConfidenceLevels', False)
if deprecated_nContourLevels:
warnings.warn("nConfidenceLevels has been replaced by nContourLevels",
DeprecationWarning)
nContourLevels = deprecated_nContourLevels
assert nContourLevels in [1, 2, 3], "nContourLevels must be 1, 2, or 3"
# 2d contour levels: (68%, 95%, 99%) or sigma (39%, 86%, 99%)
confLevels = (.3173, .0455, .0027)
sigmaContourLevels = kwargs.pop('sigmaContourLevels', False)
if sigmaContourLevels:
confLevels = (.6065, .1353, .0111)
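    # For reference: (.3173, .0455, .0027) are the two-sided 1/2/3-sigma tail
    # fractions of a 1d Gaussian, while (.6065, .1353, .0111) equal
    # exp(-k**2/2) for k = 1, 2, 3, i.e. the mass outside the k-sigma contour
    # of a 2d Gaussian.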
# Maintain support for older naming convention.
# TODO: Remove in next major version
deprecated_ConfLevels = kwargs.pop('gaussianConfLevels', False)
if deprecated_ConfLevels:
warnings.warn("gaussianConfLevels replaced by sigmaContourLevels",
DeprecationWarning)
confLevels = (.6065, .1353, .0111)
deprecated_ConfLevels = kwargs.pop('GaussianConfLevels', False)
if deprecated_ConfLevels:
warnings.warn("GaussianConfLevels replaced by sigmaContourLevels",
DeprecationWarning)
confLevels = (.6065, .1353, .0111)
# Data binning and smoothing
nBins = kwargs.pop('nBins', 30)
smoothingKernel = kwargs.pop('smoothingKernel', 1)
if (smoothingKernel != 0) and (not haveScipy):
warnings.warn("Scipy not installed. Curves will not be smoothed.",
UserWarning)
smoothingKernel = 0
if smoothingKernel >= nBins/10:
        warnings.warn("Wow, that's a huge smoothing kernel! You sure you "
                      "want its scale to be %.1f percent of the plot?!"
                      % (100.*float(smoothingKernel)/float(nBins)),
                      UserWarning)
# Filled contours and histograms
filledPlots = kwargs.pop('filledPlots', True)
    # Plot the 2d density of points
plotDensity = kwargs.pop('plotDensity', False)
# Figure size: choose size to fit journal, use reasonable default, or
# provide your own
figureSize = kwargs.pop('figureSize', None)
if figureSize is None:
        # If no figure size is given, use a resolution of 70 pixels per panel
figureWidth = nDim*70. / mplPPI
else:
# User-defined width=height in inches
if not __isstr(figureSize):
figureWidth = figureSize
else:
# Choose from a couple of presets to fit your publication
if figureSize in figSizeDict.keys():
figureWidth = figSizeDict[figureSize]
else:
raise ValueError("figureSize %s unknown" % figureSize)
# Space between panels
panelSpacing = kwargs.pop('panelSpacing', 'tight')
# Marker lines in legend
showLegendMarker = False
legendMarker = kwargs.pop('legendMarker', 'Auto')
assert legendMarker in ('All', 'None', 'Auto'), \
"legendMarker must be one of 'All', 'None', 'Auto'"
if legendMarker == 'Auto':
if truthLabels is not None:
if len(truthLabels) > 1:
showLegendMarker = True
elif legendMarker == 'All':
showLegendMarker = True
# Plot 1d histograms
do1dPlots = kwargs.pop('do1dPlots', True)
# Plot ONLY 1d histograms
doOnly1dPlot = kwargs.pop('doOnly1dPlot', False)
if doOnly1dPlot:
for i in range(nChains):
            assert chains[i].shape[1] == 1, \
                "For a 1d histogram, provide chains of shape (Npoints, 1)"
do1dPlots = True
# Set font in rcParams (just in the running kernel)
mathtextTypes = ['cm', 'stix', 'custom', 'stixsans', None]
mathTextFontSet = kwargs.pop('mathTextFontSet', 'stixsans')
assert mathTextFontSet in mathtextTypes, \
"mathTextFont options: 'cm', 'stix', 'custom', 'stixsans', None."
oldMathTextFontSet = plt.rcParams['mathtext.fontset']
if mathTextFontSet is not None:
plt.rcParams['mathtext.fontset'] = mathTextFontSet
holdRC = kwargs.pop('holdRC', False)
assert holdRC in [True, False], "holdRC must be True or False."
# Grab the custom fontdicts
# Default size is 9 for all labels.
defaultFontFamily = 'Arial'
defaultLabelFontSize = 9
defaultTickFontSize = 6
customLabelFont = kwargs.pop('customLabelFont', {})
if 'size' not in customLabelFont.keys():
customLabelFont['size'] = defaultLabelFontSize
if 'family' not in customLabelFont.keys():
customLabelFont['family'] = defaultFontFamily
customLegendFont = kwargs.pop('customLegendFont', {})
if 'size' not in customLegendFont.keys():
customLegendFont['size'] = defaultLabelFontSize
if 'family' not in customLegendFont.keys():
customLegendFont['family'] = defaultFontFamily
customTickFont = kwargs.pop('customTickFont', {})
if 'size' not in customTickFont.keys():
customTickFont['size'] = defaultTickFontSize
if 'family' not in customTickFont.keys():
customTickFont['family'] = defaultFontFamily
# Ticks require a FontProperties instead of a font dict
tickFontProps = mpl.font_manager.FontProperties(**customTickFont)
# Check to see if there are any remaining keyword arguments
    keys = ' '.join(kwargs.keys())
    if keys:
        raise NameError("illegal keyword arguments: " + keys)
# Define colormap
myColorMap = setCustomColorMaps(colors)
# Matplotlib and figure settings
axisColor = '#333333'
# Create the figure, and empty list for first column / last row
fig = plt.figure(figsize=(figureWidth, figureWidth))
axV, axH = [], []
# Minimum and maximum sample for each dimension
samplesMin = np.nanmin(np.array([np.nanmin(chains[k], axis=0)
for k in range(nChains)]), axis=0)
samplesMax = np.nanmax(np.array([np.nanmax(chains[k], axis=0)
for k in range(nChains)]), axis=0)
# Left and right panel boundaries
# Use data limits and override if user-defined
panelAxRange = np.vstack((samplesMin, samplesMax)).T
for i in range(nDim):
if paramRanges is not None:
if paramRanges[i]:
panelAxRange[i] = paramRanges[i]
xTicks, yTicks = nDim*[None], nDim*[None]
# 2D contour plots
if not doOnly1dPlot:
for i in range(nDim): # row
for j in range(nDim): # column
if j < i:
# Create subplot
if do1dPlots:
ax = fig.add_subplot(nDim, nDim, (i*nDim)+j+1)
else:
ax = fig.add_subplot(nDim-1, nDim-1,
((i-1)*(nDim-1))+j+1)
# Draw contours and truths
# Extract 2d chains
chainsForPlot2D = [[chains[k][:, j], chains[k][:, i]]
for k in range(nChains)]
# Extract 2d truths
truthsForPlot2D = None
if truths is not None:
truthsForPlot2D = [[truths[k, i], truths[k, j]]
for k in range(len(truths))]
# Plot!
ax = __plot2d(ax, nChains, chainsForPlot2D, weights, nBins,
smoothingKernel, filledPlots, colors,
nContourLevels, confLevels, truthsForPlot2D,
truthColors, truthLineStyles, plotDensity,
myColorMap)
# Range
ax.set_xlim(panelAxRange[j][0], panelAxRange[j][1])
ax.set_ylim(panelAxRange[i][0], panelAxRange[i][1])
# Tick labels without offset and scientific notation
ax.get_xaxis().get_major_formatter().set_useOffset(False)
ax.get_xaxis().get_major_formatter().set_scientific(False)
ax.get_yaxis().get_major_formatter().set_useOffset(False)
ax.get_yaxis().get_major_formatter().set_scientific(False)
# x-labels at bottom of plot only
if i == nDim-1:
if paramNames is not None:
ax.set_xlabel(paramNames[j],
fontdict=customLabelFont)
else:
ax.get_xaxis().set_ticklabels([])
# y-labels for left-most panels only
if j == 0:
if paramNames is not None:
ax.set_ylabel(paramNames[i],
fontdict=customLabelFont)
else:
ax.get_yaxis().set_ticklabels([])
# Panel layout
ax.grid(False)
try:
# This is the matplotlib 2.0 way of doing things
ax.set_facecolor('w')
except AttributeError:
# Fallback to matplotlib 1.5
ax.set_axis_bgcolor('w')
for axis in ['top', 'bottom', 'left', 'right']:
ax.spines[axis].set_color(axisColor)
ax.spines[axis].set_linewidth(1)
# Global tick properties
ax.tick_params(direction='in', top=True, right=True, pad=4,
colors=axisColor, size=4, width=.5,
labelsize=6)
# get x limits
deltaX = panelAxRange[j, 1]-panelAxRange[j, 0]
# Ticks x axis
if xTicks[j] is None:
# 5 ticks max
ax.xaxis.set_major_locator(mtik.MaxNLocator(5))
# Remove xticks that are too close to panel edge
LoHi = (panelAxRange[j, 0]+.05*deltaX,
panelAxRange[j, 1]-.05*deltaX)
tickLocs = ax.xaxis.get_ticklocs()
idx = np.where((tickLocs > LoHi[0]) &
(tickLocs < LoHi[1]))[0]
xTicks[j] = tickLocs[idx]
ax.xaxis.set_ticks(xTicks[j])
# get y limits
deltaY = panelAxRange[i, 1]-panelAxRange[i, 0]
# Ticks y axis
if yTicks[i] is None:
# 5 ticks max
ax.yaxis.set_major_locator(mtik.MaxNLocator(5))
                        # Remove yticks that are too close to panel edge
LoHi = (panelAxRange[i, 0]+.05*deltaY,
panelAxRange[i, 1]-.05*deltaY)
tickLocs = ax.yaxis.get_ticklocs()
idx = np.where((tickLocs > LoHi[0]) &
(tickLocs < LoHi[1]))[0]
yTicks[i] = tickLocs[idx]
ax.yaxis.set_ticks(yTicks[i])
                    # Calculate the position for shifting the x-axis tick
                    # labels. Bump all the labels over just a tiny bit so it
                    # looks good! Default is 0.1 * tick spacing.
# Get the number of ticks to convert
# to coordinates of fraction of tick separation
numTicksX = len(xTicks[j])-1
# Transform the shift to data coords
shiftXdata = 1.0*shiftX*deltaX/numTicksX
# Rotate tick labels
for xLabel in ax.get_xticklabels():
if labelRotation[0]:
xLabel.set_rotation(tickAngle)
xLabel.set_horizontalalignment('right')
# Add a custom attribute to the tick label object
xLabel.custom_shift = shiftXdata
# Now monkey patch the label's set_x method to force it
# to shift the x labels when it gets called during
# render
def _mpx(self, x):
return mpl.text.Text.set_x(self,
x+self.custom_shift)
# Python 3 changes how this gets called
if PYVER >= 3:
xLabel.set_x = types.MethodType(_mpx, xLabel)
else:
xLabel.set_x = types.MethodType(_mpx, xLabel,
mpl.text.Text)
# Update the font if needed
xLabel.set_fontproperties(tickFontProps)
                    # Calculate the position for shifting the y-axis tick
                    # labels. Bump all the labels over just a tiny bit so it
                    # looks good! Default is 0.1 * tick spacing.
# Get the number of ticks to convert
# to coordinates of fraction of tick separation
numTicksY = len(yTicks[i])-1
shiftYdata = 1.0*shiftY*deltaY/numTicksY
for yLabel in ax.get_yticklabels():
if labelRotation[1]:
yLabel.set_rotation(tickAngle)
yLabel.set_verticalalignment('top')
# Add a custom attribute to the tick label object
yLabel.custom_shift = shiftYdata
                        # Now monkey patch the label's set_y method to force
                        # it to shift the y labels when it gets called during
                        # render
def _mpy(self, y):
return mpl.text.Text.set_y(self,
y+self.custom_shift)
if PYVER >= 3:
yLabel.set_y = types.MethodType(_mpy, yLabel)
else:
yLabel.set_y = types.MethodType(_mpy, yLabel,
mpl.text.Text)
# Update the font if needed
yLabel.set_fontproperties(tickFontProps)
# First column and last row are needed to align labels
if j == 0:
axV.append(ax)
if i == nDim-1:
axH.append(ax)
if do1dPlots:
# 1D histograms
for i in range(nDim):
# Create subplot
ax = fig.add_subplot(nDim, nDim, (i*nDim)+i+1)
# Plot histograms, truths, Gaussians
# Extract 1d chains
chainsForPlot1D = [chains[k][:, i] for k in range(nChains)]
# Extract 1d truths
truthsForPlot1D = None
if truths is not None:
truthsForPlot1D = [truths[k, i] for k in range(len(truths))]
# Extract 1d prior
prior1d = None
if priors is not None:
if priors[i] and priors[i][1] > 0:
prior1d = priors[i]
# Plot!
ax = __plot1d(ax, nChains, chainsForPlot1D, weights, nBins,
smoothingKernel, filledPlots, colors,
truthsForPlot1D, truthColors, truthLineStyles,
prior1d, priorColor)
# Panel layout
ax.grid(False)
try:
# This is the matplotlib 2.0 way of doing things
ax.set_facecolor('w')
except AttributeError:
# Fallback to matplotlib 1.5
ax.set_axis_bgcolor('w')
for axis in ['top', 'bottom', 'left', 'right']:
ax.spines[axis].set_color(axisColor)
ax.spines[axis].set_linewidth(1)
# Global tick properties
ax.tick_params(direction='in', top=True, right=True, pad=4,
colors=axisColor, size=4, width=.5, labelsize=6)
# Tick labels without offset and scientific notation
ax.get_xaxis().get_major_formatter().set_useOffset(False)
ax.get_xaxis().get_major_formatter().set_scientific(False)
# No ticks or labels on y-axes, lower limit 0
ax.yaxis.set_ticks([])
ax.set_ylim(bottom=0)
ax.xaxis.set_ticks_position('bottom')
# x-label for bottom-right panel only and a scaling hack
if i == nDim-1:
if paramNames is not None:
ax.set_xlabel(paramNames[i], fontdict=customLabelFont)
# Hack to get scaling to work for final 1D plot under MPL < 2.0
if (MPLVER < version.parse('2.0')) and (smoothingKernel == 0):
max_y = 0
# Loop through the children, find the polygons
# and extract the maximum y-value
for child in ax.get_children():
if type(child) == plt.Polygon:
child_max_y = child.get_xy()[:, 1].max()
if child_max_y > max_y:
max_y = child_max_y
# Set upper limit to be 5% above maximum y-value
ax.set_ylim(0, max_y*1.05)
else:
ax.get_xaxis().set_ticklabels([])
# Set x range
ax.set_xlim(panelAxRange[i])
# Calculate limits and tick spacing
deltaX = panelAxRange[i, 1]-panelAxRange[i, 0]
# Ticks x axis
if i == nDim-1:
# 5 ticks max
ax.xaxis.set_major_locator(mtik.MaxNLocator(5))
# Remove xticks that are too close to panel edge
LoHi = (panelAxRange[i, 0]+.05*deltaX,
panelAxRange[i, 1]-.05*deltaX)
tickLocs = ax.xaxis.get_ticklocs()
idx = np.where((tickLocs > LoHi[0]) & (tickLocs < LoHi[1]))[0]
xTicks[i] = tickLocs[idx]
ax.xaxis.set_ticks(xTicks[i])
# Calculate the position for shifting the x-axis tick labels
# Bump all the labels over just a tiny bit so
# it looks good! Default is 0.1 * tick spacing
# Get the number of ticks to convert
# to coordinates of fraction of tick separation
numTicksX = len(xTicks[i])-1
shiftXdata = 1.0*shiftX*deltaX/numTicksX
# Rotate tick labels
for xLabel in ax.get_xticklabels():
if labelRotation[0]:
xLabel.set_rotation(tickAngle)
xLabel.set_horizontalalignment('right')
# Add a custom attribute to the tick label object
xLabel.custom_shift = shiftXdata
# Now monkey patch the label's set_x method to force it to
# shift the x labels when it gets called during render
def _mpx(self, x):
return mpl.text.Text.set_x(self, x+self.custom_shift)
if PYVER >= 3:
xLabel.set_x = types.MethodType(_mpx, xLabel)
else:
xLabel.set_x = types.MethodType(_mpx, xLabel,
mpl.text.Text)
# Update the font if needed
xLabel.set_fontproperties(tickFontProps)
# First column and last row are needed to align labels
if i == 0:
axV.append(ax)
elif i == nDim-1:
axH.append(ax)
# Align labels if there is more than one panel
if len(axH) > 1:
fig.canvas.draw()
bboxSize = np.empty(len(axH))
try:
            # This is the canonical way to get the renderer, which the OSX
            # backend started supporting in mpl 2.0. Older versions of the
            # OSX backend don't implement the method and instead expose the
            # renderer object directly. Try the right way first, then fall
            # through to the "wrong" way for older versions or backends
            # that have yet to implement it.
renderer = fig.canvas.get_renderer()
except AttributeError:
# If the get_renderer method doesn't exist, then try accessing the
# renderer directly.
renderer = fig.canvas.renderer
# x labels
# Get label length of the bottom row
for i in range(len(axH)):
bboxTickLabel = (axH[i].xaxis.get_ticklabel_extents(renderer)[0]
.get_points())
bboxSize[i] = bboxTickLabel[1, 1]-bboxTickLabel[0, 1]
panelWidth = (axH[i].get_window_extent()
.transformed(fig.dpi_scale_trans.inverted()).width)
# Apply longest spacing to all panels in last row
longestTickLabel = 3+np.amax(bboxSize)
loc = (longestTickLabel/mplPPI/panelWidth)
for i in range(len(axH)):
axH[i].get_xaxis().set_label_coords(.5, -loc)
# y labels
# Get label length of the left column
for i in range(len(axV)):
bboxTickLabel = (axV[i].yaxis.get_ticklabel_extents(renderer)[0]
.get_points())
bboxSize[i] = bboxTickLabel[1, 0]-bboxTickLabel[0, 0]
panelHeight = (axV[i].get_window_extent()
.transformed(fig.dpi_scale_trans.inverted()).height)
# Apply longest spacing to all panels in first column
longestTickLabel = 2+np.amax(bboxSize)
loc = (longestTickLabel/mplPPI/panelHeight)
for i in range(len(axV)):
axV[i].get_yaxis().set_label_coords(-loc, .5)
# Legend
if (chainLabels is not None) or (truthLabels is not None):
# Dummy plot for label line color
labelColors = []
if not doOnly1dPlot:
ax = fig.add_subplot(nDim, nDim, nDim)
ax.axis('off')
else:
labelPanelRange = ax.get_xlim()
# Label the data sets
if chainLabels is not None:
# Label for each chain
for k in range(nChains):
ax.plot(0, 0, color=colors[k][0], lw=4, label=chainLabels[k])
labelColors.append(colors[k][0])
# Label the truth lines
if truthLabels is not None:
# Label for each truth
for k in range(len(truthLabels)):
ax.plot(0, 0, lw=1, color=truthColors[k], label=truthLabels[k],
ls=truthsDefaultLS[k])
labelColors.append(truthColors[k])
# Set xlim back to what the data wanted
if doOnly1dPlot:
ax.set_xlim(labelPanelRange)
# Legend and label colors according to plot
leg = plt.legend(loc='upper right', fancybox=True, handlelength=3,
prop=customLegendFont)
leg.get_frame().set_alpha(0.)
for color, text in zip(labelColors, leg.get_texts()):
text.set_color(color)
# Remove markers in legend
if showLegendMarker is not True:
for item in leg.legendHandles:
item.set_visible(False)
# Panel spacing, save to file (optional) and return
# Space between panels
space = 0
if panelSpacing == 'loose':
space = .05
fig.subplots_adjust(hspace=space)
fig.subplots_adjust(wspace=space)
# Save figure
if plotName is not None:
fig.savefig(plotName, bbox_inches='tight')
if not holdRC:
plt.rcParams['mathtext.fontset'] = oldMathTextFontSet
return fig
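# A minimal sketch (with illustrative names, not part of pygtc) of the
# monkey-patching idiom used above: types.MethodType binds a plain function
# to one specific instance, so only that tick label gets the shifted
# behaviour while every other Text object keeps the stock set_x/set_y.
#
#   import types
#   import matplotlib as mpl
#
#   def _shifted_set_x(self, x):
#       # relies on a custom_shift attribute attached to the instance
#       return mpl.text.Text.set_x(self, x + self.custom_shift)
#
#   label.custom_shift = 0.05                              # per-instance data
#   label.set_x = types.MethodType(_shifted_set_x, label)  # Python 3 form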
# Create single 1d panel
def __plot1d(ax, nChains, chains1d, weights, nBins, smoothingKernel,
filledPlots, colors, truths1d, truthColors, truthLineStyles,
prior1d, priorColor):
r"""Plot the 1d histogram and optional prior.
Parameters
----------
ax : matplotlib.pyplot.axis
Axis on which to plot the histogram(s)
nChains : int
How many chains are you passing?
chains1d : list-like
A list of `nChains` 1d chains: [chain1, chain2, etc...]
weights : list-like
A list of `nChains` weights.
nBins : int
How many histogram bins?
smoothingKernel : int
Number of bins to smooth over, 0 for no smoothing.
filledPlots : bool
Want the area under the curve filled in?
colors : list-like
List of `nChains` tuples. Each tuple must have at least two colors.
truths1d : list-like
List of truths to overplot on the histogram.
truthColors : list-like
One color for each truth.
truthLineStyles : list-like
One matplotlib linestyle specifier per truth.
prior1d : tuple
        Normal distribution parameters (mu, sigma)
priorColor : color
The color to plot the prior.
Note
----
You should really just call this from the plotGTC function unless you have
a strong need to work only with an axis instead of a figure...
"""
# 1D histogram
plotData = []
# With smoothing
if smoothingKernel > 0:
for k in reversed(range(nChains)):
# Is there a chain to plot?
if np.isnan(chains1d[k]).all():
plotData.append(None)
else:
# create 1d histogram
hist1d, edges = np.histogram(chains1d[k], weights=weights[k],
bins=nBins, **density_kw)
# Bin center between histogram edges
centers = (edges[1:]+edges[:-1])/2
# Filter data
plotData.append(scipy.ndimage.gaussian_filter1d(
(centers, hist1d), sigma=smoothingKernel))
if filledPlots:
# Filled smooth histogram
ax.fill_between(plotData[-1][0], plotData[-1][1], 0,
color=colors[k][1])
# Line for hidden histogram
for k in reversed(range(nChains)):
if plotData[nChains-1-k] is not None:
ax.plot(plotData[nChains-1-k][0], plotData[nChains-1-k][1],
lw=1, ls='-', color=colors[k][1])
# No smoothing
else:
if filledPlots:
for k in reversed(range(nChains)):
# Is there a chain to plot?
if not np.isnan(chains1d[k]).all():
# Filled stepfilled histograms
ax.hist(chains1d[k], weights=weights[k], **density_kw,
bins=nBins, histtype='stepfilled',
edgecolor='None', color=colors[k][1])
for k in reversed(range(nChains)):
# Is there a chain to plot?
if not np.isnan(chains1d[k]).all():
# Step curves for hidden histogram(s)
ax.hist(chains1d[k], weights=weights[k], **density_kw,
bins=nBins, histtype='step', color=colors[k][1])
# Truth line
if truths1d is not None:
for k in range(len(truths1d)):
if truths1d[k] is not None:
ax.axvline(truths1d[k], lw=1, color=truthColors[k],
ls=truthLineStyles[k])
# Gaussian prior
if prior1d is not None:
# Plot prior in -4 to +4 sigma range
arr = np.linspace(prior1d[0]-4*prior1d[1], prior1d[0]+4*prior1d[1], 40)
ax.plot(arr, norm.pdf(arr, prior1d[0], prior1d[1]),
lw=1, color=priorColor)
return ax
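# Sketch of the smoothing step in __plot1d, in isolation (sample data is
# illustrative; __plot1d actually filters the stacked (centers, hist) pair,
# here only the counts are filtered for clarity):
#
#   import numpy as np
#   import scipy.ndimage
#
#   samples = np.random.randn(10000)
#   hist, edges = np.histogram(samples, bins=30, density=True)
#   centers = (edges[1:]+edges[:-1])/2          # bin centers between edges
#   smooth = scipy.ndimage.gaussian_filter1d(hist, sigma=1)  # sigma in bins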
# Create single 2d panel
def __plot2d(ax, nChains, chains2d, weights, nBins, smoothingKernel,
filledPlots, colors, nContourLevels, confLevels, truths2d,
truthColors, truthLineStyles, plotDensity, myColorMap):
r"""Plot a 2D histogram in a an axis object and return the axis with plot.
Parameters
----------
ax : matplotlib.pyplot.axis
The axis on which to plot the 2D histogram
nChains : int
The number of chains to plot.
chains2d : list-like
A list of pairs of sample points in the form:
[[chain1_x, chain1_y], [chain2_x, chain2_y], ...].
weights : list-like
Weights for the chains2d.
nBins : int
Number of bins (per side) for the 2d histogram.
smoothingKernel : int
Size of the Gaussian smoothing kernel in bins.
Set to 0 for no smoothing.
filledPlots : bool
Just contours, or filled contours?
colors : list-like
List of `nChains` tuples. Each tuple must have at least nContourLevels
colors.
nContourLevels : int {1,2,3}
How many contour levels?
confLevels : list-like
List of at least `nContourLevels` values for contour levels.
truths2d : list-like
A list of nChains tuples of the form: [(truth1_x, truth1_y), etc...].
truthColors : list-like
A list of colors for the truths.
truthLineStyles : list-like
A list of matplotlib linestyle descriptors, one for each truth.
plotDensity : bool
Whether to show points density in addition to contours.
myColorMap : list-like
A list of `nChains` matplotlib colormap specifiers,
or actual colormaps.
Note
----
You should really just call this from the plotGTC function unless you have
a strong need to work only with an axis instead of a figure...
"""
# Empty arrays needed below
chainLevels = np.ones((nChains, nContourLevels+1))
extents = np.empty((nChains, 4))
# These are needed to compute the contour levels
nBinsFlat = np.linspace(0., nBins**2, nBins**2)
# The filled contour plots
plotData = []
# Draw filled contours in reversed order to have first chain in list on top
for k in reversed(range(nChains)):
# Is there a chain to plot?
if (np.isnan(chains2d[k][0]).all()) | (np.isnan(chains2d[k][1]).all()):
plotData.append(None)
else:
# Create 2d histogram
hist2d, xedges, yedges = np.histogram2d(
chains2d[k][0], chains2d[k][1], weights=weights[k], bins=nBins)
# image extent, needed below for contour lines
extents[k] = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
# Normalize
hist2d = hist2d/np.sum(hist2d)
# Cumulative 1d distribution
histOrdered = np.sort(hist2d.flat)
histCumulative = np.cumsum(histOrdered)
# Compute contour levels (from low to high for technical reasons)
for l in range(nContourLevels):
# Find location of contour level in 1d histCumulative
temp = np.interp(confLevels[l], histCumulative, nBinsFlat)
# Find "height" of contour level
chainLevels[k][nContourLevels-1-l] = np.interp(temp, nBinsFlat,
histOrdered)
# Apply Gaussian smoothing and plot filled contours if requested
if smoothingKernel > 0:
plotData.append(scipy.ndimage
.gaussian_filter(
hist2d.T, sigma=smoothingKernel))
else:
plotData.append(hist2d.T)
if filledPlots:
xbins = (xedges[1:]+xedges[:-1])/2
ybins = (yedges[1:]+yedges[:-1])/2
ax.contourf(xbins, ybins, plotData[-1], levels=chainLevels[k],
colors=colors[k][:nContourLevels][::-1])
# Plot density
if plotDensity:
if filledPlots:
ax.imshow(hist2d.T, extent=extents[k], origin='lower',
cmap=myColorMap[k], aspect='auto',
clim=(0, chainLevels[k][0]))
else:
ax.imshow(hist2d.T, extent=extents[k], origin='lower',
cmap=myColorMap[k], aspect='auto')
# Draw contour lines in order to see contours lying on top of each other
for k in range(nChains):
if plotData[nChains-1-k] is not None:
for l in range(nContourLevels):
ax.contour(plotData[nChains-1-k],
[chainLevels[k][nContourLevels-1-l]],
extent=extents[k], origin='lower',
linewidths=1, colors=colors[k][l])
# Truth lines
if truths2d is not None:
for k in range(len(truths2d)):
# horizontal line
if truths2d[k][0] is not None:
ax.axhline(truths2d[k][0], lw=1, color=truthColors[k],
ls=truthLineStyles[k])
# vertical line
if truths2d[k][1] is not None:
ax.axvline(truths2d[k][1], lw=1, color=truthColors[k],
ls=truthLineStyles[k])
return ax
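# Worked toy example of the contour-level computation in __plot2d: sort the
# normalised bin heights, accumulate them, and interpolate to find the height
# whose super-level set encloses the requested probability mass (confLevels
# holds tail probabilities, e.g. 1-0.68 ~ 0.32 for a 1-sigma contour):
#
#   import numpy as np
#
#   hist = np.array([[.05, .10], [.25, .60]])  # normalised, sums to 1
#   histOrdered = np.sort(hist.flat)           # [0.05, 0.10, 0.25, 0.60]
#   histCumulative = np.cumsum(histOrdered)    # [0.05, 0.15, 0.40, 1.00]
#   nBinsFlat = np.linspace(0., 4, 4)
#   temp = np.interp(.32, histCumulative, nBinsFlat)
#   level = np.interp(temp, nBinsFlat, histOrdered)   # ~0.20
#   # Bins above ~0.20 (here 0.25 and 0.60) hold >= 68% of the mass.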
# Custom colormap for density plots
def CustomCmap(to_rgb):
# from color r,g,b
r1, g1, b1 = 1, 1, 1
# to color r,g,b
r2, g2, b2 = mplcolors.hex2color(to_rgb)
cdict = {'red': ((0, r1, r1), (1, r2, r2)),
'green': ((0, g1, g1), (1, g2, g2)),
'blue': ((0, b1, b1), (1, b2, b2))}
cmap = LinearSegmentedColormap('custom_cmap', cdict)
return cmap
def setCustomColorMaps(colors):
customColorMaps = [CustomCmap(color[0]) for color in colors]
return customColorMaps
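# The cdict built in CustomCmap uses matplotlib's segment-data format: per
# channel a list of (position, value_below, value_above) anchors, here just
# two anchors ramping from white at 0 to the target colour at 1. Quick check
# (colour value is illustrative):
#
#   cmap = CustomCmap('#ff0000')   # white -> red
#   cmap(0.0)                      # ~(1.0, 1.0, 1.0, 1.0)
#   cmap(1.0)                      # ~(1.0, 0.0, 0.0, 1.0)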
# Check for basestring in python 2/3 compatible way
def __isstr(s):
try:
isinstance("", basestring)
return isinstance(s, basestring)
except NameError:
return isinstance(s, str)
| SebastianBocquet/pygtc | pygtc/pygtc.py | Python | mit | 53,321 | [
"Gaussian"
] | 89754d0b0884ddf55c6ea8a241f8766170fcd60e2cc64b8a13970a0c057282d5 |
#
# CoCoEachBlockUnique.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from pynestml.cocos.co_co import CoCo
from pynestml.meta_model.ast_neuron import ASTNeuron
from pynestml.utils.ast_helper import ASTHelper
from pynestml.utils.logger import Logger, LoggingLevel
from pynestml.utils.messages import Messages
from pynestml.visitors.ast_visitor import ASTVisitor
class CoCoEachBlockUniqueAndDefined(CoCo):
"""
This context condition ensures that each block is defined at most once.
Not allowed:
state:
...
end
...
state:
...
end
"""
name = 'each block unique and defined'
description = """This context condition ensures that each block is defined at most once.\n
Not allowed:\n
state:\n
...\n
end\n
...\n
state:\n
...\n
end\n
"""
def check_co_co(self, node):
"""
        Checks whether each block is defined at most once.
:param node: a single neuron.
:type node: ASTNeuron
"""
assert (node is not None and isinstance(node, ASTNeuron)), \
'(PyNestML.CoCo.BlocksUniques) No or wrong type of neuron provided (%s)!' % type(node)
visitor = BlockCounterChecker()
node.accept(visitor)
if visitor.report['state'] > 1:
code, message = Messages.get_block_not_defined_correctly('State', False)
Logger.log_message(code=code, message=message, neuron=node, error_position=node.get_source_position(),
log_level=LoggingLevel.ERROR)
# check that update block is defined exactly once
if visitor.report['update'] > 1:
code, message = Messages.get_block_not_defined_correctly('Update', False)
Logger.log_message(code=code, message=message, neuron=node, error_position=node.get_source_position(),
log_level=LoggingLevel.ERROR)
if visitor.report['update'] == 0:
code, message = Messages.get_block_not_defined_correctly('Update', True)
Logger.log_message(code=code, message=message, neuron=node, error_position=node.get_source_position(),
log_level=LoggingLevel.ERROR)
# check that parameters block is defined at most once
if visitor.report['parameters'] > 1:
code, message = Messages.get_block_not_defined_correctly('Parameters', False)
Logger.log_message(code=code, message=message, neuron=node, error_position=node.get_source_position(),
log_level=LoggingLevel.ERROR)
# check that internals block is defined at most once
if visitor.report['internals'] > 1:
code, message = Messages.get_block_not_defined_correctly('Internals', False)
Logger.log_message(code=code, message=message, neuron=node, error_position=node.get_source_position(),
log_level=LoggingLevel.ERROR)
# check that equations block is defined at most once
if visitor.report['equations'] > 1:
code, message = Messages.get_block_not_defined_correctly('Equations', False)
Logger.log_message(code=code, message=message, neuron=node, error_position=node.get_source_position(),
log_level=LoggingLevel.ERROR)
# check that input block is defined exactly once
if visitor.report['input'] > 1:
code, message = Messages.get_block_not_defined_correctly('Input', False)
Logger.log_message(code=code, message=message, neuron=node, error_position=node.get_source_position(),
log_level=LoggingLevel.ERROR)
if visitor.report['input'] == 0:
code, message = Messages.get_block_not_defined_correctly('Input', True)
Logger.log_message(code=code, message=message, neuron=node, error_position=node.get_source_position(),
log_level=LoggingLevel.ERROR)
# check that output block is defined exactly once
if visitor.report['output'] > 1:
code, message = Messages.get_block_not_defined_correctly('Output', False)
Logger.log_message(code=code, message=message, neuron=node, error_position=node.get_source_position(),
log_level=LoggingLevel.ERROR)
if visitor.report['output'] == 0:
code, message = Messages.get_block_not_defined_correctly('Output', True)
Logger.log_message(code=code, message=message, neuron=node, error_position=node.get_source_position(),
log_level=LoggingLevel.ERROR)
# check the initial values block
if visitor.report['init_values'] > 1:
code, message = Messages.get_block_not_defined_correctly('Initial Values', False)
Logger.log_message(code=code, message=message, neuron=node, error_position=node.get_source_position(),
log_level=LoggingLevel.ERROR)
# check the constraints block
if visitor.report['constraints'] > 1:
code, message = Messages.get_block_not_defined_correctly('Constraints', False)
Logger.log_message(code=code, message=message, neuron=node, error_position=node.get_source_position(),
log_level=LoggingLevel.ERROR)
return
class BlockCounterChecker(ASTVisitor):
def __init__(self):
super(BlockCounterChecker, self).__init__()
self.report = {
'state': 0, 'update': 0, 'input': 0, 'output': 0, 'constraints': 0,
'parameters': 0, 'internals': 0, 'equations': 0, 'init_values': 0
}
def visit_block_with_variables(self, node):
if node.is_initial_values:
self.report['init_values'] += 1
if node.is_internals:
self.report['internals'] += 1
if node.is_parameters:
self.report['parameters'] += 1
if node.is_state:
self.report['state'] += 1
def visit_equations_block(self, node):
self.report['equations'] += 1
def visit_update_block(self, node):
self.report['update'] += 1
def visit_input_block(self, node):
self.report['input'] += 1
def visit_output_block(self, node):
self.report['output'] += 1
def visit_constraints_block(self, node):
self.report['constraints'] += 1
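# A minimal sketch of how this context condition is exercised; in PyNestML
# the CoCos are normally invoked by the frontend, and `neuron` below stands
# for an ASTNeuron produced by the model parser:
#
#   from pynestml.cocos.co_co_each_block_unique_and_defined import (
#       CoCoEachBlockUniqueAndDefined)
#
#   CoCoEachBlockUniqueAndDefined().check_co_co(neuron)
#   # duplicated or missing blocks are reported through Logger as ERRORs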
| kperun/nestml | pynestml/cocos/co_co_each_block_unique_and_defined.py | Python | gpl-2.0 | 7,234 | [
"NEURON"
] | 3887fa14180b970a814f0644e560cb55b7c992d3c2aacc28d8a3f0474cdc8762 |
#!/usr/bin/env python3
import curses
import logging
import os
import re
import time
from socket import socket, socket_collection  # local socket.py (provides socket_collection), not the stdlib module
class screen:
def __init__(self, stdscr, sockets, config):
self.scr = stdscr
self.sockets = sockets
self.config = config
def refresh(self):
self.scr.clear()
i = 1
for sock in self.sockets.all.values():
is_gone, is_deleted = self.sockets.is_gone(sock, self.config)
if is_deleted: continue
self.draw_socket(0, i, sock,
self.sockets.is_new(sock, self.config), is_gone)
i += 1
self.scr.refresh()
def draw_socket(self, x, y, sock, is_new, is_gone):
local_addr = sock.local_addr
for name, addr in devices.items():
if local_addr[:len(addr)] == addr:
local_addr = name + local_addr[len(addr):]
break
        # Abbreviate TCP state names for compact display; unmapped states
        # (e.g. "CLOSE") are shown as-is.
        state = {
            'ESTABLISHED': 'EST', 'TIME_WAIT': 'TIW', 'FIN_WAIT1': 'FW1',
            'FIN_WAIT2': 'FW2', 'CLOSE_WAIT': 'CLW', 'LISTEN': 'LSN',
            'SYN_SENT': 'SYS', 'SYN_RECV': 'SYR', 'LAST_ACK': 'ACK',
            'CLOSING': 'CLO', 'UNKNOWN': 'UNK',
        }.get(sock.state, sock.state)
timer1 = sock.timer1
if timer1 == 'on': timer1 = 'r'
elif timer1 == 'off': timer1 = '.'
elif timer1 == 'keepalive': timer1 = 'k'
elif timer1 == 'timewait': timer1 = 'w'
text = '%21s %10s ' % (
sock.foreign_addr + screen._padding(sock.foreign_addr, 5),
local_addr + screen._padding(local_addr, 5)
)
self.scr.addstr(y, x, text,
screen._attribute(sock.updated, '', is_new, is_gone))
x += len(text)
text = '%5d ' % sock.recvq
self.scr.addstr(y, x, text,
screen._attribute(sock.updated, 'recvq', is_new, is_gone))
x += len(text)
text = '%5d ' % sock.sendq
self.scr.addstr(y, x, text,
screen._attribute(sock.updated, 'sendq', is_new, is_gone))
x += len(text)
text = '%3s ' % state
self.scr.addstr(y, x, text,
screen._attribute(sock.updated, 'state', is_new, is_gone))
x += len(text)
text = '%s ' % timer1
self.scr.addstr(y, x, text,
screen._attribute(sock.updated, 'timer1', is_new, is_gone))
x += len(text)
text = '%5d ' % sock.timer2[0]
self.scr.addstr(y, x, text,
screen._attribute(sock.updated, 'timer2[0]', is_new, is_gone))
x += len(text)
text = '%2d ' % sock.timer2[1]
self.scr.addstr(y, x, text,
screen._attribute(sock.updated, 'timer2[1]', is_new, is_gone))
x += len(text)
text = '%1d ' % sock.timer2[2]
self.scr.addstr(y, x, text,
screen._attribute(sock.updated, 'timer2[2]', is_new, is_gone))
x += len(text)
@staticmethod
def _padding(str, max_width):
idx = str.index(':')
return ' ' * (max_width - (len(str) - idx - 1))
@staticmethod
def _attribute(lst, val, is_new, is_gone):
if is_gone: return curses.color_pair(1) #A_BLINK #A_DIM
elif is_new: return curses.color_pair(3)
elif val in lst: return curses.color_pair(3) #A_BOLD
return curses.A_NORMAL #color_pair(7) # curses.A_NORMAL
def get_network_devices():
with os.popen("/sbin/ifconfig") as out:
devices = {'': '0.0.0.0'}
result = out.read()
pattern = re.compile('^([a-z0-9]+).*\n *inet addr:([0-9.]+)',
re.MULTILINE)
for match in pattern.finditer(result):
devices[match.group(1)] = match.group(2)
return devices
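# The pattern above targets the classic (pre-iproute2) ifconfig layout; a
# sample (illustrative) block it matches:
#
#   eth0      Link encap:Ethernet  HWaddr 00:11:22:33:44:55
#             inet addr:192.168.1.10  Bcast:192.168.1.255  Mask:255.255.255.0
#
# Group 1 captures the device name at the start of a line ('eth0'), group 2
# the dotted quad after 'inet addr:' on the next line. Newer ifconfig output
# ('inet 192.168.1.10') would not match.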
def update():
global i
with os.popen("netstat -ano") as out:
sockets.visit_reset()
while True:
line = out.readline()
if not line:
break
words = line.split()
if words[0] == 'Active' or words[0] == 'Proto' or words[0] == 'unix':
continue
if words[0] == 'tcp6' or words[0] == 'udp' or words[0] == 'udp6':
# TODO: handle these sockets, too
continue
sock = socket(words)
sockets.update(sock)
sockets.visit(sock)
for sock in sockets.unvisited():
sockets.remove(sock)
def handle_input(stdscr):
key = stdscr.getch()
if key == ord('q'):
return True
return False
def main(stdscr):
curses.use_default_colors()
for i in range(0, curses.COLORS):
curses.init_pair(i, i, -1)
curses.curs_set(0)
term = screen(stdscr, sockets, config)
while True:
start = time.time()
update()
term.refresh()
elapsed = time.time() - start
curses.halfdelay(int(10 * (config.UPDATE_INTERVAL - elapsed)))
if handle_input(stdscr): break
logging.basicConfig(filename='debug.log', level=logging.DEBUG)
class Config:
pass
config = Config()
config.HIGHLIGHT_DURATION = 5
config.UPDATE_INTERVAL = 2
sockets = socket_collection()
devices = get_network_devices()
curses.wrapper(main)
| maczniak/dalshabet | dalshabet.py | Python | apache-2.0 | 4,604 | [
"VisIt"
] | 293a8828f117f38afa596bea6d4010f8ea309b92b185719ce496056b53e184eb |
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
import nipype.interfaces.camino as camino
import nipype.interfaces.fsl as fsl
import nipype.interfaces.camino2trackvis as cam2trk
import nipype.algorithms.misc as misc
from ...misc.utils import get_affine, get_data_dims, get_vox_dims
def create_camino_dti_pipeline(name="dtiproc"):
"""Creates a pipeline that does the same diffusion processing as in the
:doc:`../../users/examples/dmri_camino_dti` example script. Given a diffusion-weighted image,
b-values, and b-vectors, the workflow will return the tractography
computed from diffusion tensors and from PICo probabilistic tractography.
Example
-------
>>> import os
>>> nipype_camino_dti = create_camino_dti_pipeline("nipype_camino_dti")
>>> nipype_camino_dti.inputs.inputnode.dwi = os.path.abspath('dwi.nii')
>>> nipype_camino_dti.inputs.inputnode.bvecs = os.path.abspath('bvecs')
>>> nipype_camino_dti.inputs.inputnode.bvals = os.path.abspath('bvals')
>>> nipype_camino_dti.run() # doctest: +SKIP
Inputs::
inputnode.dwi
inputnode.bvecs
inputnode.bvals
Outputs::
outputnode.fa
outputnode.trace
outputnode.tracts_pico
outputnode.tracts_dt
outputnode.tensors
"""
inputnode1 = pe.Node(interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), name="inputnode1")
"""
Setup for Diffusion Tensor Computation
--------------------------------------
In this section we create the nodes necessary for diffusion analysis.
First, the diffusion image is converted to voxel order.
"""
image2voxel = pe.Node(interface=camino.Image2Voxel(), name="image2voxel")
fsl2scheme = pe.Node(interface=camino.FSL2Scheme(), name="fsl2scheme")
fsl2scheme.inputs.usegradmod = True
"""
Second, diffusion tensors are fit to the voxel-order data.
"""
dtifit = pe.Node(interface=camino.DTIFit(),name='dtifit')
"""
Next, a lookup table is generated from the schemefile and the
signal-to-noise ratio (SNR) of the unweighted (q=0) data.
"""
dtlutgen = pe.Node(interface=camino.DTLUTGen(), name="dtlutgen")
dtlutgen.inputs.snr = 16.0
dtlutgen.inputs.inversion = 1
"""
In this tutorial we implement probabilistic tractography using the PICo algorithm.
PICo tractography requires an estimate of the fibre direction and a model of its
uncertainty in each voxel; this is produced using the following node.
"""
picopdfs = pe.Node(interface=camino.PicoPDFs(), name="picopdfs")
picopdfs.inputs.inputmodel = 'dt'
"""
    An FSL BET node creates a brain mask from the diffusion image, for seeding the PICo tractography.
"""
bet = pe.Node(interface=fsl.BET(), name="bet")
bet.inputs.mask = True
"""
Finally, tractography is performed.
First DT streamline tractography.
"""
trackdt = pe.Node(interface=camino.TrackDT(), name="trackdt")
"""
    Now camino's Probabilistic Index of Connectivity algorithm.
In this tutorial, we will use only 1 iteration for time-saving purposes.
"""
trackpico = pe.Node(interface=camino.TrackPICo(), name="trackpico")
trackpico.inputs.iterations = 1
"""
Currently, the best program for visualizing tracts is TrackVis. For this reason, a node is included to convert the raw tract data to .trk format. Solely for testing purposes, another node is added to perform the reverse.
"""
cam2trk_dt = pe.Node(interface=cam2trk.Camino2Trackvis(), name="cam2trk_dt")
cam2trk_dt.inputs.min_length = 30
cam2trk_dt.inputs.voxel_order = 'LAS'
cam2trk_pico = pe.Node(interface=cam2trk.Camino2Trackvis(), name="cam2trk_pico")
cam2trk_pico.inputs.min_length = 30
cam2trk_pico.inputs.voxel_order = 'LAS'
"""
    Tracts can also be converted to VTK and OOGL formats, for use in programs such as GeomView and ParaView, using the following two nodes.
"""
#vtkstreamlines = pe.Node(interface=camino.VtkStreamlines(), name="vtkstreamlines")
#procstreamlines = pe.Node(interface=camino.ProcStreamlines(), name="procstreamlines")
#procstreamlines.inputs.outputtracts = 'oogl'
"""
We can also produce a variety of scalar values from our fitted tensors. The following nodes generate the fractional anisotropy and diffusivity trace maps and their associated headers.
"""
fa = pe.Node(interface=camino.ComputeFractionalAnisotropy(),name='fa')
#md = pe.Node(interface=camino.MD(),name='md')
trace = pe.Node(interface=camino.ComputeTensorTrace(),name='trace')
dteig = pe.Node(interface=camino.ComputeEigensystem(), name='dteig')
analyzeheader_fa = pe.Node(interface= camino.AnalyzeHeader(), name = "analyzeheader_fa")
analyzeheader_fa.inputs.datatype = "double"
analyzeheader_trace = analyzeheader_fa.clone('analyzeheader_trace')
#analyzeheader_md = pe.Node(interface= camino.AnalyzeHeader(), name = "analyzeheader_md")
#analyzeheader_md.inputs.datatype = "double"
#analyzeheader_trace = analyzeheader_md.clone('analyzeheader_trace')
fa2nii = pe.Node(interface=misc.CreateNifti(),name='fa2nii')
trace2nii = fa2nii.clone("trace2nii")
"""
    Since we have now created all our nodes, we can define our workflow and start making connections.
"""
tractography = pe.Workflow(name='tractography')
tractography.connect([(inputnode1, bet,[("dwi","in_file")])])
"""
File format conversion
"""
tractography.connect([(inputnode1, image2voxel, [("dwi", "in_file")]),
(inputnode1, fsl2scheme, [("bvecs", "bvec_file"),
("bvals", "bval_file")])
])
"""
Tensor fitting
"""
tractography.connect([(image2voxel, dtifit,[['voxel_order','in_file']]),
(fsl2scheme, dtifit,[['scheme','scheme_file']])
])
"""
    Workflow for applying DT streamline tractography
"""
tractography.connect([(bet, trackdt,[("mask_file","seed_file")])])
tractography.connect([(dtifit, trackdt,[("tensor_fitted","in_file")])])
"""
Workflow for applying PICo
"""
tractography.connect([(bet, trackpico,[("mask_file","seed_file")])])
tractography.connect([(fsl2scheme, dtlutgen,[("scheme","scheme_file")])])
tractography.connect([(dtlutgen, picopdfs,[("dtLUT","luts")])])
tractography.connect([(dtifit, picopdfs,[("tensor_fitted","in_file")])])
tractography.connect([(picopdfs, trackpico,[("pdfs","in_file")])])
# Mean diffusivity still appears broken
#tractography.connect([(dtifit, md,[("tensor_fitted","in_file")])])
#tractography.connect([(md, analyzeheader_md,[("md","in_file")])])
#tractography.connect([(inputnode, analyzeheader_md,[(('dwi', get_vox_dims), 'voxel_dims'),
#(('dwi', get_data_dims), 'data_dims')])])
#This line is commented out because the ProcStreamlines node keeps throwing memory errors
#tractography.connect([(track, procstreamlines,[("tracked","in_file")])])
"""
Connecting the Fractional Anisotropy and Trace nodes is simple, as they obtain their input from the
tensor fitting.
This is also where our voxel- and data-grabbing functions come in. We pass these functions, along with the original DWI image from the input node, to the header-generating nodes. This ensures that the files will be correct and readable.
"""
tractography.connect([(dtifit, fa,[("tensor_fitted","in_file")])])
tractography.connect([(fa, analyzeheader_fa,[("fa","in_file")])])
tractography.connect([(inputnode1, analyzeheader_fa,[(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
tractography.connect([(fa, fa2nii,[('fa','data_file')])])
tractography.connect([(inputnode1, fa2nii,[(('dwi', get_affine), 'affine')])])
tractography.connect([(analyzeheader_fa, fa2nii,[('header', 'header_file')])])
tractography.connect([(dtifit, trace,[("tensor_fitted","in_file")])])
tractography.connect([(trace, analyzeheader_trace,[("trace","in_file")])])
tractography.connect([(inputnode1, analyzeheader_trace,[(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
tractography.connect([(trace, trace2nii,[('trace','data_file')])])
tractography.connect([(inputnode1, trace2nii,[(('dwi', get_affine), 'affine')])])
tractography.connect([(analyzeheader_trace, trace2nii,[('header', 'header_file')])])
tractography.connect([(dtifit, dteig,[("tensor_fitted","in_file")])])
tractography.connect([(trackpico, cam2trk_pico, [('tracked','in_file')])])
tractography.connect([(trackdt, cam2trk_dt, [('tracked','in_file')])])
tractography.connect([(inputnode1, cam2trk_pico,[(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
tractography.connect([(inputnode1, cam2trk_dt,[(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
inputnode= pe.Node(interface = util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), name="inputnode")
outputnode = pe.Node(interface = util.IdentityInterface(fields=["fa",
"trace",
"tracts_pico",
"tracts_dt",
"tensors"]),
name="outputnode")
workflow = pe.Workflow(name=name)
workflow.base_output_dir=name
workflow.connect([(inputnode, tractography, [("dwi", "inputnode1.dwi"),
("bvals", "inputnode1.bvals"),
("bvecs", "inputnode1.bvecs")])])
workflow.connect([(tractography, outputnode, [("cam2trk_dt.trackvis", "tracts_dt"),
("cam2trk_pico.trackvis", "tracts_pico"),
("fa2nii.nifti_file", "fa"),
("trace2nii.nifti_file", "trace"),
("dtifit.tensor_fitted", "tensors")])
])
return workflow
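# Sketch of the connect idiom used throughout this workflow: a source in a
# connection tuple may be ('output_name', transform_fn), in which case the
# function is applied to the output before it reaches the destination input.
# Here get_vox_dims runs on the 'dwi' field and feeds 'voxel_dims'
# (somenode is illustrative):
#
#   workflow.connect([(inputnode, somenode,
#                      [(('dwi', get_vox_dims), 'voxel_dims')])])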
| mick-d/nipype_source | nipype/workflows/dmri/camino/diffusion.py | Python | bsd-3-clause | 10,475 | [
"ParaView",
"VTK"
] | 8564bdc92886b72b1a2fa9cb0d560d5d7a3fdf5ea4966118aa07c75138cb0d8f |
# Natural Language Toolkit: Conditional Random Fields
#
# Copyright (C) 2001-2010 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
#
# $Id: hmm.py 5994 2008-06-02 12:07:07Z stevenbird $
"""
An interface to U{Mallet <http://mallet.cs.umass.edu/>}'s Linear Chain
Conditional Random Field (LC-CRF) implementation.
A user-supplied I{feature detector function} is used to convert each
token to a featureset. Each feature/value pair is then encoded as a
single binary feature for Mallet.
"""
import os
from tempfile import *
import textwrap
import re
import time
import subprocess
import sys
import zipfile
import pickle
from nltk.classify.maxent import *
from nltk.classify.mallet import call_mallet
from nltk.etree import ElementTree
from api import *
class MalletCRF(FeaturesetTaggerI):
"""
A conditional random field tagger, which is trained and run by
making external calls to Mallet. Tokens are converted to
featuresets using a feature detector function::
feature_detector(tokens, index) -> featureset
These featuresets are then encoded into feature vectors by
converting each feature (name, value) pair to a unique binary
feature.
    Each C{MalletCRF} object is backed by a X{crf model file}. This
model file is actually a zip file, and it contains one file for
the serialized model (C{crf-model.ser}) and one file for
information about the structure of the CRF (C{crf-info.xml}).
"""
def __init__(self, filename, feature_detector=None):
"""
Create a new C{MalletCRF}.
@param filename: The filename of the model file that backs
this CRF.
@param feature_detector: The feature detector function that is
used to convert tokens to featuresets. This parameter
only needs to be given if the model file does not contain
a pickled pointer to the feature detector (e.g., if the
feature detector was a lambda function).
"""
# Read the CRFInfo from the model file.
zf = zipfile.ZipFile(filename)
crf_info = CRFInfo.fromstring(zf.read('crf-info.xml'))
zf.close()
self.crf_info = crf_info
"""A L{CRFInfo} object describing this CRF."""
# Ensure that our crf_info object has a feature detector.
if crf_info.feature_detector is not None:
if (feature_detector is not None and
self.crf_info.feature_detector != feature_detector):
raise ValueError('Feature detector mismatch: %r vs %r' %
(feature_detector, self.crf_info.feature_detector))
elif feature_detector is None:
raise ValueError('Feature detector not found; supply it manually.')
elif feature_detector.__name__ != crf_info.feature_detector_name:
raise ValueError('Feature detector name mismatch: %r vs %r' %
(feature_detector.__name__,
crf_info.feature_detector_name))
else:
self.crf_info.feature_detector = feature_detector
#/////////////////////////////////////////////////////////////////
# Convenience accessors (info also available via self.crf_info)
#/////////////////////////////////////////////////////////////////
def _get_filename(self):
return self.crf_info.model_filename
filename = property(_get_filename , doc="""
The filename of the crf model file that backs this
C{MalletCRF}. The crf model file is actually a zip file, and
it contains one file for the serialized model
(C{crf-model.ser}) and one file for information about the
structure of the CRF (C{crf-info.xml}).""")
def _get_feature_detector(self):
return self.crf_info.model_feature_detector
feature_detector = property(_get_feature_detector , doc="""
The feature detector function that is used to convert tokens
to featuresets. This function has the signature::
feature_detector(tokens, index) -> featureset""")
#/////////////////////////////////////////////////////////////////
# Tagging
#/////////////////////////////////////////////////////////////////
#: The name of the java script used to run MalletCRFs.
_RUN_CRF = "org.nltk.mallet.RunCRF"
def batch_tag(self, sentences):
# Write the test corpus to a temporary file
(fd, test_file) = mkstemp('.txt', 'test')
self.write_test_corpus(sentences, os.fdopen(fd, 'w'))
try:
# Run mallet on the test file.
stdout, stderr = call_mallet([self._RUN_CRF,
'--model-file', os.path.abspath(self.crf_info.model_filename),
'--test-file', test_file], stdout='pipe')
# Decode the output
labels = self.parse_mallet_output(stdout)
# strip __start__ and __end__
if self.crf_info.add_start_state and self.crf_info.add_end_state:
labels = [labs[1:-1] for labs in labels]
elif self.crf_info.add_start_state:
labels = [labs[1:] for labs in labels]
elif self.crf_info.add_end_state:
labels = [labs[:-1] for labs in labels]
# Combine the labels and the original sentences.
return [zip(sent, label) for (sent,label) in
zip(sentences, labels)]
finally:
os.remove(test_file)
#/////////////////////////////////////////////////////////////////
# Training
#/////////////////////////////////////////////////////////////////
#: The name of the java script used to train MalletCRFs.
_TRAIN_CRF = "org.nltk.mallet.TrainCRF"
@classmethod
def train(cls, feature_detector, corpus, filename=None,
weight_groups=None, gaussian_variance=1, default_label='O',
transduction_type='VITERBI', max_iterations=500,
add_start_state=True, add_end_state=True, trace=1):
"""
Train a new linear chain CRF tagger based on the given corpus
of training sequences. This tagger will be backed by a I{crf
model file}, containing both a serialized Mallet model and
information about the CRF's structure. This crf model file
will I{not} be automatically deleted -- if you wish to delete
it, you must delete it manually. The filename of the model
file for a MalletCRF C{crf} is available as C{crf.filename}.
@type corpus: C{list} of C{tuple}
@param corpus: Training data, represented as a list of
sentences, where each sentence is a list of (token, tag)
tuples.
@type filename: C{str}
@param filename: The filename that should be used for the crf
model file that backs the new C{MalletCRF}. If no
filename is given, then a new filename will be chosen
automatically.
@type weight_groups: C{list} of L{CRFInfo.WeightGroup}
@param weight_groups: Specifies how input-features should
be mapped to joint-features. See L{CRFInfo.WeightGroup}
for more information.
@type gaussian_variance: C{float}
@param gaussian_variance: The gaussian variance of the prior
that should be used to train the new CRF.
@type default_label: C{str}
@param default_label: The "label for initial context and
uninteresting tokens" (from Mallet's SimpleTagger.java.)
It's unclear whether this currently has any effect.
@type transduction_type: C{str}
@param transduction_type: The type of transduction used by
the CRF. Can be VITERBI, VITERBI_FBEAM, VITERBI_BBEAM,
VITERBI_FBBEAM, or VITERBI_FBEAMKL.
@type max_iterations: C{int}
@param max_iterations: The maximum number of iterations that
should be used for training the CRF.
@type add_start_state: C{bool}
@param add_start_state: If true, then NLTK will add a special
start state, named C{'__start__'}. The initial cost for
the start state will be set to 0; and the initial cost for
all other states will be set to +inf.
@type add_end_state: C{bool}
@param add_end_state: If true, then NLTK will add a special
end state, named C{'__end__'}. The final cost for the end
state will be set to 0; and the final cost for all other
states will be set to +inf.
@type trace: C{int}
@param trace: Controls the verbosity of trace output generated
while training the CRF. Higher numbers generate more verbose
output.
"""
t0 = time.time() # Record starting time.
# If they did not supply a model filename, then choose one.
if filename is None:
(fd, filename) = mkstemp('.crf', 'model')
os.fdopen(fd).close()
# Ensure that the filename ends with '.zip'
if not filename.endswith('.crf'):
filename += '.crf'
if trace >= 1:
print '[MalletCRF] Training a new CRF: %s' % filename
# Create crf-info object describing the new CRF.
crf_info = MalletCRF._build_crf_info(
corpus, gaussian_variance, default_label, max_iterations,
transduction_type, weight_groups, add_start_state,
add_end_state, filename, feature_detector)
# Create a zipfile, and write crf-info to it.
if trace >= 2:
print '[MalletCRF] Adding crf-info.xml to %s' % filename
zf = zipfile.ZipFile(filename, mode='w')
zf.writestr('crf-info.xml', crf_info.toxml()+'\n')
zf.close()
# Create the CRF object.
crf = MalletCRF(filename, feature_detector)
# Write the Training corpus to a temporary file.
if trace >= 2:
print '[MalletCRF] Writing training corpus...'
(fd, train_file) = mkstemp('.txt', 'train')
crf.write_training_corpus(corpus, os.fdopen(fd, 'w'))
try:
if trace >= 1:
print '[MalletCRF] Calling mallet to train CRF...'
cmd = [MalletCRF._TRAIN_CRF,
'--model-file', os.path.abspath(filename),
'--train-file', train_file]
if trace > 3:
call_mallet(cmd)
else:
p = call_mallet(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
blocking=False)
MalletCRF._filter_training_output(p, trace)
finally:
# Delete the temp file containing the training corpus.
os.remove(train_file)
if trace >= 1:
print '[MalletCRF] Training complete.'
print '[MalletCRF] Model stored in: %s' % filename
if trace >= 2:
dt = time.time()-t0
print '[MalletCRF] Total training time: %d seconds' % dt
# Return the completed CRF.
return crf
@staticmethod
def _build_crf_info(corpus, gaussian_variance, default_label,
max_iterations, transduction_type, weight_groups,
add_start_state, add_end_state,
model_filename, feature_detector):
"""
Construct a C{CRFInfo} object describing a CRF with a given
set of configuration parameters, and based on the contents of
a given corpus.
"""
state_info_list = []
labels = set()
if add_start_state:
labels.add('__start__')
if add_end_state:
labels.add('__end__')
transitions = set() # not necessary to find this?
for sent in corpus:
prevtag = default_label
for (tok,tag) in sent:
labels.add(tag)
transitions.add( (prevtag, tag) )
prevtag = tag
if add_start_state:
transitions.add( ('__start__', sent[0][1]) )
if add_end_state:
transitions.add( (sent[-1][1], '__end__') )
labels = sorted(labels)
# 0th order default:
if weight_groups is None:
weight_groups = [CRFInfo.WeightGroup(name=l, src='.*',
dst=re.escape(l))
for l in labels]
# Check that weight group names are unique
if len(weight_groups) != len(set(wg.name for wg in weight_groups)):
raise ValueError("Weight group names must be unique")
# Construct a list of state descriptions. Currently, we make
# these states fully-connected, with one parameter per
# transition.
for src in labels:
if add_start_state:
if src == '__start__':
initial_cost = 0
else:
initial_cost = '+inf'
if add_end_state:
if src == '__end__':
final_cost = 0
else:
final_cost = '+inf'
state_info = CRFInfo.State(src, initial_cost, final_cost, [])
for dst in labels:
state_weight_groups = [wg.name for wg in weight_groups
if wg.match(src, dst)]
state_info.transitions.append(
CRFInfo.Transition(dst, dst, state_weight_groups))
state_info_list.append(state_info)
return CRFInfo(state_info_list, gaussian_variance,
default_label, max_iterations,
transduction_type, weight_groups,
add_start_state, add_end_state,
model_filename, feature_detector)
#: A table used to filter the output that mallet generates during
#: training. By default, mallet generates very verbose output.
#: This table is used to select which lines of output are actually
#: worth displaying to the user, based on the level of the C{trace}
#: parameter. Each entry of this table is a tuple
#: C{(min_trace_level, regexp)}. A line will be displayed only if
#: C{trace>=min_trace_level} and the line matches C{regexp} for at
#: least one table entry.
_FILTER_TRAINING_OUTPUT = [
(1, r'DEBUG:.*'),
(1, r'Number of weights.*'),
(1, r'CRF about to train.*'),
(1, r'CRF finished.*'),
(1, r'CRF training has converged.*'),
(2, r'CRF weights.*'),
(2, r'getValue\(\) \(loglikelihood\) .*'),
]
@staticmethod
def _filter_training_output(p, trace):
"""
Filter the (very verbose) output that is generated by mallet,
and only display the interesting lines. The lines that are
selected for display are determined by
L{_FILTER_TRAINING_OUTPUT}.
"""
out = []
while p.poll() is None:
while True:
line = p.stdout.readline()
if not line: break
out.append(line)
for (t, regexp) in MalletCRF._FILTER_TRAINING_OUTPUT:
if t <= trace and re.match(regexp, line):
indent = ' '*t
print '[MalletCRF] %s%s' % (indent, line.rstrip())
break
if p.returncode != 0:
print "\nError encountered! Mallet's most recent output:"
print ''.join(out[-100:])
raise OSError('Mallet command failed')
#/////////////////////////////////////////////////////////////////
# Communication w/ mallet
#/////////////////////////////////////////////////////////////////
def write_training_corpus(self, corpus, stream, close_stream=True):
"""
Write a given training corpus to a given stream, in a format that
can be read by the java script C{org.nltk.mallet.TrainCRF}.
"""
feature_detector = self.crf_info.feature_detector
for sentence in corpus:
if self.crf_info.add_start_state:
stream.write('__start__ __start__\n')
unlabeled_sent = [tok for (tok,tag) in sentence]
for index in range(len(unlabeled_sent)):
featureset = feature_detector(unlabeled_sent, index)
for (fname, fval) in featureset.items():
stream.write(self._format_feature(fname, fval)+" ")
stream.write(sentence[index][1]+'\n')
if self.crf_info.add_end_state:
stream.write('__end__ __end__\n')
stream.write('\n')
if close_stream: stream.close()
def write_test_corpus(self, corpus, stream, close_stream=True):
"""
Write a given test corpus to a given stream, in a format that
can be read by the java script C{org.nltk.mallet.TestCRF}.
"""
feature_detector = self.crf_info.feature_detector
for sentence in corpus:
if self.crf_info.add_start_state:
stream.write('__start__ __start__\n')
for index in range(len(sentence)):
featureset = feature_detector(sentence, index)
for (fname, fval) in featureset.items():
stream.write(self._format_feature(fname, fval)+" ")
stream.write('\n')
if self.crf_info.add_end_state:
stream.write('__end__ __end__\n')
stream.write('\n')
if close_stream: stream.close()
def parse_mallet_output(self, s):
"""
Parse the output that is generated by the java script
C{org.nltk.mallet.TestCRF}, and convert it to a labeled
corpus.
"""
if re.match(r'\s*<<start>>', s):
            assert 0, "it's a lattice"
corpus = [[]]
for line in s.split('\n'):
line = line.strip()
# Label with augmentations?
if line:
corpus[-1].append(line.strip())
# Start of new instance?
elif corpus[-1] != []:
corpus.append([])
if corpus[-1] == []: corpus.pop()
return corpus
_ESCAPE_RE = re.compile('[^a-zA-Z0-9]')
@staticmethod
def _escape_sub(m):
return '%' + ('%02x' % ord(m.group()))
@staticmethod
def _format_feature(fname, fval):
"""
Return a string name for a given feature (name, value) pair,
appropriate for consumption by mallet. We escape every
character in fname or fval that's not a letter or a number,
just to be conservative.
"""
fname = MalletCRF._ESCAPE_RE.sub(MalletCRF._escape_sub, fname)
if isinstance(fval, basestring):
fval = "'%s'" % MalletCRF._ESCAPE_RE.sub(
MalletCRF._escape_sub, fval)
else:
fval = MalletCRF._ESCAPE_RE.sub(MalletCRF._escape_sub, '%r'%fval)
return fname+'='+fval
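    # For example, computed by hand from the escaping rules above:
    #   MalletCRF._format_feature('w.lower', "don't")
    # returns "w%2elower='don%27t'" -- '.' is 0x2e, the apostrophe 0x27.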
#/////////////////////////////////////////////////////////////////
# String Representation
#/////////////////////////////////////////////////////////////////
def __repr__(self):
return 'MalletCRF(%r)' % self.crf_info.model_filename
###########################################################################
## Serializable CRF Information Object
###########################################################################
class CRFInfo(object):
"""
An object used to record configuration information about a
MalletCRF object. This configuration information can be
serialized to an XML file, which can then be read by NLTK's custom
interface to Mallet's CRF.
CRFInfo objects are typically created by the L{MalletCRF.train()}
method.
Advanced users may wish to directly create custom
C{CRFInfo.WeightGroup} objects and pass them to the
L{MalletCRF.train()} function. See L{CRFInfo.WeightGroup} for
more information.
"""
def __init__(self, states, gaussian_variance, default_label,
max_iterations, transduction_type, weight_groups,
add_start_state, add_end_state, model_filename,
feature_detector):
self.gaussian_variance = float(gaussian_variance)
self.default_label = default_label
self.states = states
self.max_iterations = max_iterations
self.transduction_type = transduction_type
self.weight_groups = weight_groups
self.add_start_state = add_start_state
self.add_end_state = add_end_state
self.model_filename = model_filename
if isinstance(feature_detector, basestring):
self.feature_detector_name = feature_detector
self.feature_detector = None
else:
self.feature_detector_name = feature_detector.__name__
self.feature_detector = feature_detector
_XML_TEMPLATE = (
'<crf>\n'
' <modelFile>%(model_filename)s</modelFile>\n'
' <gaussianVariance>%(gaussian_variance)d</gaussianVariance>\n'
' <defaultLabel>%(default_label)s</defaultLabel>\n'
' <maxIterations>%(max_iterations)s</maxIterations>\n'
' <transductionType>%(transduction_type)s</transductionType>\n'
' <featureDetector name="%(feature_detector_name)s">\n'
' %(feature_detector)s\n'
' </featureDetector>\n'
' <addStartState>%(add_start_state)s</addStartState>\n'
' <addEndState>%(add_end_state)s</addEndState>\n'
' <states>\n'
'%(states)s\n'
' </states>\n'
' <weightGroups>\n'
'%(w_groups)s\n'
' </weightGroups>\n'
'</crf>\n')
def toxml(self):
info = self.__dict__.copy()
info['states'] = '\n'.join(state.toxml() for state in self.states)
info['w_groups'] = '\n'.join(wg.toxml() for wg in self.weight_groups)
        info['feature_detector_name'] = (info['feature_detector_name']
                                         .replace('&', '&amp;')
                                         .replace('<', '&lt;'))
        try:
            fd = pickle.dumps(self.feature_detector)
            fd = fd.replace('&', '&amp;').replace('<', '&lt;')
            fd = fd.replace('\n', '&#10;') # put pickle data all on 1 line.
info['feature_detector'] = '<pickle>%s</pickle>' % fd
except pickle.PicklingError:
info['feature_detector'] = ''
return self._XML_TEMPLATE % info
@staticmethod
def fromstring(s):
return CRFInfo._read(ElementTree.fromstring(s))
@staticmethod
def _read(etree):
states = [CRFInfo.State._read(et) for et in
etree.findall('states/state')]
weight_groups = [CRFInfo.WeightGroup._read(et) for et in
etree.findall('weightGroups/weightGroup')]
fd = etree.find('featureDetector')
feature_detector = fd.get('name')
if fd.find('pickle') is not None:
try: feature_detector = pickle.loads(fd.find('pickle').text)
            except pickle.UnpicklingError: pass # unable to unpickle it.
return CRFInfo(states,
float(etree.find('gaussianVariance').text),
etree.find('defaultLabel').text,
int(etree.find('maxIterations').text),
etree.find('transductionType').text,
weight_groups,
bool(etree.find('addStartState').text),
bool(etree.find('addEndState').text),
etree.find('modelFile').text,
feature_detector)
def write(self, filename):
out = open(filename, 'w')
out.write(self.toxml())
out.write('\n')
out.close()
class State(object):
"""
A description of a single CRF state.
"""
def __init__(self, name, initial_cost, final_cost, transitions):
if initial_cost != '+inf': initial_cost = float(initial_cost)
if final_cost != '+inf': final_cost = float(final_cost)
self.name = name
self.initial_cost = initial_cost
self.final_cost = final_cost
self.transitions = transitions
_XML_TEMPLATE = (
' <state name="%(name)s" initialCost="%(initial_cost)s" '
'finalCost="%(final_cost)s">\n'
' <transitions>\n'
'%(transitions)s\n'
' </transitions>\n'
' </state>\n')
def toxml(self):
info = self.__dict__.copy()
info['transitions'] = '\n'.join(transition.toxml()
for transition in self.transitions)
return self._XML_TEMPLATE % info
@staticmethod
def _read(etree):
transitions = [CRFInfo.Transition._read(et)
for et in etree.findall('transitions/transition')]
return CRFInfo.State(etree.get('name'),
etree.get('initialCost'),
etree.get('finalCost'),
transitions)
class Transition(object):
"""
A description of a single CRF transition.
"""
def __init__(self, destination, label, weightgroups):
"""
@param destination: The name of the state that this transition
connects to.
@param label: The tag that is generated when traversing this
transition.
@param weightgroups: A list of L{WeightGroup} names, indicating
which weight groups should be used to calculate the cost
of traversing this transition.
"""
self.destination = destination
self.label = label
self.weightgroups = weightgroups
_XML_TEMPLATE = (' <transition label="%(label)s" '
'destination="%(destination)s" '
'weightGroups="%(w_groups)s"/>')
def toxml(self):
info = self.__dict__
info['w_groups'] = ' '.join(wg for wg in self.weightgroups)
return self._XML_TEMPLATE % info
@staticmethod
def _read(etree):
return CRFInfo.Transition(etree.get('destination'),
etree.get('label'),
etree.get('weightGroups').split())
class WeightGroup(object):
"""
A configuration object used by C{MalletCRF} to specify how
input-features (which are a function of only the input) should be
mapped to joint-features (which are a function of both the input
and the output tags).
Each weight group specifies that a given set of input features
should be paired with all transitions from a given set of source
tags to a given set of destination tags.
"""
def __init__(self, name, src, dst, features='.*'):
"""
@param name: A unique name for this weight group.
@param src: The set of source tags that should be used for
this weight group, specified as either a list of state
names or a regular expression.
@param dst: The set of destination tags that should be used
for this weight group, specified as either a list of state
names or a regular expression.
            @param features: The set of input features that should be used
for this weight group, specified as either a list of
feature names or a regular expression. WARNING: currently,
                this regexp is passed straight to java -- i.e., it must
be a java-style regexp!
"""
if re.search('\s', name):
raise ValueError('weight group name may not '
'contain whitespace.')
if re.search('"', name):
raise ValueError('weight group name may not contain \'"\'.')
self.name = name
self.src = src
self.dst = dst
self.features = features
self._src_match_cache = {}
self._dst_match_cache = {}
_XML_TEMPLATE = (' <weightGroup name="%(name)s" src="%(src)s" '
'dst="%(dst)s" features="%(features)s" />')
def toxml(self):
return self._XML_TEMPLATE % self.__dict__
@staticmethod
def _read(etree):
return CRFInfo.WeightGroup(etree.get('name'),
etree.get('src'),
etree.get('dst'),
etree.get('features'))
# [xx] feature name????
def match(self, src, dst):
# Check if the source matches
src_match = self._src_match_cache.get(src)
if src_match is None:
if isinstance(self.src, basestring):
src_match = bool(re.match(self.src+'\Z', src))
else:
src_match = src in self.src
self._src_match_cache[src] = src_match
# Check if the dest matches
dst_match = self._dst_match_cache.get(dst)
if dst_match is None:
if isinstance(self.dst, basestring):
dst_match = bool(re.match(self.dst+'\Z', dst))
else:
dst_match = dst in self.dst
self._dst_match_cache[dst] = dst_match
# Return true if both matched.
return src_match and dst_match
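# Example (tag names are illustrative): a weight group whose features apply
# only to transitions from 'NN' to 'VB':
#
#   wg = CRFInfo.WeightGroup(name='nn_to_vb', src='NN', dst='VB')
#   wg.match('NN', 'VB')   # True
#   wg.match('NN', 'JJ')   # False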
###########################################################################
## Demonstration code
###########################################################################
def demo(train_size=100, test_size=100,
java_home='/usr/local/jdk1.5.0/',
mallet_home='/usr/local/mallet-0.4'):
    import nltk
    from nltk.corpus import brown
    import textwrap
# Define a very simple feature detector
def fd(sentence, index):
word = sentence[index]
return dict(word=word, suffix=word[-2:], len=len(word))
# Let nltk know where java & mallet are.
nltk.internals.config_java(java_home)
nltk.classify.mallet.config_mallet(mallet_home)
# Get the training & test corpus. We simplify the tagset a little:
# just the first 2 chars.
def strip(corpus): return [[(w, t[:2]) for (w,t) in sent]
for sent in corpus]
brown_train = strip(brown.tagged_sents(categories='news')[:train_size])
brown_test = strip(brown.tagged_sents(categories='editorial')[:test_size])
crf = MalletCRF.train(fd, brown_train, #'/tmp/crf-model',
transduction_type='VITERBI')
sample_output = crf.tag([w for (w,t) in brown_test[5]])
acc = nltk.tag.accuracy(crf, brown_test)
print '\nAccuracy: %.1f%%' % (acc*100)
print 'Sample output:'
print textwrap.fill(' '.join('%s/%s' % w for w in sample_output),
initial_indent=' ', subsequent_indent=' ')+'\n'
# Clean up
print 'Clean-up: deleting', crf.filename
os.remove(crf.filename)
return crf
if __name__ == '__main__':
crf = demo(train_size=100)
| markgw/jazzparser | lib/nltk/tag/crf.py | Python | gpl-3.0 | 31,850 | [
"Gaussian"
] | dd323f04d70a7d75b50046e2427c7bd120ab92cf183462891293afe5b7c376f9 |
"""
Callable objects that generate numbers according to different distributions.
"""
import random
import operator
import hashlib
import struct
import fractions
from math import e,pi
import param
from param import __version__ # noqa: API import
class TimeAware(param.Parameterized):
"""
Class of objects that have access to a global time function
and have the option of using it to generate time-dependent values
as necessary.
In the simplest case, an object could act as a strict function of
time, returning the current time transformed according to a fixed
equation. Other objects may support locking their results to a
timebase, but also work without time. For instance, objects with
random state could return a new random value for every call, with
no notion of time, or could always return the same value until the
global time changes. Subclasses should thus provide an ability to
return a time-dependent value, but may not always do so.
"""
time_dependent = param.Boolean(default=False, doc="""
Whether the given time_fn should be used to constrain the
results generated.""")
time_fn = param.Callable(default=param.Dynamic.time_fn, doc="""
Callable used to specify the time that determines the state
and return value of the object, if time_dependent=True.""")
def __init__(self, **params):
super(TimeAware, self).__init__(**params)
self._check_time_fn()
def _check_time_fn(self, time_instance=False):
"""
If time_fn is the global time function supplied by
param.Dynamic.time_fn, make sure Dynamic parameters are using
this time function to control their behaviour.
If time_instance is True, time_fn must be a param.Time instance.
"""
if time_instance and not isinstance(self.time_fn, param.Time):
raise AssertionError("%s requires a Time object"
% self.__class__.__name__)
if self.time_dependent:
global_timefn = self.time_fn is param.Dynamic.time_fn
if global_timefn and not param.Dynamic.time_dependent:
raise AssertionError("Cannot use Dynamic.time_fn as"
" parameters are ignoring time.")
class TimeDependent(TimeAware):
"""
Objects that have access to a time function that determines the
output value. As a function of time, this type of object should
allow time values to be randomly jumped forwards or backwards,
but for a given time point, the results should remain constant.
The time_fn must be an instance of param.Time, to ensure all the
facilities necessary for safely navigating the timeline are
available.
"""
time_dependent = param.Boolean(default=True, readonly=True, doc="""
Read-only parameter that is always True.""")
def _check_time_fn(self):
super(TimeDependent,self)._check_time_fn(time_instance=True)
class NumberGenerator(param.Parameterized):
"""
Abstract base class for any object that when called produces a number.
Primarily provides support for using NumberGenerators in simple
arithmetic expressions, such as abs((x+y)/z), where x,y,z are
NumberGenerators or numbers.
"""
def __call__(self):
raise NotImplementedError
# Could define any of Python's operators here, esp. if they have operator or ufunc equivalents
def __add__ (self,operand): return BinaryOperator(self,operand,operator.add)
def __sub__ (self,operand): return BinaryOperator(self,operand,operator.sub)
def __mul__ (self,operand): return BinaryOperator(self,operand,operator.mul)
def __mod__ (self,operand): return BinaryOperator(self,operand,operator.mod)
def __pow__ (self,operand): return BinaryOperator(self,operand,operator.pow)
def __div__ (self,operand): return BinaryOperator(self,operand,operator.div)
def __truediv__ (self,operand): return BinaryOperator(self,operand,operator.truediv)
def __floordiv__ (self,operand): return BinaryOperator(self,operand,operator.floordiv)
def __radd__ (self,operand): return BinaryOperator(self,operand,operator.add,True)
def __rsub__ (self,operand): return BinaryOperator(self,operand,operator.sub,True)
def __rmul__ (self,operand): return BinaryOperator(self,operand,operator.mul,True)
def __rmod__ (self,operand): return BinaryOperator(self,operand,operator.mod,True)
def __rpow__ (self,operand): return BinaryOperator(self,operand,operator.pow,True)
def __rdiv__ (self,operand): return BinaryOperator(self,operand,operator.div,True)
def __rtruediv__ (self,operand): return BinaryOperator(self,operand,operator.truediv,True)
def __rfloordiv__(self,operand): return BinaryOperator(self,operand,operator.floordiv,True)
def __neg__ (self): return UnaryOperator(self,operator.neg)
def __pos__ (self): return UnaryOperator(self,operator.pos)
def __abs__ (self): return UnaryOperator(self,operator.abs)
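# Illustrative sketch, not part of the original module: the overloads above mean
# that ordinary arithmetic on NumberGenerators lazily builds a tree of
# BinaryOperator/UnaryOperator instances, re-evaluated on each call.
# _demo_numbergen_arithmetic is a hypothetical helper, for illustration only.
def _demo_numbergen_arithmetic():
    x = UniformRandom(lbound=0.0, ubound=1.0)   # UniformRandom is defined later in this module
    y = UniformRandom(lbound=-1.0, ubound=1.0)
    expr = abs((x + y) / 2.0)  # a UnaryOperator wrapping a BinaryOperator
    return expr()              # draws from x and y, then applies the division and abs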
class BinaryOperator(NumberGenerator):
"""Applies any binary operator to NumberGenerators or numbers to yield a NumberGenerator."""
def __init__(self,lhs,rhs,operator,reverse=False,**args):
"""
Accepts two NumberGenerator operands, an operator, and
optional arguments to be provided to the operator when calling
it on the two operands.
"""
# Note that it's currently not possible to set
# parameters in the superclass when creating an instance,
# because **args is used by this class itself.
super(BinaryOperator,self).__init__()
if reverse:
self.lhs=rhs
self.rhs=lhs
else:
self.lhs=lhs
self.rhs=rhs
self.operator=operator
self.args=args
def __call__(self):
return self.operator(self.lhs() if callable(self.lhs) else self.lhs,
self.rhs() if callable(self.rhs) else self.rhs, **self.args)
class UnaryOperator(NumberGenerator):
"""Applies any unary operator to a NumberGenerator to yield another NumberGenerator."""
def __init__(self,operand,operator,**args):
"""
Accepts a NumberGenerator operand, an operator, and
optional arguments to be provided to the operator when calling
it on the operand.
"""
# Note that it's currently not possible to set
# parameters in the superclass when creating an instance,
# because **args is used by this class itself.
super(UnaryOperator,self).__init__()
self.operand=operand
self.operator=operator
self.args=args
def __call__(self):
return self.operator(self.operand(),**self.args)
class Hash(object):
"""
A platform- and architecture-independent hash function (unlike
Python's inbuilt hash function) for use with an ordered collection
of rationals or integers.
The supplied name sets the initial hash state. The output from
each call is a 32-bit integer to ensure the value is a regular
Python integer (and not a Python long) on both 32-bit and 64-bit
platforms. This can be important to seed Numpy's random number
generator safely (a bad Numpy bug!).
The number of inputs (integer or rational numbers) to be supplied
for __call__ must be specified in the constructor and must stay
constant across calls.
"""
def __init__(self, name, input_count):
self.name = name
self.input_count = input_count
self._digest = hashlib.md5()
self._digest.update(name.encode())
        self._hash_struct = struct.Struct("!" + " ".join(["I"] * (input_count * 2)))
def _rational(self, val):
"""Convert the given value to a rational, if necessary."""
I32 = 4294967296 # Maximum 32 bit unsigned int (i.e. 'I') value
if isinstance(val, int):
numer, denom = val, 1
elif isinstance(val, fractions.Fraction):
numer, denom = val.numerator, val.denominator
elif hasattr(val, 'numer'):
(numer, denom) = (int(val.numer()), int(val.denom()))
else:
            param.main.param.warning("Casting type '%s' to fractions.Fraction"
% type(val).__name__)
frac = fractions.Fraction(str(val))
numer, denom = frac.numerator, frac.denominator
return numer % I32, denom % I32
def __getstate__(self):
"""
        Avoid hashlib.md5 TypeError in deepcopy (hashlib issue)
"""
d = self.__dict__.copy()
d.pop('_digest')
d.pop('_hash_struct')
return d
def __setstate__(self, d):
self._digest = hashlib.md5()
name, input_count = d['name'], d['input_count']
self._digest.update(name.encode())
        self._hash_struct = struct.Struct("!" + " ".join(["I"] * (input_count * 2)))
self.__dict__.update(d)
def __call__(self, *vals):
"""
Given integer or rational inputs, generate a cross-platform,
architecture-independent 32-bit integer hash.
"""
# Convert inputs to (numer, denom) pairs with integers
# becoming (int, 1) pairs to match gmpy.mpqs for int values.
pairs = [self._rational(val) for val in vals]
# Unpack pairs and fill struct with ints to update md5 hash
ints = [el for pair in pairs for el in pair]
digest = self._digest.copy()
digest.update(self._hash_struct.pack(*ints))
        # Convert the first seven hex digits to an int (28 bits, so it fits in a 32 bit int)
return int(digest.hexdigest()[:7], 16)
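# Illustrative sketch, not part of the original module: equal (name, inputs)
# pairs always give equal values, regardless of platform or architecture.
# _demo_hash is a hypothetical helper, for illustration only.
def _demo_hash():
    h = Hash('example', input_count=2)
    v1 = h(3, fractions.Fraction(1, 2))  # accepts ints and rationals
    v2 = h(3, fractions.Fraction(1, 2))
    assert v1 == v2 and 0 <= v1 < 2 ** 32
    return v1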
class TimeAwareRandomState(TimeAware):
"""
Generic base class to enable time-dependent random
streams. Although this class is the basis of all random numbergen
classes, it is designed to be useful whenever time-dependent
randomness is needed using param's notion of time. For instance,
this class is used by the imagen package to define time-dependent,
random distributions over 2D arrays.
For generality, this class may use either the Random class from
Python's random module or numpy.random.RandomState. Either of
these random state objects may be used to generate numbers from
any of several different random distributions (e.g. uniform,
Gaussian). The latter offers the ability to generate
multi-dimensional random arrays and more random distributions but
requires numpy as a dependency.
If declared time_dependent, the random state is fully determined
by a hash value per call. The hash is initialized once with the
object name and then per call using a tuple consisting of the time
(via time_fn) and the global param.random_seed. As a consequence,
for a given name and fixed value of param.random_seed, the random
values generated will be a fixed function of time.
If the object name has not been set and time_dependent is True, a
message is generated warning that the default object name is
dependent on the order of instantiation. To ensure that the
random number stream will remain constant even if other objects
are added or reordered in your file, supply a unique name
explicitly when you construct the RandomDistribution object.
"""
random_generator = param.Parameter(
default=random.Random((500,500)), doc=
"""
        Random state used by the object. This may be an instance
        of random.Random from the Python standard library or an
        instance of numpy.random.RandomState.
        This random state may be exclusively owned by the object or
        may be shared by all instances of the same class. It is always
possible to give an object its own unique random state by
setting this parameter with a new random state instance.
""")
__abstract = True
def _initialize_random_state(self, seed=None, shared=True, name=None):
"""
Initialization method to be called in the constructor of
subclasses to initialize the random state correctly.
If seed is None, there is no control over the random stream
(no reproducibility of the stream).
If shared is True (and not time-dependent), the random state
is shared across all objects of the given class. This can be
overridden per object by creating new random state to assign
to the random_generator parameter.
"""
if seed is None: # Equivalent to an uncontrolled seed.
seed = random.Random().randint(0, 1000000)
suffix = ''
else:
suffix = str(seed)
# If time_dependent, independent state required: otherwise
# time-dependent seeding (via hash) will affect shared
# state. Note that if all objects have time_dependent=True
# shared random state is safe and more memory efficient.
if self.time_dependent or not shared:
self.random_generator = type(self.random_generator)(seed)
# Seed appropriately (if not shared)
if not shared:
self.random_generator.seed(seed)
if name is None:
self._verify_constrained_hash()
hash_name = name if name else self.name
if not shared: hash_name += suffix
self._hashfn = Hash(hash_name, input_count=2)
if self.time_dependent:
self._hash_and_seed()
def _verify_constrained_hash(self):
"""
Warn if the object name is not explicitly set.
"""
changed_params = dict(self.param.get_param_values(onlychanged=True))
if self.time_dependent and ('name' not in changed_params):
self.param.warning("Default object name used to set the seed: "
"random values conditional on object instantiation order.")
def _hash_and_seed(self):
"""
To be called between blocks of random number generation. A
'block' can be an unbounded sequence of random numbers so long
as the time value (as returned by time_fn) is guaranteed not
to change within the block. If this condition holds, each
block of random numbers is time-dependent.
Note: param.random_seed is assumed to be integer or rational.
"""
hashval = self._hashfn(self.time_fn(), param.random_seed)
self.random_generator.seed(hashval)
class RandomDistribution(NumberGenerator, TimeAwareRandomState):
"""
    The base class for all NumberGenerators using random state.
Numbergen provides a hierarchy of classes to make it easier to use
the random distributions made available in Python's random module,
where each class is tied to a particular random distribution.
RandomDistributions support setting parameters on creation rather
than passing them each call, and allow pickling to work properly.
Code that uses these classes will be independent of how many
parameters are used by the underlying distribution, and can simply
treat them as a generic source of random numbers.
RandomDistributions are examples of TimeAwareRandomState, and thus
can be locked to a global time if desired. By default,
time_dependent=False, and so a new random value will be generated
each time these objects are called. If you have a global time
function, you can set time_dependent=True, so that the random
values will instead be constant at any given time, changing only
when the time changes. Using time_dependent values can help you
obtain fully reproducible streams of random numbers, even if you
e.g. move time forwards and backwards for testing.
Note: Each RandomDistribution object has independent random state.
"""
seed = param.Integer(default=None, allow_None=True, doc="""
Sets the seed of the random number generator and can be used to
randomize time dependent streams.
If seed is None, there is no control over the random stream
(i.e. no reproducibility of the stream).""")
__abstract = True
def __init__(self,**params):
"""
Initialize a new Random() instance and store the supplied
positional and keyword arguments.
If seed=X is specified, sets the Random() instance's seed.
        Otherwise, creates an unseeded Random instance, which is
likely to result in a state very different from any just used.
"""
super(RandomDistribution,self).__init__(**params)
self._initialize_random_state(seed=self.seed, shared=False)
def __call__(self):
if self.time_dependent:
self._hash_and_seed()
class UniformRandom(RandomDistribution):
"""
Specified with lbound and ubound; when called, return a random
number in the range [lbound, ubound).
See the random module for further details.
"""
lbound = param.Number(default=0.0,doc="inclusive lower bound")
ubound = param.Number(default=1.0,doc="exclusive upper bound")
def __call__(self):
super(UniformRandom, self).__call__()
return self.random_generator.uniform(self.lbound,self.ubound)
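# Illustrative sketch, not part of the original module: parameters are fixed at
# construction and each call draws a new value; supplying a seed makes the stream
# reproducible. _demo_uniform_random is a hypothetical helper, for illustration only.
def _demo_uniform_random():
    gen = UniformRandom(lbound=0.0, ubound=10.0, seed=42)
    return [gen() for _ in range(3)]  # three reproducible draws from [0.0, 10.0)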
class UniformRandomOffset(RandomDistribution):
"""
Identical to UniformRandom, but specified by mean and range.
When called, return a random number in the range
[mean - range/2, mean + range/2).
See the random module for further details.
"""
mean = param.Number(default=0.0, doc="""Mean value""")
range = param.Number(default=1.0, bounds=(0.0,None), doc="""
Difference of maximum and minimum value""")
def __call__(self):
super(UniformRandomOffset, self).__call__()
return self.random_generator.uniform(
self.mean - self.range / 2.0,
self.mean + self.range / 2.0)
class UniformRandomInt(RandomDistribution):
"""
Specified with lbound and ubound; when called, return a random
number in the inclusive range [lbound, ubound].
See the randint function in the random module for further details.
"""
lbound = param.Number(default=0,doc="inclusive lower bound")
ubound = param.Number(default=1000,doc="inclusive upper bound")
def __call__(self):
super(UniformRandomInt, self).__call__()
x = self.random_generator.randint(self.lbound,self.ubound)
return x
class Choice(RandomDistribution):
"""
Return a random element from the specified list of choices.
Accepts items of any type, though they are typically numbers.
See the choice() function in the random module for further details.
"""
choices = param.List(default=[0,1],
doc="List of items from which to select.")
def __call__(self):
super(Choice, self).__call__()
return self.random_generator.choice(self.choices)
class NormalRandom(RandomDistribution):
"""
Normally distributed (Gaussian) random number.
Specified with mean mu and standard deviation sigma.
See the random module for further details.
"""
mu = param.Number(default=0.0,doc="Mean value.")
sigma = param.Number(default=1.0,bounds=(0.0,None),doc="Standard deviation.")
def __call__(self):
super(NormalRandom, self).__call__()
return self.random_generator.normalvariate(self.mu,self.sigma)
class VonMisesRandom(RandomDistribution):
"""
Circularly normal distributed random number.
If kappa is zero, this distribution reduces to a uniform random
angle over the range 0 to 2*pi. Otherwise, it is concentrated to
a greater or lesser degree (determined by kappa) around the mean
mu. For large kappa (narrow peaks), this distribution approaches
the Gaussian (normal) distribution with variance 1/kappa. See the
random module for further details.
"""
mu = param.Number(default=0.0,softbounds=(0.0,2*pi),doc="""
Mean value, typically in the range 0 to 2*pi.""")
kappa = param.Number(default=1.0,bounds=(0.0,None),softbounds=(0.0,50.0),doc="""
Concentration (inverse variance).""")
def __call__(self):
super(VonMisesRandom, self).__call__()
return self.random_generator.vonmisesvariate(self.mu,self.kappa)
class ScaledTime(NumberGenerator, TimeDependent):
"""
The current time multiplied by some conversion factor.
"""
factor = param.Number(default=1.0, doc="""
The factor to be multiplied by the current time value.""")
def __call__(self):
return float(self.time_fn() * self.factor)
class BoxCar(NumberGenerator, TimeDependent):
"""
The boxcar function over the specified time interval. The bounds
are exclusive: zero is returned at the onset time and at the
offset (onset+duration).
If duration is None, then this reduces to a step function around the
onset value with no offset.
See http://en.wikipedia.org/wiki/Boxcar_function
"""
onset = param.Number(0.0, doc="Time of onset.")
duration = param.Number(None, allow_None=True, bounds=(0.0,None), doc="""
Duration of step value.""")
def __call__(self):
if self.time_fn() <= self.onset:
return 0.0
elif (self.duration is not None) and (self.time_fn() > self.onset + self.duration):
return 0.0
else:
return 1.0
class SquareWave(NumberGenerator, TimeDependent):
"""
Generate a square wave with 'on' periods returning 1.0 and
    'off' periods returning 0.0 of specified duration(s). By default
the portion of time spent in the high state matches the time spent
in the low state (a duty cycle of 50%), but the duty cycle can be
controlled if desired.
The 'on' state begins after a time specified by the 'onset'
    parameter. The onset time supplied must be less than the 'off'
duration.
"""
onset = param.Number(0.0, doc="""Time of onset of the first 'on'
state relative to time 0. Must be set to a value less than the
'off_duration' parameter.""")
duration = param.Number(1.0, allow_None=False, bounds=(0.0,None), doc="""
Duration of the 'on' state during which a value of 1.0 is
returned.""")
off_duration = param.Number(default=None, allow_None=True,
bounds=(0.0,None), doc="""
Duration of the 'off' value state during which a value of 0.0
is returned. By default, this duration matches the value of
the 'duration' parameter.""")
def __init__(self, **params):
super(SquareWave,self).__init__(**params)
if self.off_duration is None:
self.off_duration = self.duration
if self.onset > self.off_duration:
raise AssertionError("Onset value needs to be less than %s" % self.onset)
def __call__(self):
phase_offset = (self.time_fn() - self.onset) % (self.duration + self.off_duration)
if phase_offset < self.duration:
return 1.0
else:
return 0.0
class ExponentialDecay(NumberGenerator, TimeDependent):
"""
Function object that provides a value that decays according to an
exponential function, based on a given time function.
Returns starting_value*base^(-time/time_constant).
See http://en.wikipedia.org/wiki/Exponential_decay.
"""
starting_value = param.Number(1.0, doc="Value used for time zero.")
ending_value = param.Number(0.0, doc="Value used for time infinity.")
time_constant = param.Number(10000,doc="""
Time scale for the exponential; large values give slow decay.""")
base = param.Number(e, doc="""
Base of the exponent; the default yields starting_value*exp(-t/time_constant).
Another popular choice of base is 2, which allows the
time_constant to be interpreted as a half-life.""")
def __call__(self):
Vi = self.starting_value
Vm = self.ending_value
exp = -1.0*float(self.time_fn())/float(self.time_constant)
return Vm + (Vi - Vm) * self.base**exp
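# Illustrative sketch, not part of the original module: with the global time
# function enabled, the decay is a pure function of the current time (note that
# setting param.Dynamic.time_dependent mutates global state).
# _demo_exponential_decay is a hypothetical helper, for illustration only.
def _demo_exponential_decay():
    param.Dynamic.time_dependent = True  # otherwise TimeDependent objects refuse the global time_fn
    decay = ExponentialDecay(starting_value=1.0, ending_value=0.0, time_constant=10.0)
    with param.Dynamic.time_fn as t:
        t(0)
        v0 = decay()  # --> 1.0 (the starting value)
        t(10)
        v1 = decay()  # --> 1/e, one time constant later
    return v0, v1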
class TimeSampledFn(NumberGenerator, TimeDependent):
"""
Samples the values supplied by a time_dependent callable at
regular intervals of duration 'period', with the sampled value
held constant within each interval.
"""
period = param.Number(default=1.0, bounds=(0.0,None),
inclusive_bounds=(False,True), softbounds=(0.0,5.0), doc="""
The periodicity with which the values of fn are sampled.""")
offset = param.Number(default=0.0, bounds=(0.0,None),
softbounds=(0.0,5.0), doc="""
The offset from time 0.0 at which the first sample will be drawn.
Must be less than the value of period.""")
fn = param.Callable(doc="""
The time-dependent function used to generate the sampled values.""")
def __init__(self, **params):
super(TimeSampledFn, self).__init__(**params)
if not getattr(self.fn,'time_dependent', False):
raise Exception("The function 'fn' needs to be time dependent.")
if self.time_fn != self.fn.time_fn:
raise Exception("Objects do not share the same time_fn")
if self.offset >= self.period:
raise Exception("The onset value must be less than the period.")
def __call__(self):
current_time = self.time_fn()
current_time += self.offset
difference = current_time % self.period
with self.time_fn as t:
t(current_time - difference - self.offset)
value = self.fn()
return value
class BoundedNumber(NumberGenerator):
"""
Function object that silently enforces numeric bounds on values
returned by a callable object.
"""
generator = param.Callable(None, doc="Object to call to generate values.")
bounds = param.Parameter((None,None), doc="""
Legal range for the value returned, as a pair.
The default bounds are (None,None), meaning there are actually
no bounds. One or both bounds can be set by specifying a
value. For instance, bounds=(None,10) means there is no lower
bound, and an upper bound of 10.""")
def __call__(self):
val = self.generator()
min_, max_ = self.bounds
if min_ is not None and val < min_: return min_
elif max_ is not None and val > max_: return max_
else: return val
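# Illustrative sketch, not part of the original module: wrap any callable in
# BoundedNumber to clip its return values to a legal range.
# _demo_bounded_number is a hypothetical helper, for illustration only.
def _demo_bounded_number():
    clipped = BoundedNumber(generator=NormalRandom(mu=0.5, sigma=2.0), bounds=(0.0, 1.0))
    return clipped()  # draws below 0.0 come back as 0.0, draws above 1.0 as 1.0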
_public = list(set([_k for _k,_v in locals().items() if isinstance(_v,type) and issubclass(_v,NumberGenerator)]))
__all__ = _public
| ioam/param | numbergen/__init__.py | Python | bsd-3-clause | 26,699 | [
"Gaussian"
] | 0f4aa47885e46a4611a788aa78b98a5bf46e3bcbd4a69c165676e8b1d8dca246 |
import __builtin__
import glob
import operator
import string
import itertools
import copy
import collections
import random
import csv
from cStringIO import StringIO
import subprocess
import tempfile
import os
import numpy
import sys
from distutils.version import StrictVersion
import dendropy
import time
import math
import json
import pickle
import warnings
import traceback
if StrictVersion(dendropy.__version__) < StrictVersion('4.0.0'): # not sure on the exact version I need, but 3.12.0 is missing lots of vital tree fcns
raise RuntimeError("dendropy version 4.0.0 or later is required (found version %s)." % dendropy.__version__)
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
import utils
# TODO isn't this info somewhere else?
affy_metrics = ['lbi', 'cons-dist-aa', 'cons-dist-nuc', 'shm', 'aa-lbi'] # it would be nice to instead use the info at the top of treeutils/lbplotting
affy_metrics += ['sum-'+m for m in affy_metrics]
daffy_metrics = ['delta-lbi', 'lbr', 'aa-lbr']
daffy_metrics += ['sum-'+m for m in daffy_metrics]
lb_metrics = collections.OrderedDict(('lb' + let, 'lb ' + lab) for let, lab in (('i', 'index'), ('r', 'ratio')))
selection_metrics = ['lbi', 'lbr', 'cons-dist-aa', 'cons-frac-aa', 'aa-lbi', 'aa-lbr'] # I really thought this was somewhere, but can't find it so adding it here
selection_metrics += ['sum-n_mutations', 'sum-shm-aa']
typical_bcr_seq_len = 400
default_lb_tau = 0.0025
default_lbr_tau_factor = 1
default_min_selection_metric_cluster_size = 10
dummy_str = 'x-dummy-x'
legtexts = {
'metric-for-target-distance' : 'target dist. metric',
'n-sim-seqs-per-generation' : 'N sampled',
'leaf-sampling-scheme' : 'sampling scheme',
'target-count' : 'N target seqs',
'n-target-clusters' : 'N target clust.',
'min-target-distance' : 'min target dist.',
'uniform-random' : 'unif. random',
'affinity-biased' : 'affinity biased',
'high-affinity' : 'perf. affinity',
'cons-dist-aa' : 'aa-cdist',
'sum-cons-dist-aa' : '- AA dist. to cons seq (h+l)',
'cons-frac-aa' : 'aa-cfrac',
'cons-dist-nuc' : 'nuc-cdist',
'shm' : 'n-shm',
'aa-lbi' : 'AA lb index',
'aa-lbr' : 'AA lb ratio',
'sum-aa-lbi' : 'h+l AA lb index',
'sum-aa-lbr' : 'h+l AA lb ratio',
'sum-lbi' : 'h+l lb index',
'sum-lbr' : 'h+l lb ratio',
'sum-n_mutations' : 'h+l nuc mutations',
'sum-shm-aa' : 'h+l AA mutations',
}
# ----------------------------------------------------------------------------------------
all_plot_cfg = ['lb-vs-affy', 'slice', 'joy', 'lb-vs-daffy', 'lb-scatter', 'tree', 'distr', 'true-vs-inf-metrics', 'tree-mut-stats']
default_plot_cfg = ['lb-vs-affy', 'slice', 'joy', 'lb-vs-daffy', 'lb-scatter', 'tree']
# ----------------------------------------------------------------------------------------
def smetric_fname(fname):
return utils.insert_before_suffix('-selection-metrics', fname)
# ----------------------------------------------------------------------------------------
def add_cons_seqs(line, aa=False):
ckey = 'consensus_seq'
if ckey not in line:
line[ckey] = utils.cons_seq_of_line(line)
if aa:
ckey += '_aa'
if ckey not in line:
line[ckey] = utils.cons_seq_of_line(line, aa=True)
# ----------------------------------------------------------------------------------------
def lb_cons_dist(line, iseq, aa=False, frac=False): # at every point where this can add something to <line> (i.e. consensus seqs and aa seqs) it checks that they're not already there, so it will never do those calculations twice. But the final hamming calculation is *not* cached so will get redone if you call more than once
if aa and 'seqs_aa' not in line:
utils.add_seqs_aa(line)
add_cons_seqs(line, aa=aa)
tstr = '_aa' if aa else ''
hfcn = utils.hamming_fraction if frac else utils.hamming_distance # NOTE it's important to use this if you want the fraction (rather than dividing by sequence length afterward) since you also need to account for ambig bases in the cons seq
return hfcn(line['consensus_seq'+tstr], line['seqs'+tstr][iseq], amino_acid=aa)
# ----------------------------------------------------------------------------------------
def add_cons_dists(line, aa=False, debug=False):
ckey = 'cons_dists_' + ('aa' if aa else 'nuc')
if ckey not in line:
line[ckey] = [lb_cons_dist(line, i, aa=aa) for i, u in enumerate(line['unique_ids'])]
if debug: # it would kind of make more sense to have this in some of the fcns that this fcn is calling, but then I'd have to pass the debug arg through a bunch of tiny fcns that don't really need it
tstr = '_aa' if aa else ''
# don't need this unless we turn the tie resolver stuff back on:
# if aa: # we have to add this by hand since we don't actually use it to calculate the aa cons seq -- we get that by just translating the nuc cons seq
# utils.add_naive_seq_aa(line)
hfkey = ckey.replace('cons_dists_', 'cons_fracs_')
line[hfkey] = [lb_cons_dist(line, i, aa=aa, frac=True) for i, u in enumerate(line['unique_ids'])]
extra_keys = [ckey, hfkey]
if 'cell-types' in line:
extra_keys.append('cell-types')
utils.print_cons_seq_dbg(utils.seqfos_from_line(line, aa=aa, extra_keys=extra_keys), line['consensus_seq'+tstr], align=False, aa=aa) # NOTE you probably don't want to turn the naive tie resolver back on in utils.cons_seq_of_line(), but if you do, this reminds you to also do it here so the dbg is correct, tie_resolver_seq=line['naive_seq'+tstr], tie_resolver_label='naive seq')
# ----------------------------------------------------------------------------------------
def add_cdists_to_lbfo(line, lbfo, cdist, debug=False): # it's kind of dumb to store them both in <line> and in <lbfo> (and thus in <line['tree-info']['lb']>), but I think it's ultimately the most sensible thing, given the inherent contradiction that a) we want to *treat* the cons dists like lbi/lbr tree metrics in almost every way, but b) they're *not* actually tree metrics in the sense that they don't use a tree (also, we want the minus sign in lbfo)
add_cons_dists(line, aa='-aa' in cdist, debug=debug)
tkey = cdist.replace('cons-dist-', 'cons_dists_') # yes, I want the names to be different (although admittedly with a time machine it'd be set up differently)
lbfo[cdist] = {u : -line[tkey][i] for i, u in enumerate(line['unique_ids'])}
# ----------------------------------------------------------------------------------------
# if neither iseq nor uid are set, returns all of the values; otherwise specify *either* iseq or uid
def smvals(line, smetric, iseq=None, uid=None, nullval=None): # retrieve selection metric values from within line['tree-info']['lb'][yadda yadda], i.e. as if they were a normal list-based per-seq quantity
# NOTE this is what you use if the values are already there, in 'tree-info' -- if you want to calculate them, there's other fcns
assert (iseq is None and uid is None) or [iseq, uid].count(None) == 1
if uid is not None:
iseq = line['unique_ids'].index(uid)
if 'tree-info' not in line or 'lb' not in line['tree-info'] or smetric not in line['tree-info']['lb']:
return [nullval for _ in line['unique_ids']] if iseq is None else nullval
lbfo = line['tree-info']['lb'][smetric]
if iseq is None:
return [lbfo.get(u, nullval) for u in line['unique_ids']]
else:
return lbfo.get(line['unique_ids'][iseq], nullval)
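# ----------------------------------------------------------------------------------------
# illustrative sketch (not part of the original module), using a hypothetical minimal
# annotation in which lb values were only calculated for the first sequence:
def _demo_smvals():  # hypothetical helper, for illustration only
    line = {'unique_ids' : ['a', 'b'], 'tree-info' : {'lb' : {'lbi' : {'a' : 0.01}}}}
    assert smvals(line, 'lbi', uid='a') == 0.01
    assert smvals(line, 'lbi', iseq=1, nullval=0.) == 0.  # 'b' has no lbi value, so we get <nullval>
    return smvals(line, 'lbi')  # all values: [0.01, None]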
# ----------------------------------------------------------------------------------------
def lb_cons_seq_shm(line, aa=False):
add_cons_seqs(line, aa=aa)
if aa and 'naive_seq_aa' not in line:
utils.add_naive_seq_aa(line)
tstr = '_aa' if aa else ''
return utils.hamming_distance(line['naive_seq'+tstr], line['consensus_seq'+tstr], amino_acid=aa)
# ----------------------------------------------------------------------------------------
def edge_dist_fcn(dtree, uid): # duplicates fcn in lbplotting.make_lb_scatter_plots()
node = dtree.find_node_with_taxon_label(uid)
if node is None:
return None
return min(node.distance_from_tip(), node.distance_from_root()) # NOTE the tip one gives the *maximum* distance to a leaf, but I think that's ok
# ----------------------------------------------------------------------------------------
cgroups = ['within-families', 'among-families'] # different ways of grouping clusters, i.e. "cluster groupings"
dtr_targets = {'within-families' : ['affinity', 'delta-affinity'], 'among-families' : ['affinity', 'delta-affinity']} # variables that we try to predict, i.e. we train on dtr for each of these
pchoices = ['per-seq', 'per-cluster'] # per-? choice, i.e. is this a per-sequence or per-cluster quantity
dtr_metrics = ['%s-%s-dtr'%(cg, tv) for cg in cgroups for tv in dtr_targets[cg]] # NOTE order of this has to remain the same as in the loops used to generate it
dtr_vars = {'within-families' : {'per-seq' : ['lbi', 'cons-dist-nuc', 'cons-dist-aa', 'edge-dist', 'lbr', 'shm', 'shm-aa'], # NOTE when iterating over this, you have to take the order from <pchoices>, since both pchoices go into the same list of variable values
'per-cluster' : []},
'among-families' : {'per-seq' : ['lbi', 'cons-dist-nuc', 'cons-dist-aa', 'edge-dist', 'lbr', 'shm', 'shm-aa'],
'per-cluster' : ['fay-wu-h', 'cons-seq-shm-nuc', 'cons-seq-shm-aa', 'mean-shm', 'max-lbi', 'max-lbr']},
}
default_dtr_options = {
# 'base-regr' :
'vars' : None, # uses <dtr_vars> for default
'min_samples_leaf' : 5, # only used for grad-boost and bag
'max_depth' : 5, # only used for grad-boost and bag
'ensemble' : 'grad-boost', # ['bag', 'forest', 'ada-boost',
'n_estimators' : 100,
'n_train_per_family' : 1, # for among-families dtr, only train on this many cells per family (to avoid over training). Set to None to use all of 'em
'n_jobs' : None, # default set below (also, this is not used for boosted ensembles)
}
# ----------------------------------------------------------------------------------------
def get_dtr_varnames(cgroup, varlists, with_pc=False): # arg, <with_pc> is fucking ugly
return [(pc, vn) if with_pc else vn for pc in pchoices for vn in varlists[cgroup][pc]]
# ----------------------------------------------------------------------------------------
def get_dtr_vals(cgroup, varlists, line, lbfo, dtree):
# ----------------------------------------------------------------------------------------
def getval(pchoice, var, uid):
if pchoice == 'per-seq':
if var in ['lbi', 'lbr', 'cons-dist-nuc', 'cons-dist-aa']:
return lbfo[var][uid] # NOTE this will fail in (some) cases where the uids in the tree and annotation aren't the same, but I don't care atm since it looks like we won't really be using the dtr
elif var == 'edge-dist':
return edge_dist_fcn(dtree, uid)
elif var == 'shm':
return utils.per_seq_val(line, 'n_mutations', uid)
elif var == 'shm-aa':
return utils.shm_aa(line, line['unique_ids'].index(uid))
else:
assert False
elif pchoice == 'per-cluster':
return per_cluster_vals[var]
else:
assert False
# ----------------------------------------------------------------------------------------
if cgroup == 'among-families':
per_cluster_vals = {
'cons-seq-shm-nuc' : lb_cons_seq_shm(line),
'cons-seq-shm-aa' : lb_cons_seq_shm(line, aa=True),
'fay-wu-h' : -utils.fay_wu_h(line),
'mean-shm' : numpy.mean(line['n_mutations']),
'max-lbi' : max(lbfo['lbi'].values()),
'max-lbr' : max(lbfo['lbr'].values()),
}
vals = []
for uid in line['unique_ids']:
vals.append([getval(pc, var, uid) for pc, var in get_dtr_varnames(cgroup, varlists, with_pc=True)])
return vals
# ----------------------------------------------------------------------------------------
def dtrfname(dpath, cg, tvar, suffix='pickle'):
return '%s/%s-%s-dtr-model.%s' % (dpath, cg, tvar, suffix)
# ----------------------------------------------------------------------------------------
def tmfname(plotdir, metric, x_axis_label, cg=None, tv=None, use_relative_affy=False): # tree metric fname
assert x_axis_label in ['affinity', 'n-ancestor'] # arg, this is messy
assert tv in [None, 'affinity', 'delta-affinity']
metric_str = metric if metric != 'dtr' else '-'.join([cg, tv, metric])
vs_str = '%s-vs%s-%s' % (metric_str, '-relative' if x_axis_label == 'affinity' and use_relative_affy else '', x_axis_label)
return '%s/true-tree-metrics/%s/%s-ptiles/%s-true-tree-ptiles-all-clusters.yaml' % (plotdir, metric_str, vs_str, vs_str) # NOTE has 'true-tree' in there, which is fine for now but may need to change
# ----------------------------------------------------------------------------------------
def write_pmml(pmmlfname, dmodel, varlist, targetvar):
try: # seems to crash for no @**($ing reason sometimes
if 'sklearn2pmml' not in sys.modules: # just so people don't need to install/import it if they're not training
import sklearn2pmml
pmml_pipeline = sys.modules['sklearn2pmml'].make_pmml_pipeline(dmodel, active_fields=varlist, target_fields=targetvar)
sys.modules['sklearn2pmml'].sklearn2pmml(pmml_pipeline, pmmlfname)
except:
elines = traceback.format_exception(*sys.exc_info())
print utils.pad_lines(''.join(elines))
print ' %s pmml conversion failed (see above), but continuing' % utils.color('red', 'error')
# ----------------------------------------------------------------------------------------
def train_dtr_model(trainfo, outdir, cfgvals, cgroup, tvar):
if os.path.exists(dtrfname(outdir, cgroup, tvar)):
print ' %s dtr model file exists, so skipping training: %s' % (utils.color('yellow', 'warning'), dtrfname(outdir, cgroup, tvar))
return
if 'sklearn.ensemble' not in sys.modules:
with warnings.catch_warnings(): # NOTE not sure this is actually catching the warnings UPDATE oh, I think the warnings are getting thrown by function calls, not imports
warnings.simplefilter('ignore', category=DeprecationWarning) # numpy is complaining about how sklearn is importing something, and I really don't want to *@*($$ing hear about it
from sklearn import tree
from sklearn import ensemble
skens = sys.modules['sklearn.ensemble']
sktree = sys.modules['sklearn.tree']
start = time.time()
base_kwargs, kwargs = {}, {'n_estimators' : cfgvals['n_estimators']}
if cfgvals['ensemble'] == 'bag':
base_kwargs = {'min_samples_leaf' : cfgvals['min_samples_leaf'], 'max_depth' : cfgvals['max_depth']}
kwargs['base_estimator'] = sktree.DecisionTreeRegressor(**base_kwargs) # we can pass this to ada-boost, but I'm not sure if we should (it would override the default max_depth=3, for instance)
if 'grad-boost' in cfgvals['ensemble']:
kwargs['max_depth'] = cfgvals['max_depth']
kwargs['min_samples_leaf'] = cfgvals['min_samples_leaf']
if 'boost' not in cfgvals['ensemble']:
kwargs['n_jobs'] = cfgvals['n_jobs']
if cfgvals['ensemble'] == 'bag':
model = skens.BaggingRegressor(**kwargs)
elif cfgvals['ensemble'] == 'forest':
model = skens.RandomForestRegressor(**kwargs)
elif cfgvals['ensemble'] == 'ada-boost':
model = skens.AdaBoostRegressor(**kwargs)
elif cfgvals['ensemble'] == 'grad-boost':
model = skens.GradientBoostingRegressor(**kwargs) # if too slow, maybe try the new hist gradient boosting stuff
else:
assert False
model.fit(trainfo['in'], trainfo['out']) #, sample_weight=trainfo['weights'])
tmpkeys = [k for k in cfgvals if k != 'vars' and (k in kwargs or k in base_kwargs)] # don't want to print the inapplicable ones
print ' %s-families %s (%d observations in %.1fs): %s' % (utils.color('green', cgroup.split('-')[0]), utils.color('blue', tvar), len(trainfo['in']), time.time() - start, ' '.join('%s %s'%(k, cfgvals[k]) for k in sorted(tmpkeys)))
print ' feature importances:'
print ' mean err'
for iv, vname in enumerate([v for pc in pchoices for v in cfgvals['vars'][cgroup][pc]]):
if cfgvals['ensemble'] == 'grad-boost':
filist = [model.feature_importances_[iv]]
else:
filist = [estm.feature_importances_[iv] for estm in model.estimators_]
wlist = None
if cfgvals['ensemble'] == 'ada-boost':
wlist = [w for w in model.estimator_weights_ if w > 0]
assert len(wlist) == len(model.estimators_) # it terminates early (i.e. before making all the allowed estimators) if it already has perfect performance, but doesn't leave the lists the same length
print ' %17s %5.3f %5.3f' % (vname, numpy.average(filist, weights=wlist), (numpy.std(filist, ddof=1) / math.sqrt(len(filist))) if len(filist) > 1 else 0.) # NOTE not sure if std should also use the weights
if not os.path.exists(outdir):
os.makedirs(outdir)
if 'joblib' not in sys.modules: # just so people don't need to install it unless they're training (also scons seems to break it https://stackoverflow.com/questions/24453387/scons-attributeerror-builtin-function-or-method-object-has-no-attribute-disp)
import joblib
with open(dtrfname(outdir, cgroup, tvar), 'w') as dfile:
sys.modules['joblib'].dump(model, dfile)
write_pmml(dtrfname(outdir, cgroup, tvar, suffix='pmml'), model, get_dtr_varnames(cgroup, cfgvals['vars']), tvar)
# ----------------------------------------------------------------------------------------
# NOTE the min lbi is just tau, but I still like doing it this way
lb_bounds = { # calculated to 17 generations, which is quite close to the asymptote
typical_bcr_seq_len : { # seq_len
0.0030: (0.0030, 0.0331), # if tau is any bigger than this it doesn't really converge
0.0025: (0.0025, 0.0176),
0.0020: (0.0020, 0.0100),
0.0010: (0.0010, 0.0033),
0.0005: (0.0005, 0.0015),
},
# it turns out the aa lb metrics need the above nuc normalization (i.e. if we normalize with the below, the values are huge, like lots are 10ish). I guess maybe this makes sense, since i'm taking the nuc tree topology and scaling it to aa
# int(typical_bcr_seq_len / 3.) : { # amino acid (133)
# 0.0030: (0.0030, 0.0099),
# 0.0025: (0.0025, 0.0079),
# 0.0020: (0.0020, 0.0061),
# 0.0010: (0.0010, 0.0030),
# 0.0005: (0.0005, 0.0015),
# }
}
# ----------------------------------------------------------------------------------------
def normalize_lb_val(metric, lbval, tau, seq_len=typical_bcr_seq_len):
if metric == 'lbr':
return lbval
if seq_len not in lb_bounds:
raise Exception('seq len %d not in cached lb bound values (available: %s)' % (seq_len, lb_bounds.keys()))
if tau not in lb_bounds[seq_len]:
raise Exception('tau value %f not in cached lb bound values (available: %s)' % (tau, lb_bounds[seq_len].keys()))
lbmin, lbmax = lb_bounds[seq_len][tau]
return (lbval - lbmin) / (lbmax - lbmin)
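# ----------------------------------------------------------------------------------------
# illustrative sketch (not part of the original module): lbi values map linearly onto
# [0, 1] using the cached bounds above, while lbr values pass through unchanged
def _demo_normalize_lb_val():  # hypothetical helper, for illustration only
    lbmin, lbmax = lb_bounds[typical_bcr_seq_len][default_lb_tau]
    assert normalize_lb_val('lbi', lbmin, default_lb_tau) == 0.
    assert normalize_lb_val('lbi', lbmax, default_lb_tau) == 1.
    return normalize_lb_val('lbr', 3.7, default_lb_tau)  # --> 3.7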
# ----------------------------------------------------------------------------------------
def get_treestrs_from_file(treefname, n_expected_trees=None):
with open(treefname) as treefile:
tlines = treefile.readlines()
if n_expected_trees is not None and len(tlines) != n_expected_trees:
raise Exception('expected %d tree%s, but read %d tree lines from %s' % (n_expected_trees, utils.plural(n_expected_trees), len(tlines), treefname))
return tlines
# ----------------------------------------------------------------------------------------
def get_treestr_from_file(treefname):
return get_treestrs_from_file(treefname, n_expected_trees=1)[0]
# ----------------------------------------------------------------------------------------
def as_str(dtree):  # just a shorthand (adding this very late, so could stand to add this to a lot of places that use dtree.as_string())
return dtree.as_string(schema='newick').strip()
# ----------------------------------------------------------------------------------------
def cycle_through_ascii_conversion(dtree=None, treestr=None, taxon_namespace=None): # run once through the cycle of str -> dtree -> str (or dtree -> str -> dtree)
if dtree is not None:
return get_dendro_tree(treestr=as_str(dtree), taxon_namespace=taxon_namespace)
elif treestr is not None:
return as_str(get_dendro_tree(treestr=treestr))
else:
assert False
# ----------------------------------------------------------------------------------------
def get_dendro_tree(treestr=None, treefname=None, taxon_namespace=None, schema='newick', ignore_existing_internal_node_labels=False, suppress_internal_node_taxa=False, no_warn=False, debug=False): # specify either <treestr> or <treefname>
# <ignore_existing_internal_node_labels> is for when you want the internal nodes labeled (which we usually do, since we want to calculate selection metrics for internal nodes), but you also want to ignore the existing internal node labels (e.g. with FastTree output, where they're floats)
# <suppress_internal_node_taxa> on the other hand is for when you don't want to have taxa for any internal nodes (e.g. when calculating the tree difference metrics, the two trees have to have the same taxon namespace, but since they in general have different internal nodes, the internal nodes can't have taxa)
assert treestr is None or treefname is None
if ignore_existing_internal_node_labels and suppress_internal_node_taxa:
raise Exception('doesn\'t make sense to specify both')
if treestr is None:
treestr = get_treestr_from_file(treefname)
if debug:
print ' getting dendro tree from string:\n %s' % treestr
if taxon_namespace is not None:
print ' and taxon namespace: %s' % ' '.join([t.label for t in taxon_namespace])
# dendropy doesn't make taxons for internal nodes by default, so it puts the label for internal nodes in node.label instead of node.taxon.label, but it crashes if it gets duplicate labels, so you can't just always turn off internal node taxon suppression
    dtree = dendropy.Tree.get_from_string(treestr, schema, taxon_namespace=taxon_namespace, suppress_internal_node_taxa=(ignore_existing_internal_node_labels or suppress_internal_node_taxa), preserve_underscores=True, rooting='force-rooted') # make sure the tree is rooted, to avoid nodes disappearing in remove_dummy_branches() (and probably other places as well)
if dtree.seed_node.edge_length > 0 and not no_warn:
# this would be easy to fix, but i think it only happens from simulation trees from treegenerator UPDATE ok also happens for trees from the linearham paper
print ' %s seed/root node has non-zero edge length (i.e. there\'s a branch above it)' % utils.color('red', 'warning')
label_nodes(dtree, ignore_existing_internal_node_labels=ignore_existing_internal_node_labels, suppress_internal_node_taxa=suppress_internal_node_taxa, debug=debug) # set internal node labels to any found in <treestr> (unless <ignore_existing_internal_node_labels> is set), otherwise make some up (e.g. aa, ab, ac)
# # uncomment for more verbosity: NOTE node label check will likely fail if suppress_internal_node_taxa is set
# check_node_labels(dtree, debug=debug) # makes sure that for all nodes, node.taxon is not None, and node.label *is* None (i.e. that label_nodes did what it was supposed to, as long as suppress_internal_node_taxa wasn't set)
# if debug:
# print utils.pad_lines(get_ascii_tree(dendro_tree=dtree))
return dtree
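# ----------------------------------------------------------------------------------------
# illustrative sketch (not part of the original module): parse a newick string into a
# rooted dendropy tree, with generated labels added to the unlabeled internal/root nodes
def _demo_get_dendro_tree():  # hypothetical helper, for illustration only
    dtree = get_dendro_tree(treestr='(a:0.1,(b:0.2,c:0.3):0.1):0.0;')
    return [n.taxon.label for n in dtree.preorder_node_iter()]  # leaf labels 'a', 'b', 'c' plus two generated ones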
# ----------------------------------------------------------------------------------------
def import_bio_phylo():
if 'Bio.Phylo' not in sys.modules:
from Bio import Phylo # slow af to import
return sys.modules['Bio.Phylo']
# ----------------------------------------------------------------------------------------
def get_bio_tree(treestr=None, treefname=None, schema='newick'): # NOTE don't use this in future (all current uses are commented)
Phylo = import_bio_phylo()
if treestr is not None:
return Phylo.read(StringIO(treestr), schema)
elif treefname is not None:
with open(treefname) as treefile:
return Phylo.read(treefile, schema)
else:
assert False
# ----------------------------------------------------------------------------------------
def get_imbalance(dtree, treetype='dendropy'): # tree imbalance as std dev in root-to-tip branch lengths (see here https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1008030#pcbi-1008030-g001)
depths = get_leaf_depths(dtree, treetype=treetype)
imbal = numpy.std(depths.values(), ddof=1)
# print utils.pad_lines(get_ascii_tree(dendro_tree=dtree))
# print ' '.join(['%.3f'%v for v in sorted(depths.values())])
# print 'imbal', imbal
return imbal
# ----------------------------------------------------------------------------------------
def get_leaf_depths(tree, treetype='dendropy'): # NOTE structure of dictionary may depend on <treetype>, e.g. whether non-named nodes are included (maybe it doesn't any more? unless you return <clade_keyed_depths> at least)
if treetype == 'dendropy':
depths = {n.taxon.label : n.distance_from_root() for n in tree.leaf_node_iter()}
elif treetype == 'Bio':
clade_keyed_depths = tree.depths() # keyed by clade, not clade name (so unlabelled nodes are accessible)
depths = {n.name : clade_keyed_depths[n] for n in tree.find_clades()}
else:
assert False
return depths
# ----------------------------------------------------------------------------------------
def get_n_leaves(tree):
return len(tree.leaf_nodes())
# ----------------------------------------------------------------------------------------
def get_n_nodes(tree):
return len(list(tree.preorder_node_iter()))
# ----------------------------------------------------------------------------------------
def collapse_nodes(dtree, keep_name, remove_name, keep_name_node=None, remove_name_node=None, debug=False): # collapse edge between <keep_name> and <remove_name>, leaving remaining node with name <keep_name>
# NOTE I wrote this to try to fix the phylip trees from lonr.r, but it ends up they're kind of unfixable... but this fcn may be useful in the future, I guess, and it works UPDATE yep using it now for something else
if debug:
print ' collapsing %s and %s (the former will be the label for the surviving node)' % (keep_name, remove_name)
print utils.pad_lines(get_ascii_tree(dendro_tree=dtree))
if keep_name_node is None:
keep_name_node = dtree.find_node_with_taxon_label(keep_name)
if remove_name_node is None:
assert remove_name is not None # if we *are* passed <remove_name_node>, it's ok for <remove_name> to be None
remove_name_node = dtree.find_node_with_taxon_label(remove_name)
swapped = False
if keep_name_node in remove_name_node.child_nodes():
assert remove_name_node not in keep_name_node.child_nodes()
parent_node = remove_name_node
if parent_node.taxon is None:
parent_node.taxon = dendropy.Taxon()
parent_node.taxon.label = keep_name # have to rename it, since we always actually keep the parent
swapped = True
child_node = keep_name_node
elif remove_name_node in keep_name_node.child_nodes():
assert keep_name_node not in remove_name_node.child_nodes()
parent_node = keep_name_node
child_node = remove_name_node
else:
print ' node names %s and %s don\'t share an edge:' % (keep_name, remove_name)
print ' keep node children: %s' % ' '.join([n.taxon.label for n in keep_name_node.child_nodes()])
print ' remove node children: %s' % ' '.join([n.taxon.label for n in remove_name_node.child_nodes()])
raise Exception('see above')
if child_node.is_leaf():
dtree.prune_taxa([child_node.taxon], suppress_unifurcations=False)
if debug:
print ' pruned leaf node %s' % (('%s (renamed parent to %s)' % (remove_name, keep_name)) if swapped else remove_name)
else:
found = False
for edge in parent_node.child_edge_iter():
if edge.head_node is child_node:
edge.collapse() # removes child node (in dendropy language: inserts all children of the head_node (child) of this edge as children of the edge's tail_node (parent)) Doesn't modify edge lengths by default (i.e. collapsed edge should have zero length).
found = True
break
assert found
if debug:
print ' collapsed edge between %s and %s' % (keep_name, remove_name)
if debug:
print utils.pad_lines(get_ascii_tree(dendro_tree=dtree))
assert dtree.find_node_with_taxon_label(remove_name) is None
# NOTE do i need to add this?
# dtree.purge_taxon_namespace()
# ----------------------------------------------------------------------------------------
def check_node_labels(dtree, debug=False):
if debug:
print 'checking node labels for:'
print utils.pad_lines(get_ascii_tree(dendro_tree=dtree, width=250))
for node in dtree.preorder_node_iter():
if node.taxon is None:
raise Exception('taxon is None for node with depth %f' % node.distance_from_root())
if debug:
print ' ok: %s' % node.taxon.label
if node.label is not None:
raise Exception('node.label not set to None')
# ----------------------------------------------------------------------------------------
# by default, mostly adds labels to internal nodes (also sometimes the root node) that are missing them
def label_nodes(dendro_tree, ignore_existing_internal_node_labels=False, ignore_existing_internal_taxon_labels=False, suppress_internal_node_taxa=False, initial_length=3, debug=False):
if ignore_existing_internal_node_labels and suppress_internal_node_taxa:
raise Exception('doesn\'t make sense to specify both')
if debug:
print ' labeling nodes'
# print ' before:'
# print utils.pad_lines(get_ascii_tree(dendro_tree))
tns = dendro_tree.taxon_namespace
initial_names = set([t.label for t in tns]) # should all be leaf nodes, except the naive sequence (at least for now)
if debug:
print ' initial taxon labels: %s' % ' '.join(sorted(initial_names))
no_taxon_nodes = [n for n in dendro_tree.preorder_node_iter() if n.taxon is None]
if len(no_taxon_nodes) > 0:
print ' %d node%s with no taxa and depths: %s' % (len(no_taxon_nodes), utils.plural(len(no_taxon_nodes)), ' '.join('%.4f'%n.distance_from_root() for n in no_taxon_nodes))
potential_names, used_names = None, None
new_label, potential_names, used_names = utils.choose_new_uid(potential_names, used_names, initial_length=initial_length, shuffle=True)
skipped_dbg, relabeled_dbg = [], []
for node in dendro_tree.preorder_node_iter():
if node.taxon is not None and not (ignore_existing_internal_taxon_labels and not node.is_leaf()):
skipped_dbg += ['%s' % node.taxon.label]
assert node.label is None # if you want to change this, you have to start setting the node labels in build_lonr_tree(). For now, I like having the label in _one_ freaking place
continue # already properly labeled
current_label = node.label
node.label = None
if suppress_internal_node_taxa and not node.is_leaf():
continue
if current_label is None or ignore_existing_internal_node_labels:
new_label, potential_names, used_names = utils.choose_new_uid(potential_names, used_names)
else:
# turning this off since it's slow, and has been here a while without getting tripped (and I'm pretty sure the tns checks, anyway)
# if tns.has_taxon_label(current_label):
# raise Exception('duplicate node label \'%s\'' % current_label)
new_label = current_label
# turning this off since it's slow, and has been here a while without getting tripped (and I'm pretty sure the tns checks, anyway)
# if tns.has_taxon_label(new_label):
# raise Exception('failed labeling internal nodes (chose name \'%s\' that was already in the taxon namespace)' % new_label)
node.taxon = dendropy.Taxon(new_label)
tns.add_taxon(node.taxon)
relabeled_dbg += ['%s' % new_label]
if debug:
print ' skipped (already labeled): %s' % ' '.join(sorted(skipped_dbg))
print ' (re-)labeled: %s' % ' '.join(sorted(relabeled_dbg))
# print ' after:'
# print utils.pad_lines(get_ascii_tree(dendro_tree))
# ----------------------------------------------------------------------------------------
def translate_labels(dendro_tree, translation_pairs, dbgstr='', dont_fail=False, debug=False):
if debug:
print ' translating %stree:' % ('' if dbgstr=='' else dbgstr+' ')
print get_ascii_tree(dendro_tree=dendro_tree, extra_str=' ')
for old_label, new_label in translation_pairs:
taxon = dendro_tree.taxon_namespace.get_taxon(old_label)
if taxon is None:
prstr = 'requested taxon with old name \'%s\' not present in tree (present: %s)' % (old_label, ' '.join(t.label for t in dendro_tree.taxon_namespace))
if dont_fail:
print prstr
continue
else:
raise Exception(prstr)
taxon.label = new_label
if debug:
print ' %20s --> %s' % (old_label, new_label)
if debug:
print get_ascii_tree(dendro_tree=dendro_tree, extra_str=' ')
# ----------------------------------------------------------------------------------------
def get_mean_leaf_height(tree=None, treestr=None):
assert tree is None or treestr is None
if tree is None:
tree = get_dendro_tree(treestr=treestr, schema='newick')
heights = get_leaf_depths(tree).values()
return sum(heights) / len(heights)
# ----------------------------------------------------------------------------------------
def get_ascii_tree(dendro_tree=None, treestr=None, treefname=None, extra_str='', width=200, schema='newick', label_fcn=None):
"""
AsciiTreePlot docs (don't show up in as_ascii_plot()):
plot_metric : str
A string which specifies how branches should be scaled, one of:
'age' (distance from tips), 'depth' (distance from root),
'level' (number of branches from root) or 'length' (edge
length/weights).
show_internal_node_labels : bool
Whether or not to write out internal node labels.
leaf_spacing_factor : int
Positive integer: number of rows between each leaf.
width : int
Force a particular display width, in terms of number of columns.
node_label_compose_fn : function object
A function that takes a Node object as an argument and returns
the string to be used to display it.
"""
if dendro_tree is None:
assert treestr is None or treefname is None
if treestr is None:
treestr = get_treestr_from_file(treefname)
dendro_tree = get_dendro_tree(treestr=treestr, schema=schema)
if get_mean_leaf_height(dendro_tree) == 0.: # we really want the max height, but since we only care whether it's zero or not this is the same
return '%szero height' % extra_str
# elif: get_n_nodes(dendro_tree) > 1: # not sure if I really need this if any more (it used to be for one-leaf trees (and then for one-node trees), but the following code (that used to be indented) seems to be working fine on one-leaf, one-node, and lots-of-node trees a.t.m.)
start_char, end_char = '', ''
def compose_fcn(x):
if x.taxon is not None: # if there's a taxon defined, use its label
lb = x.taxon.label
elif x.label is not None: # use node label
lb = x.label
else:
lb = 'o'
if label_fcn is not None:
lb = label_fcn(lb)
return '%s%s%s' % (start_char, lb, end_char)
dendro_str = dendro_tree.as_ascii_plot(width=width, plot_metric='length', show_internal_node_labels=True, node_label_compose_fn=compose_fcn)
special_chars = [c for c in reversed(string.punctuation) if c not in set(dendro_str)] # find some special characters that we can use to identify the start and end of each label (could also use non-printable special characters, but it shouldn't be necessary)
if len(special_chars) >= 2: # can't color them directly, since dendropy counts the color characters as printable
start_char, end_char = special_chars[:2] # NOTE the colors get screwed up when dendropy overlaps labels (or sometimes just straight up strips stuff), which it does when it runs out of space
dendro_str = dendro_tree.as_ascii_plot(width=width, plot_metric='length', show_internal_node_labels=True, node_label_compose_fn=compose_fcn) # call again after modifying compose fcn (kind of wasteful to call it twice, but it shouldn't make a difference)
dendro_str = dendro_str.replace(start_char, utils.Colors['blue']).replace(end_char, utils.Colors['end'] + ' ')
else:
print ' %s can\'t color tree, no available special characters in get_ascii_tree()' % utils.color('red', 'note:')
if get_n_nodes(dendro_tree) == 1:
extra_str += ' (one node)'
return_lines = [('%s%s' % (extra_str, line)) for line in dendro_str.split('\n')]
return '\n'.join(return_lines)
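# ----------------------------------------------------------------------------------------
# minimal usage sketch for get_ascii_tree() (hypothetical demo fcn, not called anywhere;
# <label_fcn> is handy for truncating long uids before plotting):
def _example_get_ascii_tree():
    treestr = '(a:0.1,(b:0.05,c:0.07):0.02):0.;'
    print get_ascii_tree(treestr=treestr, extra_str='  ', width=80, label_fcn=lambda l: l[:10])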
# ----------------------------------------------------------------------------------------
def rescale_tree(new_mean_height, dtree=None, treestr=None, debug=False):
# NOTE if you pass in <dtree>, it gets modified, but if you pass in <treestr> you get back a new dtree (which is kind of a dumb way to set this up, but I don't want to change it now. Although I guess it returns None if you pass <dtree>, so you shouldn't get in too much trouble)
# TODO (maybe) switch calls of this to dendro's scale_edges() (but note you'd then have to get the mean depth beforehand, since that just multiplies by factor, whereas this rescales to get a particular new height)
""" rescale the branch lengths in dtree/treestr by a factor such that the new mean height is <new_mean_height> """
if dtree is None:
dtree = get_dendro_tree(treestr=treestr, suppress_internal_node_taxa=True)
mean_height = get_mean_leaf_height(tree=dtree)
if debug:
print ' current mean: %.4f target height: %.4f' % (mean_height, new_mean_height)
for edge in dtree.postorder_edge_iter():
if edge.head_node is dtree.seed_node: # why tf does the root node have an edge where it's the child?
continue
if debug:
print ' %5s %7e --> %7e' % (edge.head_node.taxon.label if edge.head_node.taxon is not None else 'None', edge.length, edge.length * new_mean_height / mean_height)
if mean_height != 0: # ok should really probably just return without doing anything if every leaf height is zero, but oh well for now
edge.length *= new_mean_height / mean_height # rescale every branch length in the tree by the ratio of desired to existing height (everybody's heights should be the same, but they never quite were when I was using Bio.Phylo; it's checked below, anyway)
if not treestr: # i'm really pretty sure there's no point in doing this if we're just going to immediately convert to string (and this call previously caused serious problems when it was missing the suppress_unifurcations arg, which has repeatedly been a source of hard-to-track-down weirdness)
dtree.update_bipartitions(suppress_unifurcations=False) # probably doesn't really need to be done
if debug:
print ' final mean: %.4f' % get_mean_leaf_height(tree=dtree)
if treestr:
return dtree.as_string(schema='newick').strip()
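# ----------------------------------------------------------------------------------------
# minimal usage sketch for rescale_tree() (hypothetical demo fcn, not called anywhere),
# illustrating the in-place behavior from the NOTE above: passing <dtree> modifies it (and returns None), while passing <treestr> returns a new newick string:
def _example_rescale_tree():
    dtree = get_dendro_tree(treestr='(a:0.1,(b:0.05,c:0.07):0.02):0.;')
    rescale_tree(1.0, dtree=dtree)  # modifies <dtree> in place
    print '  rescaled mean height: %.4f' % get_mean_leaf_height(tree=dtree)  # now 1.0000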
# ----------------------------------------------------------------------------------------
def get_tree_difference_metrics(region, in_treestr, leafseqs, naive_seq):
taxon_namespace = dendropy.TaxonNamespace() # in order to compare two trees with the metrics below, the trees have to have the same taxon namespace
in_dtree = get_dendro_tree(treestr=in_treestr, taxon_namespace=taxon_namespace, suppress_internal_node_taxa=True)
seqfos = [{'name' : 't%d' % (iseq + 1), 'seq' : seq} for iseq, seq in enumerate(leafseqs)]
out_dtree = get_fasttree_tree(seqfos, naive_seq=naive_seq, taxon_namespace=taxon_namespace, suppress_internal_node_taxa=True)
in_height = get_mean_leaf_height(tree=in_dtree)
out_height = get_mean_leaf_height(tree=out_dtree)
base_width = 100
in_ascii_str = get_ascii_tree(dendro_tree=in_dtree, extra_str=' ', width=base_width) # make copies before the following functions mess the trees up
out_ascii_str = get_ascii_tree(dendro_tree=out_dtree, extra_str=' ', width=int(base_width*out_height/in_height))
print ' comparing input and bppseqgen output trees:'
print ' heights: %.3f %.3f' % (in_height, out_height)
print '    symmetric difference: %d' % dendropy.calculate.treecompare.symmetric_difference(in_dtree, out_dtree) # WARNING these functions modify the tree (i think by removing unifurcations)
print ' euclidean distance: %f' % dendropy.calculate.treecompare.euclidean_distance(in_dtree, out_dtree)
print ' r-f distance: %f' % dendropy.calculate.treecompare.robinson_foulds_distance(in_dtree, out_dtree)
print ' %s' % utils.color('blue', 'input:')
print in_ascii_str
print ' %s' % utils.color('blue', 'output:')
print out_ascii_str
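# ----------------------------------------------------------------------------------------
# sketch of the shared-taxon-namespace requirement used above (hypothetical demo fcn, not
# called anywhere): dendropy's treecompare fcns require both trees to have been parsed into the same TaxonNamespace.
def _example_tree_comparison():
    tns = dendropy.TaxonNamespace()
    t1 = dendropy.Tree.get(data='((a:1,b:1):1,(c:1,d:1):1):0.;', schema='newick', taxon_namespace=tns)
    t2 = dendropy.Tree.get(data='((a:1,c:1):1,(b:1,d:1):1):0.;', schema='newick', taxon_namespace=tns)
    print '  symmetric difference: %d' % dendropy.calculate.treecompare.symmetric_difference(t1, t2)  # 2 (each tree has one non-trivial bipartition that the other lacks)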
# ----------------------------------------------------------------------------------------
# loops over uids in <hline> and <lline> (which, in order, must correspond to each other), chooses a new joint uid and applies it to both h and l trees, then checks to make sure the trees are identical
def merge_heavy_light_trees(hline, lline, use_identical_uids=False, check_trees=True, debug=False):
def ladd(uid, locus):
return '%s-%s' % (uid, locus)
def lrm(uid, locus):
assert '-' in uid and uid.split('-')[-1] == locus
return uid.replace('-%s' % locus, '')
if debug:
print ' before:'
print ' heavy:'
print utils.pad_lines(get_ascii_tree(treestr=hline['tree']))
print ' light:'
print utils.pad_lines(get_ascii_tree(treestr=lline['tree']))
if 'heavy-chain-correlation-info' in lline: # if doing paired h/l correlations, we need to make sure we're pairing together the same events here that were used to determine the correlations (they got out of sync before because things got out of order when writing/reading events from subprocesses)
assert hline['unique_ids'] == lline['heavy-chain-correlation-info']['heavy-chain-uids']
assert len(hline['unique_ids']) == len(lline['unique_ids'])
lpair = [hline, lline]
joint_reco_id = utils.uidhashstr(hline['reco_id'] + lline['reco_id'])
for ltmp in lpair:
ltmp['reco_id'] = joint_reco_id
ltmp['paired-uids'] = []
dtrees = [get_dendro_tree(treestr=l['tree']) for l in lpair]
for iuid, (huid, luid) in enumerate(zip(hline['unique_ids'], lline['unique_ids'])):
joint_uid = utils.uidhashstr(huid + luid)
for ltmp in lpair:
ltmp['unique_ids'][iuid] = joint_uid
if not use_identical_uids:
ltmp['unique_ids'][iuid] = ladd(ltmp['unique_ids'][iuid], ltmp['loci'][iuid])
for l1, l2 in zip(lpair, reversed(lpair)):
l1['paired-uids'].append([l2['unique_ids'][iuid]])
for dt, uid, ltmp in zip(dtrees, [huid, luid], lpair): # NOTE huid and luid here are the *old* ones
dt.find_node_with_taxon_label(uid).taxon = dendropy.Taxon(ltmp['unique_ids'][iuid]) # don't need to update the taxon namespace since we don't use it afterward
hline['tree'], lline['tree'] = [as_str(dt) for dt in dtrees] # have to make a separate tree to actually put in the <line>s, since the symmetric difference function screws up the tree
if check_trees:
if not use_identical_uids: # reset back to the plain <joint_uid> so we can compare
for dt, ltmp in zip(dtrees, lpair):
for uid, locus in zip(ltmp['unique_ids'], ltmp['loci']): # yes, they all have the same locus, but see note in utils
dt.find_node_with_taxon_label(uid).taxon = dendropy.Taxon(lrm(uid, locus)) # don't need to update the taxon namespace since we don't use it afterward
tns = dendropy.TaxonNamespace()
dtrees = [cycle_through_ascii_conversion(dtree=dt, taxon_namespace=tns) for dt in dtrees] # have to recreate from str before calculating symmetric difference to avoid the taxon namespace being screwed up (I tried a bunch to avoid this, I don't know what it's changing, the tns looks fine, but something's wrong)
sym_diff = dendropy.calculate.treecompare.symmetric_difference(*dtrees) # WARNING this function modifies the tree (i think by removing unifurcations)
if sym_diff != 0: # i guess in principle we could turn this off after we've run a fair bit, but it seems really dangerous, since if the heavy and light trees get out of sync the whole simulation is ruined
raise Exception('trees differ (symmetric difference %d) for heavy and light chains' % sym_diff)
if debug:
print ' after:'
print ' symmetric difference: %d' % sym_diff
print ' heavy:'
print utils.pad_lines(get_ascii_tree(treestr=hline['tree']))
print ' light:'
print utils.pad_lines(get_ascii_tree(treestr=lline['tree']))
# ----------------------------------------------------------------------------------------
def collapse_zero_length_leaves(dtree, sequence_uids, debug=False): # <sequence_uids> is uids for which we have actual sequences (i.e. not internal nodes inferred by the tree program without sequences)
if debug > 1:
print ' merging trivially-dangling leaves into parent internal nodes'
print ' distance leaf parent'
removed_nodes = []
for leaf in list(dtree.leaf_node_iter()): # subsume super short/zero length leaves into their parent internal nodes
recursed = False
while leaf.edge_length is not None and leaf.edge_length < 1./(2*typical_bcr_seq_len): # if distance corresponds to less than one mutation, it's probably (always?) just fasttree dangling an internal node as a leaf
if leaf.parent_node is None: # why tf can i get the root node here?
break
if leaf.parent_node.taxon is not None and leaf.parent_node.taxon.label in sequence_uids: # only want to do it if the parent node is a (spurious) internal node added by fasttree (this parent's taxon will be None if suppress_internal_node_taxa was set)
break
if debug > 1:
print ' %8.5f %-20s %-20s' % (leaf.edge_length, ' " ' if recursed else leaf.taxon.label, 'none' if leaf.parent_node.taxon is None else leaf.parent_node.taxon.label)
parent_node = leaf.parent_node
removed_nodes.append(parent_node.taxon.label if parent_node.taxon is not None else None)
collapse_nodes(dtree, leaf.taxon.label, None, keep_name_node=leaf, remove_name_node=leaf.parent_node)
leaf = parent_node
recursed = True
dtree.update_bipartitions(suppress_unifurcations=False)
dtree.purge_taxon_namespace()
if debug:
print ' merged %d trivially-dangling leaves into parent internal nodes: %s' % (len(removed_nodes), ' '.join(str(n) for n in removed_nodes))
# print get_ascii_tree(dendro_tree=dtree, extra_str=' ', width=350)
# print dtree.as_string(schema='newick').strip()
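# ----------------------------------------------------------------------------------------
# minimal usage sketch for collapse_zero_length_leaves() (hypothetical demo fcn, not called
# anywhere; assumes get_dendro_tree() keeps internal node labels as taxa by default, and the
# made-up internal node 'i' isn't in <sequence_uids>, so 'b' gets merged into it):
def _example_collapse_zero_length_leaves():
    dtree = get_dendro_tree(treestr='(a:0.1,(b:0.,c:0.07)i:0.02)r:0.;')
    collapse_zero_length_leaves(dtree, ['a', 'b', 'c'], debug=True)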
# ----------------------------------------------------------------------------------------
def get_fasttree_tree(seqfos, naive_seq=None, naive_seq_name='XnaiveX', taxon_namespace=None, suppress_internal_node_taxa=False, debug=False):
if debug:
print ' running FastTree on %d sequences plus a naive' % len(seqfos)
uid_list = [sfo['name'] for sfo in seqfos]
if any(uid_list.count(u) > 1 for u in uid_list):
raise Exception('duplicate uid(s) in seqfos for FastTree, which\'ll make it crash: %s' % ' '.join(u for u in uid_list if uid_list.count(u) > 1))
with tempfile.NamedTemporaryFile() as tmpfile:
if naive_seq is not None:
tmpfile.write('>%s\n%s\n' % (naive_seq_name, naive_seq))
for sfo in seqfos:
tmpfile.write('>%s\n%s\n' % (sfo['name'], sfo['seq'])) # NOTE the order of the leaves/names is checked when reading bppseqgen output
tmpfile.flush() # BEWARE: without this flush, the file can be empty (or truncated) when FastTree reads it
with open(os.devnull, 'w') as fnull:
treestr = subprocess.check_output('./bin/FastTree -gtr -nt ' + tmpfile.name, shell=True, stderr=fnull)
if debug:
print ' converting FastTree newick string to dendro tree'
dtree = get_dendro_tree(treestr=treestr, taxon_namespace=taxon_namespace, ignore_existing_internal_node_labels=not suppress_internal_node_taxa, suppress_internal_node_taxa=suppress_internal_node_taxa, debug=debug)
naive_node = dtree.find_node_with_taxon_label(naive_seq_name)
if naive_node is not None:
dtree.reroot_at_node(naive_node, suppress_unifurcations=False, update_bipartitions=True)
if not suppress_internal_node_taxa: # if we *are* suppressing internal node taxa, we're probably calling this from clusterpath, in which case we need to mess with the internal nodes in a way that assumes they can be ignored (so we collapse zero length leaves afterwards)
collapse_zero_length_leaves(dtree, uid_list + [naive_seq_name], debug=debug)
return dtree
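# ----------------------------------------------------------------------------------------
# minimal usage sketch for get_fasttree_tree() (hypothetical demo fcn, not called anywhere;
# requires the FastTree binary at ./bin/FastTree, and the aligned toy sequences are made up):
def _example_get_fasttree_tree():
    seqfos = [{'name' : 't1', 'seq' : 'ACGTACGTAC'}, {'name' : 't2', 'seq' : 'ACGTACGTTC'}, {'name' : 't3', 'seq' : 'ACGAACGTTC'}]
    dtree = get_fasttree_tree(seqfos, naive_seq='ACGTACGTAC', debug=True)
    print get_ascii_tree(dendro_tree=dtree, extra_str='  ')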
# ----------------------------------------------------------------------------------------
def node_mtpy(multifo, node): # number of reads/contigs/whatever (depending on context) with the same sequence
if multifo is None or node.taxon.label not in multifo or multifo[node.taxon.label] is None: # almost all of them should be in there, but for instance I'm not adding the dummy branch nodes
return 1
return multifo[node.taxon.label]
# ----------------------------------------------------------------------------------------
# copied from https://github.com/nextstrain/augur/blob/master/base/scores.py
# also see explanation here https://photos.app.goo.gl/gtjQziD8BLATQivR6
def set_lb_values(dtree, tau, only_calc_metric=None, dont_normalize=False, multifo=None, use_old_multiplicity_method=False, debug=False):
"""
traverses <dtree> in postorder and preorder to calculate the up- and downstream tree lengths exponentially weighted by distance, then sums them to get lbi (and takes their ratio to get lbr)
use_old_multiplicity_method: insert multiplicity into integrals (below), which is equivalent to adding N-1 branches between the node and its parent
new version: add N-1 dummy branches of length <default_lb_tau> from the node
"""
getmulti = node_mtpy if use_old_multiplicity_method else lambda x, y: 1
metrics_to_calc = lb_metrics.keys() if only_calc_metric is None else [only_calc_metric]
if debug:
print ' setting %s values with tau %.4f' % (' and '.join(metrics_to_calc), tau)
initial_labels = set([n.taxon.label for n in dtree.preorder_node_iter()])
dtree, dummy_labels = get_tree_with_dummy_branches(dtree, tau, add_dummy_multiplicity_nubs=not use_old_multiplicity_method, multifo=multifo) # this returns a new dtree, but the old tree is a subtree of the new one (or at least its collection of nodes are), and these nodes get modified by the process (hence the reversal fcn below)
# calculate clock length (i.e. for each node, the distance to that node's parent)
for node in dtree.postorder_node_iter(): # postorder vs preorder doesn't matter, but I have to choose one
if node.parent_node is None: # root node
node.clock_length = 0.
for child in node.child_node_iter():
child.clock_length = child.distance_from_root() - node.distance_from_root()
# lbi is the sum of <node.down_polarizer> (downward message from <node>'s parent) and its children's up_polarizers (upward messages)
# traverse the tree in postorder (children first) to calculate message to parents (i.e. node.up_polarizer)
for node in dtree.postorder_node_iter():
node.down_polarizer = 0 # used for <node>'s lbi (this probably shouldn't be initialized here, since it gets reset in the next loop [at least I think they all do])
node.up_polarizer = 0 # used for <node>'s parent's lbi (but not <node>'s lbi)
for child in node.child_node_iter():
node.up_polarizer += child.up_polarizer
bl = node.clock_length / tau
node.up_polarizer *= numpy.exp(-bl) # sum of child <up_polarizer>s weighted by an exponential decayed by the distance to <node>'s parent
node.up_polarizer += getmulti(multifo, node) * tau * (1 - numpy.exp(-bl)) # add the actual contribution (to <node>'s parent's lbi) of <node>: zero if the two are very close, increasing toward asymptote of <tau> for distances near 1/tau (integral from 0 to l of decaying exponential)
# traverse the tree in preorder (parents first) to calculate message to children (i.e. child1.down_polarizer)
for node in dtree.preorder_internal_node_iter():
for child1 in node.child_node_iter(): # calculate down_polarizer for each of <node>'s children
child1.down_polarizer = node.down_polarizer # first sum <node>'s down_polarizer...
for child2 in node.child_node_iter(): # and the *up* polarizers of any other children of <node>
if child1 != child2:
child1.down_polarizer += child2.up_polarizer # add the contribution of <child2> to its parent's (<node>'s) lbi (i.e. <child2>'s contribution to the lbi of its *siblings*)
bl = child1.clock_length / tau
child1.down_polarizer *= numpy.exp(-bl) # and decay the previous sum by distance between <child1> and its parent (<node>)
child1.down_polarizer += getmulti(multifo, child1) * tau * (1 - numpy.exp(-bl)) # add contribution of <child1> to its own lbi: zero if it's very close to <node>, increasing to max of <tau> (integral from 0 to l of decaying exponential)
returnfo = {m : {} for m in metrics_to_calc}
# go over all nodes and calculate lb metrics (can be done in any order)
for node in dtree.postorder_node_iter():
vals = {'lbi' : node.down_polarizer, 'lbr' : 0.}
for child in node.child_node_iter():
vals['lbi'] += child.up_polarizer
vals['lbr'] += child.up_polarizer
if node.down_polarizer > 0.:
vals['lbr'] /= node.down_polarizer # it might make more sense to not include the branch between <node> and its parent in either the numerator or denominator (here it's included in the denominator), but this way I don't have to change any of the calculations above
if dummy_str in node.taxon.label:
continue
if node is dtree.seed_node or node.parent_node is dtree.seed_node: # second clause is only because of dummy root addition (well, and if we are adding dummy root the first clause doesn't do anything)
vals['lbr'] = 0.
for metric in metrics_to_calc:
returnfo[metric][node.taxon.label] = float(vals[metric]) if dont_normalize else normalize_lb_val(metric, float(vals[metric]), tau)
if debug:
max_width = str(max([len(n.taxon.label) for n in dtree.postorder_node_iter()]))
print (' %'+max_width+'s %s%s multi') % ('node', ''.join(' %s' % m for m in metrics_to_calc), 16*' ' if 'lbr' in metrics_to_calc else '')
for node in dtree.preorder_node_iter():
if dummy_str in node.taxon.label:
continue
multi_str = ''
if multifo is not None:
multi_str = str(node_mtpy(multifo, node))
if node_mtpy(multifo, node) > 1:
multi_str = utils.color('blue', multi_str, width=3)
lbstrs = ['%8.3f' % returnfo[m][node.taxon.label] for m in metrics_to_calc]
if 'lbr' in metrics_to_calc:
lbstrs += [' = %-5.3f / %-5.3f' % (returnfo['lbr'][node.taxon.label] * node.down_polarizer, node.down_polarizer)]
print (' %' + max_width + 's %s %3s') % (node.taxon.label, ''.join(lbstrs), multi_str)
# this is maybe time consuming, but I want to leave the tree that was passed in as unmodified as I can (especially since I have to run this fcn twice for lbi/lbr since they need different tau values)
for node in dtree.postorder_node_iter():
delattr(node, 'clock_length')
delattr(node, 'up_polarizer')
delattr(node, 'down_polarizer')
remove_dummy_branches(dtree, initial_labels, dummy_labels)
return returnfo
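# ----------------------------------------------------------------------------------------
# numerical sketch of the per-branch integral used in set_lb_values() (hypothetical demo fcn,
# not called anywhere): a node's own contribution to its parent's lbi is tau * (1 - exp(-bl/tau)),
# i.e. zero for very short branches, asymptoting to tau for branches much longer than tau.
def _example_lb_branch_contribution():
    tau = 0.0025  # made-up value
    for bl in [0., 0.5 * tau, tau, 10 * tau]:
        print '  bl = %8.5f   contribution = %.6f' % (bl, tau * (1 - numpy.exp(-bl / tau)))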
# ----------------------------------------------------------------------------------------
def get_tree_with_dummy_branches(old_dtree, tau, n_tau_lengths=10, add_dummy_leaves=False, add_dummy_multiplicity_nubs=False, multifo=None, debug=False): # add long branches above root and/or below each leaf, since otherwise we're assuming that (e.g.) leaf node fitness is zero
# commenting this since I'm pretty sure I've fixed it, but not removing it since if a similar problem surfaces with dummy branch addition, deep copying is an easy way out
# zero_length_edges = [e for e in old_dtree.preorder_edge_iter() if e.length == 0 and not e.head_node.is_leaf()]
# if len(zero_length_edges) > 0: # rerooting to remove dummy branches screws up the tree in some cases with zero length branches (see comment in that fcn)
# old_dtree = copy.deepcopy(old_dtree) # could maybe do this by default, but it'll probably be really slow on large trees (at least iterating through the trees is; although I suppose maybe deepcopy is smarter than that)
# print ' %s found %d zero length branches in tree, so deep copying before adding dummy branches (this is probably ok ish, but in general it\'s a bad idea to have zero length branches in your trees): %s' % (utils.color('yellow', 'warning'), len(zero_length_edges), ' '.join([e.head_node.taxon.label for e in zero_length_edges]))
dummy_edge_length = n_tau_lengths * tau
dummy_labels = []
new_root_taxon = dendropy.Taxon(dummy_str + '-root')
old_dtree.taxon_namespace.add_taxon(new_root_taxon)
new_root_node = dendropy.Node(taxon=new_root_taxon)
new_dtree = dendropy.Tree(seed_node=new_root_node, taxon_namespace=old_dtree.taxon_namespace, is_rooted=True)
dummy_labels.append(new_root_node.taxon.label)
# then add the entire old tree under this new tree
new_root_node.add_child(old_dtree.seed_node)
for edge in new_root_node.child_edge_iter():
edge.length = dummy_edge_length
if add_dummy_leaves: # add dummy child branches to each leaf
tns = new_dtree.taxon_namespace
for lnode in new_dtree.leaf_node_iter():
new_label = '%s-%s' % (dummy_str, lnode.taxon.label)
tns.add_taxon(dendropy.Taxon(new_label))
new_child_node = lnode.new_child(taxon=tns.get_taxon(new_label), edge_length=dummy_edge_length)
dummy_labels.append(new_child_node.taxon.label)
if add_dummy_multiplicity_nubs: # new way of incorporating multiplicity: add N-1 dummy branches from the node
tns = new_dtree.taxon_namespace
for mnode in list(new_dtree.preorder_node_iter()): # list() is because we're adding nodes as we iterate
for idum in range(1, node_mtpy(multifo, mnode)):
new_label = '%s-multi-%d-%s' % (dummy_str, idum, mnode.taxon.label)
tns.add_taxon(dendropy.Taxon(new_label))
new_child_node = mnode.new_child(taxon=tns.get_taxon(new_label), edge_length=default_lb_tau)
dummy_labels.append(new_child_node.taxon.label)
# TODO commenting this because it gets triggered way too much, but I'm not actually sure that I can really just ignore the problem (but maybe I can)
# zero_len_edge_nodes = [e.head_node for n in new_dtree.preorder_node_iter() for e in n.child_edge_iter() if e.length == 0 and not e.head_node.is_leaf()] # zero len edges above leaves are fine, since leaves don't count for lbr
# if len(zero_len_edge_nodes) > 0:
# print ' %s found %d zero length internal edges in tree, which means lb ratio may mis-categorize branches: %s' % (utils.color('red', 'warning'), len(zero_len_edge_nodes), ' '.join([n.taxon.label for n in zero_len_edge_nodes]))
# # for node in zero_len_edge_nodes: # we don't really want to modify the tree this drastically here (and a.t.m. this causes a crash later on), but I'm leaving it as a placeholder for how to remove zero length edges
# # collapse_nodes(new_dtree, node.taxon.label, node.parent_node.taxon.label) # keep the child, since it can be a leaf
# # print utils.pad_lines(get_ascii_tree(dendro_tree=new_dtree))
new_dtree.update_bipartitions(suppress_unifurcations=False) # not sure if I need this? (suppress_unifurcations is because otherwise it removes the branch between the old and new root nodes)
if debug:
print ' added dummy branches to tree:'
print get_ascii_tree(dendro_tree=new_dtree, extra_str=' ', width=350)
return new_dtree, dummy_labels
# ----------------------------------------------------------------------------------------
def remove_dummy_branches(dtree, initial_labels, dummy_labels, add_dummy_leaves=False, debug=False):
# if add_dummy_leaves:
# print 'UPDATE ok maybe it\'s fine now (since i\'m adding the dummy nubs), but i\'m not checking it'
# raise Exception('not implemented (shouldn\'t be too hard, but a.t.m. I don\'t think I\'ll need it)')
if len(dtree.seed_node.child_nodes()) != 1:
print ' %s root node has more than one child when removing dummy branches: %s' % (utils.color('yellow', 'warning'), ' '.join([n.taxon.label for n in dtree.seed_node.child_nodes()]))
new_root_node = dtree.seed_node.child_nodes()[0]
if debug:
print ' rerooting at %s' % new_root_node.taxon.label
print ' current children: %s' % ' '.join([n.taxon.label for n in new_root_node.child_node_iter()])
# NOTE if the new root has a child separated by a zero-length edge, this reroot call for some reason deletes that child from the tree (both with and without suppress_unifurcations set). After messing around a bunch to try to fix it, the message I'm taking is just that zero length branches (and unifurcations) are a bad idea and I should just forbid them
# UPDATE I think I was just missing the suppress_unifurcations=False in update_bipartitions(), but leaving these comments here in case there was another problem
# UPDATE actually the reroot still seems to eat a node sometimes if the tree is unrooted (so adding the extra reroot above)
# UPDATE this is more or less expected, from dendropy's perspective; see https://github.com/jeetsukumaran/DendroPy/issues/118
assert dtree.is_rooted # make sure it's rooted, to avoid unifurcations getting suppressed (even with the arg set to false)
dtree.reroot_at_node(new_root_node, suppress_unifurcations=False) # reroot at old root node
if debug:
print ' children after reroot: %s' % ' '.join([n.taxon.label for n in new_root_node.child_node_iter()])
dtree.prune_taxa_with_labels(dummy_labels, suppress_unifurcations=False)
dtree.purge_taxon_namespace() # I'm sure there's a good reason the previous line doesn't do this
dtree.update_bipartitions(suppress_unifurcations=False)
if debug:
print ' children after purge: %s' % ' '.join([n.taxon.label for n in new_root_node.child_node_iter()])
final_labels = set([n.taxon.label for n in dtree.preorder_node_iter()])
if initial_labels != final_labels: # this was only happening with a zero-length node hanging off root (see above), which probably won't happen any more since I'm now removing zero length (non-leaf) branches in bcr-phylo simulator.py
print ' %s nodes after dummy branch addition and removal not the same as before:' % utils.color('red', 'error')
print ' missing: %s' % ' '.join(initial_labels - final_labels)
print ' extra: %s' % ' '.join(final_labels - initial_labels)
print ' tree:'
print utils.pad_lines(get_ascii_tree(dendro_tree=dtree, width=400))
assert False # better to crash at this point; i think i have it working reliably now
# ----------------------------------------------------------------------------------------
def get_aa_tree(dtree, annotation, extra_str=None, debug=False):
very_different_frac = 0.5
if debug:
print ' converting nuc tree (mean depth %.3f) to aa' % get_mean_leaf_height(dtree)
if debug > 1:
print utils.pad_lines(get_ascii_tree(dendro_tree=dtree, width=400))
changes = {}
aa_dtree = copy.deepcopy(dtree)
nuc_seqs = {uid : seq for uid, seq in zip(annotation['unique_ids'], annotation['seqs'])}
aa_seqs = {uid : seq for uid, seq in zip(annotation['unique_ids'], annotation['seqs_aa'])}
skipped_edges = []
if debug > 1:
print ' N mutations branch length'
print ' nuc aa nuc aa child node'
for edge in aa_dtree.preorder_edge_iter():
if edge.tail_node is None: # edge above root (no, i don't know why root has an edge above it, but that's how it is)
continue
cnode = edge.head_node # child of this edge
clabel, plabel = cnode.taxon.label, cnode.parent_node.taxon.label # turns out there's also a .tail_node attribute of the edge that isn't listed properly in the docs
if clabel not in aa_seqs or plabel not in aa_seqs: # if either of the seqs are missing, leave the existing (presumably nucleotide-based) branch length unchanged
skipped_edges.append(edge)
continue
nuc_branch_length = edge.length # nucleotide distance from parent node (only used for debug, but we have to grab it before we change the edge length)
aa_mut_frac, aa_n_muts = utils.hamming_fraction(aa_seqs[plabel], aa_seqs[clabel], amino_acid=True, also_return_distance=True)
edge.length = aa_mut_frac
if debug:
nuc_mut_frac, nuc_n_muts = utils.hamming_fraction(nuc_seqs[plabel], nuc_seqs[clabel], also_return_distance=True)
if nuc_mut_frac > 0 and abs(nuc_branch_length - nuc_mut_frac) / nuc_mut_frac > very_different_frac:
print ' %s nuc branch length %.4f and mut frac %.4f very different for branch between %s --> %s' % (utils.color('red', 'warning'), nuc_branch_length, nuc_mut_frac, clabel, plabel)
changes[edge] = (nuc_n_muts, aa_n_muts)
if debug > 1:
print ' %3d %3d %.3f %.3f %s' % (nuc_n_muts, aa_n_muts, nuc_branch_length, aa_mut_frac, clabel)
aa_dtree.update_bipartitions(suppress_unifurcations=False)
if len(skipped_edges) > 0:
print ' %s get_aa_tree()%s: skipped %d/%d edges for which we didn\'t have sequences for both nodes (i.e. left the original branch length unmodified)' % (utils.color('yellow', 'warning'), '' if extra_str is None else ' %s'%extra_str, len(skipped_edges), len(list(aa_dtree.preorder_edge_iter())))
if debug:
assert len(changes) + len(skipped_edges) + 1 == len(list(aa_dtree.preorder_edge_iter())) # +1 is for root edge
print ' rescaled %d/%d edges' % (len(changes), len(list(aa_dtree.preorder_edge_iter())))
print ' aa tree mean depth: %.3f' % get_mean_leaf_height(aa_dtree)
n_to_print = 10
print ' child nodes with %d largest differences between N nuc and N aa changes' % n_to_print
print ' nuc aa parent node child node'
for edge in sorted(changes, key=lambda k: changes[k][1] - changes[k][0])[:n_to_print]:
nuc_n_muts, aa_n_muts = changes[edge]
print ' %3d %3d %-15s %s' % (nuc_n_muts, aa_n_muts, edge.tail_node.taxon.label, edge.head_node.taxon.label)
if debug > 1:
print utils.pad_lines(get_ascii_tree(dendro_tree=aa_dtree, width=400))
return aa_dtree
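# ----------------------------------------------------------------------------------------
# sketch of the branch length replacement in get_aa_tree() (hypothetical demo fcn, not called
# anywhere; inlines the aa hamming fraction rather than calling utils.hamming_fraction()):
def _example_aa_branch_length():
    parent_aa, child_aa = 'MKVL', 'MKAL'
    aa_mut_frac = sum(p != c for p, c in zip(parent_aa, child_aa)) / float(len(parent_aa))
    print '  aa branch length: %.3f' % aa_mut_frac  # 1/4 = 0.250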
# ----------------------------------------------------------------------------------------
# check whether 1) node depth and 2) node pairwise distances are super different when calculated with tree vs sequences (not really sure why it's so different sometimes, best guess is fasttree sucks, partly because it doesn't put the root node anywhere near the root of the tree)
def compare_tree_distance_to_shm(dtree, annotation, max_frac_diff=0.5, min_warn_frac=0.25, extra_str=None, debug=False):
common_nodes = [n for n in dtree.preorder_node_iter() if n.taxon.label in annotation['unique_ids']]
tdepths, mfreqs, fracs = {}, {}, {}
for node in common_nodes:
tdepth = node.distance_from_root()
mfreq = utils.per_seq_val(annotation, 'mut_freqs', node.taxon.label)
frac_diff = abs(tdepth - mfreq) / tdepth if tdepth > 0 else 0
if frac_diff > max_frac_diff:
key = node.taxon.label
tdepths[key] = tdepth
mfreqs[key] = mfreq
fracs[key] = frac_diff
if debug or len(fracs) > 0:
warnstr = utils.color('yellow', 'warning ') if len(fracs) / float(len(common_nodes)) > min_warn_frac else ''
if debug or warnstr != '':
print ' %stree depth and mfreq differ by more than %.0f%% for %d/%d nodes%s' % (warnstr, 100*max_frac_diff, len(fracs), len(common_nodes), '' if extra_str is None else ' for %s' % extra_str)
if debug and len(fracs) > 0:
print ' tree depth mfreq frac diff'
for key, frac in sorted(fracs.items(), key=operator.itemgetter(1), reverse=True):
print ' %.4f %.4f %.4f %s' % (tdepths[key], mfreqs[key], frac, key)
dmatrix = dtree.phylogenetic_distance_matrix()
dmx_taxa = set(dmatrix.taxon_iter()) # phylogenetic_distance_matrix() seems to only return values for leaves, which maybe I'm supposed to expect?
tdists, mdists, fracs = {}, {}, {} # NOTE reusing these names is kind of dangerous
for n1, n2 in itertools.combinations([n for n in common_nodes if n.taxon in dmx_taxa], 2):
tdist = dmatrix.distance(n1.taxon, n2.taxon)
mdist = utils.hamming_fraction(utils.per_seq_val(annotation, 'seqs', n1.taxon.label), utils.per_seq_val(annotation, 'seqs', n2.taxon.label))
frac_diff = abs(tdist - mdist) / tdist if tdist > 0 else 0
if frac_diff > max_frac_diff:
key = (n1.taxon.label, n2.taxon.label)
tdists[key] = tdist
mdists[key] = mdist
fracs[key] = frac_diff
if debug or len(fracs) > 0:
warnstr = utils.color('yellow', 'warning ') if len(fracs) / float(len(common_nodes)) > min_warn_frac else ''
if debug or warnstr != '':
print ' %spairwise distance from tree and sequence differ by more than %.f%% for %d/%d node pairs%s' % (warnstr, 100*max_frac_diff, len(fracs), 0.5 * len(common_nodes) * (len(common_nodes)-1), '' if extra_str is None else ' for %s' % extra_str)
if debug and len(fracs) > 0:
print ' pairwise'
print ' tree dist seq dist frac diff'
for key, frac_diff in sorted(fracs.items(), key=operator.itemgetter(1), reverse=True):
print ' %.4f %.4f %.4f %s %s' % (tdists[key], mdists[key], frac_diff, key[0], key[1])
if debug:
print utils.pad_lines(get_ascii_tree(dendro_tree=dtree, width=400))
utils.print_reco_event(annotation)
# ----------------------------------------------------------------------------------------
def calculate_lb_values(dtree, tau, lbr_tau_factor=None, only_calc_metric=None, dont_normalize=False, annotation=None, extra_str=None, iclust=None, dbgstr='', debug=False):
# if <only_calc_metric> is None, we use <tau> and <lbr_tau_factor> to calculate both lbi and lbr (i.e. with different tau)
# - whereas if <only_calc_metric> is set, we use <tau> to calculate only the given metric
# note that it's a little weird to do all this tree manipulation here, but then do the dummy branch tree manipulation in set_lb_values(), but the dummy branch stuff depends on tau so it's better this way
# <iclust> is just to give a little more granularity in dbg
# TODO this is too slow (although it would be easy to have an option for it to only spot check a random subset of nodes)
# if annotation is not None: # check that the observed shm rate and tree depth are similar (we're still worried that they're different if we don't have the annotation, but we have no way to check it)
# compare_tree_distance_to_shm(dtree, annotation, extra_str=extra_str)
if max(get_leaf_depths(dtree).values()) > 1:
if annotation is None:
raise Exception('tree needs rescaling in lb calculation (metrics will be wrong): found leaf depth greater than 1 (even when less than 1 they can be wrong, but we can be fairly certain that your BCR sequences don\'t have a real mutation frequency greater than 1, so this is a case we can actually check). If you pass in annotations we can rescale to the observed mutation frequency.')
print ' %s leaf depths greater than 1, so rescaling by sequence length' % utils.color('yellow', 'warning')
dtree.scale_edges(1. / numpy.mean([len(s) for s in annotation['seqs']])) # using treeutils.rescale_tree() breaks, it seems because the update_bipartitions() call removes nodes near root on unrooted trees
if debug:
print ' calculating %s%s with tree:' % (' and '.join(lb_metrics if only_calc_metric is None else [only_calc_metric]), '' if extra_str is None else ' for %s' % extra_str)
print utils.pad_lines(get_ascii_tree(dendro_tree=dtree, width=400))
multifo = None
if annotation is not None:
multifo = {} # NOTE now that I'm always doing this, it might make sense to rearrange things a bit, but i don't want to look at it right now
for node in dtree.postorder_node_iter():
multifo[node.taxon.label] = utils.get_multiplicity(annotation, uid=node.taxon.label) if node.taxon.label in annotation['unique_ids'] else 1 # if it's not in there, it could be from wonky names from lonr.r, also could be from FastTree tree where we don't get inferred intermediate sequences
treestr = dtree.as_string(schema='newick') # get this before the dummy branch stuff to make more sure it isn't modified
normstr = 'unnormalized' if dont_normalize else 'normalized'
if only_calc_metric is None:
assert lbr_tau_factor is not None # has to be set if we're calculating both metrics
if iclust is None or iclust == 0:
print ' %scalculating %s lb metrics with tau values %.4f (lbi) and %.4f * %d = %.4f (lbr)' % ('' if extra_str is None else '%s: '%extra_str, normstr, tau, tau, lbr_tau_factor, tau*lbr_tau_factor)
lbvals = set_lb_values(dtree, tau, only_calc_metric='lbi', dont_normalize=dont_normalize, multifo=multifo, debug=debug)
tmpvals = set_lb_values(dtree, tau*lbr_tau_factor, only_calc_metric='lbr', dont_normalize=dont_normalize, multifo=multifo, debug=debug)
lbvals['lbr'] = tmpvals['lbr']
else:
assert lbr_tau_factor is None or dont_normalize # make sure we weren't accidentally called with lbr_tau_factor set, since we'd then silently ignore it (we ignore lbr_tau_factor whenever only_calc_metric is set)
if iclust is None or iclust == 0:
print ' calculating %s %s%s with tau %.4f' % (normstr, lb_metrics[only_calc_metric], dbgstr, tau)
lbvals = set_lb_values(dtree, tau, only_calc_metric=only_calc_metric, dont_normalize=dont_normalize, multifo=multifo, debug=debug)
lbvals['tree'] = treestr
return lbvals
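# ----------------------------------------------------------------------------------------
# minimal usage sketch for calculate_lb_values() (hypothetical demo fcn, not called anywhere;
# the tau and lbr_tau_factor values are made up, and label_nodes() is defined elsewhere in this file):
def _example_calculate_lb_values():
    dtree = get_dendro_tree(treestr='(a:0.01,(b:0.005,c:0.007):0.002):0.;')
    label_nodes(dtree)  # lb values are keyed by node label, so internal nodes need labels
    lbvals = calculate_lb_values(dtree, 0.0025, lbr_tau_factor=20, dont_normalize=True)
    for uid in sorted(lbvals['lbi']):
        print '  %-10s  lbi %.6f  lbr %.6f' % (uid, lbvals['lbi'][uid], lbvals['lbr'][uid])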
# ----------------------------------------------------------------------------------------
def set_n_generations(seq_len, tau, n_tau_lengths, n_generations, debug=False):
if n_generations is None:
assert n_tau_lengths is not None # have to specify one or the other
n_generations = max(1, int(seq_len * tau * n_tau_lengths))
if debug:
print ' %d generations = seq_len * tau * n_tau_lengths = %d * %.4f * %d = max(1, int(%.2f))' % (n_generations, seq_len, tau, n_tau_lengths, seq_len * tau * n_tau_lengths)
# else:
# if debug:
# print ' %d generations' % n_generations
return n_generations
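# ----------------------------------------------------------------------------------------
# worked example for set_n_generations() (hypothetical demo fcn, not called anywhere):
def _example_set_n_generations():
    print '  %d generations' % set_n_generations(400, 0.0025, 10, None)  # max(1, int(400 * 0.0025 * 10)) = 10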
# ----------------------------------------------------------------------------------------
def get_tree_for_lb_bounds(bound, metric, seq_len, tau, n_generations, n_offspring, debug=False):
dtree = dendropy.Tree(is_rooted=True) # note that using a taxon namespace while you build the tree is *much* slower than labeling it afterward (and we do need labels when we calculate lb values)
if bound == 'min':
leaf_node = dtree.seed_node # pretty similar to the dummy root stuff
for igen in range(n_generations):
leaf_node = leaf_node.new_child(edge_length=1./seq_len)
elif bound == 'max':
old_leaf_nodes = [l for l in dtree.leaf_node_iter()]
assert len(old_leaf_nodes) == 1
new_leaf_nodes = []
for igen in range(n_generations):
for ileaf in range(len(old_leaf_nodes)):
for ioff in range(n_offspring):
new_leaf_nodes += [old_leaf_nodes[ileaf].new_child(edge_length=1./seq_len)]
old_leaf_nodes = new_leaf_nodes
new_leaf_nodes = []
else:
assert False
return dtree
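# ----------------------------------------------------------------------------------------
# sketch of the two topologies built above (hypothetical demo fcn, not called anywhere):
# 'min' gives a single chain of <n_generations> edges, while 'max' gives a complete tree with n_offspring**n_generations leaves.
def _example_get_tree_for_lb_bounds():
    dtree = get_tree_for_lb_bounds('max', 'lbi', seq_len=400, tau=0.0025, n_generations=3, n_offspring=2)
    print '  %d leaves' % len(list(dtree.leaf_node_iter()))  # 2**3 = 8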
# ----------------------------------------------------------------------------------------
def calculate_lb_bounds(seq_len, tau, n_tau_lengths=10, n_generations=None, n_offspring=2, only_metrics=None, btypes=None, debug=False): # NOTE the min is just tau, but this fcn is kept around since it makes explicit what the min means
info = {m : {} for m in lb_metrics}
n_generations = set_n_generations(seq_len, tau, n_tau_lengths, n_generations, debug=debug)
for metric in [m for m in lb_metrics if only_metrics is None or m in only_metrics]:
for bound in [b for b in ['min', 'max'] if btypes is None or b in btypes]:
if metric == 'lbr' and bound == 'min': # lbr min is always zero (leaves)
info[metric][bound] = {metric : 0., 'vals' : None}
continue
if debug:
print ' %s %s for seq len %d' % (utils.color('red', bound), utils.color('yellow', metric), seq_len)
start = time.time()
dtree = get_tree_for_lb_bounds(bound, metric, seq_len, tau, n_generations, n_offspring, debug=debug)
label_nodes(dtree)
lbvals = calculate_lb_values(dtree, tau, only_calc_metric=metric, dont_normalize=True, debug=debug)
bfcn = {'min' : min, 'max' : max}[bound] # min() or max() (don't index into __builtins__, since it's a module in __main__ but a dict in imported modules)
info[metric][bound] = {metric : bfcn(lbvals[metric].values()), 'vals' : lbvals}
if debug:
bname, bval = bfcn(lbvals[metric].items(), key=operator.itemgetter(1))
print ' %s of %d %s values (%.1fs): %s %.4f' % (bound, len(lbvals[metric]), metric, time.time() - start, bname, bval)
return info
# ----------------------------------------------------------------------------------------
def find_affy_increases(dtree, line, min_affinity_change=1e-6):
affy_increasing_edges, affy_changes = [], {}
for edge in dtree.preorder_edge_iter():
parent_node = edge.tail_node
child_node = edge.head_node
nlist = [parent_node, child_node]
if None in nlist:
continue
parent_affy, child_affy = [utils.per_seq_val(line, 'affinities', n.taxon.label, use_default=True) for n in nlist]
if None in [parent_affy, child_affy]:
continue
daffy = child_affy - parent_affy
affy_changes[child_node.taxon.label] = daffy
if daffy > min_affinity_change:
affy_increasing_edges.append(edge)
return affy_increasing_edges, affy_changes
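# ----------------------------------------------------------------------------------------
# minimal usage sketch for find_affy_increases() (hypothetical demo fcn, not called anywhere;
# <line> is a made-up partis-style annotation with per-seq 'affinities', and this assumes the
# internal node label 'r' ends up as a taxon so its affinity can be looked up):
def _example_find_affy_increases():
    dtree = get_dendro_tree(treestr='(a:0.1,b:0.2)r:0.;')
    line = {'unique_ids' : ['r', 'a', 'b'], 'affinities' : [0.5, 0.7, 0.4]}
    affy_increasing_edges, affy_changes = find_affy_increases(dtree, line)
    print '  %d affinity-increasing edge(s)  %s' % (len(affy_increasing_edges), affy_changes)  # 1 edge (r --> a, +0.2)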
# ----------------------------------------------------------------------------------------
def get_n_ancestors_to_affy_increase(affy_increasing_edges, node, dtree, line, n_max_steps=15, also_return_branch_len=False, debug=False):
if affy_increasing_edges is None:
affy_increasing_edges, _ = find_affy_increases(dtree, line)
ancestor_node = node
chosen_edge = None
n_steps, branch_len = 0, 0.
while n_steps < n_max_steps:
if ancestor_node is dtree.seed_node:
break
ancestor_edge = ancestor_node.edge # edge from current <ancestor_node> to its parent (who in the next line becomes <ancestor_node>)
ancestor_node = ancestor_node.parent_node # move one more step up the tree
if debug:
ancestor_uid = ancestor_node.taxon.label
ancestor_affinity = utils.per_seq_val(line, 'affinities', ancestor_uid, default_val=float('nan'))
if ancestor_edge in affy_increasing_edges:
chosen_edge = ancestor_edge
break
if debug:
print ' %12s %5s %12s %2d %8.4f %9.4f %s' % ('', '', ancestor_uid, n_steps, branch_len, ancestor_affinity, utils.color('yellow', '?') if ancestor_node is dtree.seed_node else '')
n_steps += 1
branch_len += ancestor_edge.length
if chosen_edge is None:
return (None, None) if also_return_branch_len else None
if debug:
print ' %12s %5s %12s %2d %8.4f %9.4f%+9.4f' % ('', '', ancestor_uid, n_steps, branch_len, ancestor_affinity, utils.per_seq_val(line, 'affinities', chosen_edge.head_node.taxon.label, default_val=float('nan')) - ancestor_affinity) # NOTE the latter can be negative now, since unlike the old fcn (below) we're just looking for an edge where affinity increased (rather than a node with lower affinity than the current one)
if also_return_branch_len: # kind of hacky, but we only want the branch length for plotting atm, and actually we aren't even making those plots by default any more
return n_steps, branch_len
else:
return n_steps
# ----------------------------------------------------------------------------------------
def get_n_descendents_to_affy_increase(affy_increasing_edges, node, dtree, line, n_max_steps=15, also_return_branch_len=False, debug=False):
# ----------------------------------------------------------------------------------------
def get_branch_length(chosen_edge): # go back up from the <chosen_edge> to get its total depth from <node> (otherwise we'd need to keep track of the depths for all the child nodes all the way down)
tedge = chosen_edge
blen = chosen_edge.length
while tedge.tail_node is not node:
tedge = tedge.tail_node.edge
blen += tedge.length
return blen
# ----------------------------------------------------------------------------------------
child_nodes = [node]
chosen_edge = None
n_steps, branch_len = 1, 0.
while n_steps < n_max_steps:
found = False
child_nodes = [cc for c in child_nodes for cc in c.child_node_iter()] # all children of current children
if len(child_nodes) == 0: # they're all leaves
break
for cnode in child_nodes:
cedge = cnode.edge # edge to <cnode>'s parent
if debug:
child_affinity = utils.per_seq_val(line, 'affinities', cnode.taxon.label, default_val=float('nan'))
if cedge in affy_increasing_edges:
chosen_edge = cedge
found = True
assert branch_len == 0.
branch_len = get_branch_length(cedge)
break
if debug and not found:
print ' %12s %5s %12s %2d %8.4f %9.4f %s' % ('', '', cnode.taxon.label, -n_steps, -get_branch_length(cedge), child_affinity, utils.color('yellow', ' ?') if all(c.is_leaf() for c in child_nodes) else '')
if found:
break
n_steps += 1
if chosen_edge is None:
return (None, None) if also_return_branch_len else None
if debug:
print ' %12s %5s %12s %+2d %8.4f %9.4f%+9.4f' % ('', '', cnode.taxon.label, -n_steps, -branch_len, child_affinity, child_affinity - utils.per_seq_val(line, 'affinities', chosen_edge.tail_node.taxon.label, default_val=float('nan'))) # NOTE the latter can be negative now, since unlike the old fcn (below) we're just looking for an edge where affinity increased (rather than a node with lower affinity than the current one)
if also_return_branch_len: # kind of hacky, but we only want the branch length for plotting atm, and actually we aren't even making those plots by default any more
return n_steps, branch_len
else:
return n_steps
# ----------------------------------------------------------------------------------------
# looks both upwards (positive result) and downwards (negative result) for the nearest edge on which affinity increased from parent to child
def get_min_steps_to_affy_increase(affy_increasing_edges, node, dtree, line, also_return_branch_len=False, lbval=None, only_look_upwards=False, debug=False):
assert also_return_branch_len
if debug:
print ' %12s %5.3f%12s %2s %8s %9.4f' % (node.taxon.label, lbval, '', '', '', utils.per_seq_val(line, 'affinities', node.taxon.label))
n_ance, ance_branch_len = get_n_ancestors_to_affy_increase(affy_increasing_edges, node, dtree, line, also_return_branch_len=also_return_branch_len, debug=debug)
n_desc, desc_branch_len = None, None
if not only_look_upwards:
n_desc, desc_branch_len = get_n_descendents_to_affy_increase(affy_increasing_edges, node, dtree, line, also_return_branch_len=also_return_branch_len, debug=debug)
if n_desc is None and n_ance is None:
n_steps, blen = None, None
elif n_desc is None:
n_steps, blen = n_ance, ance_branch_len
elif n_ance is None:
n_steps, blen = -n_desc, -desc_branch_len
else: # NOTE only the ancestor one can return zero
n_steps, blen = (-n_desc, -desc_branch_len) if n_desc < n_ance else (n_ance, ance_branch_len) # NOTE decides based on N steps, not distance
if debug:
if n_steps is None:
nstr, bstr = [utils.color('yellow', ' ?') for _ in range(2)]
else:
nstr = utils.color(('red' if n_steps==0 else 'purple') if n_steps>=0 else 'blue', '%+2d'%n_steps)
bstr = '%+7.4f' % blen
print ' %12s %5s %12s %3s %s' % ('', '', '', nstr, bstr)
return n_steps, blen
# ----------------------------------------------------------------------------------------
# BELOW: old upward-only fcn. Should be very similar to new ancestor fcn, except that old one looked for affinity increase to <node> whereas new fcn looks for edge on which affinity increase occurred
# ----------------------------------------------------------------------------------------
# NOTE discussion of why we only look upwards in "evaluation framework" section of paper's .tex file (use .tex since there's commented bits)
# - summary:
# - Searching only upward reflects the fact that a mutation can only affect the fitness of nodes below it, and thus a high \lbr\ value at a node immediately above an important mutation is likely due to random chance rather than a signal of selection.
# - EDIT due to random chance OR MAYBE because the super high tau helps/lets the higher node look better since it's nearer to root
# - Nodes with high \lbr\ that are several steps below such a mutation, on the other hand, simply reflect the fact that increased fitness typically takes several generations to manifest itself as an increase in observed offspring.
# - In other words searching downward would improve the apparent performance of a metric, but only by counting as successes cases that were successfully predicted only through random chance.
# - Another reason we do not also search in the downward direction is that in a practical sense it is much more useful to know that the important mutation is above a node than below it.
# - We could imagine in the lab testing one or a few branches above a node, but because of the bifurcating nature of trees there would be far too many potential branches below (not to mention adding the ambiguity of potentially going up and then down, i.e.\ how to count cousins).
# UPDATE i think the big problem with only looking upwards is that then you don't know what to do with nodes that're above all affinity increases
# - then it seems reasonable (as below) to just ignore them, which is *bad* since in practice these high nodes will have really high scores
# - also, this makes it seem like super large tau is a good idea, since it ignores maybe the big downside to large tau: parents get too much credit for their children's offspring
# def get_n_ancestors_to_affy_change(node, dtree, line, affinity_changes=None, min_affinity_change=1e-6, n_max_steps=15, also_return_branch_len=False, affy_increasing_edges=None, debug=False):
# debug = True
# # find number of steps/ancestors to the nearest ancestor with lower affinity than <node>'s
# # - also finds the corresponding distance, which is to the lower end of the branch containing the corresponding affinity-increasing mutation
# # - this is chosen so that <n_steps> and <branch_len> are both 0 for the node at the bottom of a branch on which affinity increases, and are *not* the distance *to* the lower-affinity node
# # - because it's so common for affinity to get worse from ancestor to descendent, it's important to remember that here we are looking for the first ancestor with lower affinity than the node in question, which is *different* to looking for the first ancestor that has lower affinity than one of its immediate descendents (which we could also plot, but it probably wouldn't be significantly different to the metric performance, since for the metric performance we only really care about the left side of the plot, but this only affects the right side)
# # - <min_affinity_change> is just to eliminate floating point precision issues (especially since we're deriving affinity by inverting kd) (note that at least for now, and with default settings, the affinity changes should all be pretty similar, and not small)
# this_affinity = utils.per_seq_val(line, 'affinities', node.taxon.label)
# if debug:
# print ' %12s %12s %8s %9.4f' % (node.taxon.label, '', '', this_affinity)
# ancestor_node = node
# chosen_ancestor_affinity = None
# n_steps, branch_len = 0, 0.
# while n_steps < n_max_steps: # note that if we can't find an ancestor with worse affinity, we don't plot the node
# if ancestor_node is dtree.seed_node:
# break
# ancestor_distance = ancestor_node.edge_length # distance from current <ancestor_node> to its parent (who in the next line becomes <ancestor_node>)
# ancestor_node = ancestor_node.parent_node # move one more step up the tree
# ancestor_uid = ancestor_node.taxon.label
# if ancestor_uid not in line['unique_ids']:
# print ' %s ancestor %s of %s not in true line' % (utils.color('yellow', 'warning'), ancestor_uid, node.taxon.label)
# break
# ancestor_affinity = utils.per_seq_val(line, 'affinities', ancestor_uid)
# if this_affinity - ancestor_affinity > min_affinity_change: # if we found an ancestor with lower affinity, we're done
# chosen_ancestor_affinity = ancestor_affinity
# if affinity_changes is not None:
# affinity_changes.append(this_affinity - ancestor_affinity)
# # if affy_increasing_edges is not None:
# # assert any(e in affy_increasing_edges for e in ancestor_node.child_edge_iter())
# # # assert ancestor_node.edge in affy_increasing_edges
# # print 'OK'
# break
# if debug:
# print ' %12s %12s %8.4f %9.4f%s' % ('', ancestor_uid, branch_len, ancestor_affinity, utils.color('green', ' x') if ancestor_node is dtree.seed_node else '')
# n_steps += 1
# branch_len += ancestor_distance
# if chosen_ancestor_affinity is None: # couldn't find ancestor with lower affinity
# return (None, None) if also_return_branch_len else None
# if debug:
# print ' %12s %12s %8.4f %9.4f %s%-9.4f' % ('', ancestor_uid, branch_len, chosen_ancestor_affinity, utils.color('red', '+'), this_affinity - chosen_ancestor_affinity)
# if also_return_branch_len: # kind of hackey, but we only want the branch length for plotting atm, and actually we aren't even making those plots by default any more
# return n_steps, branch_len
# else:
# return n_steps
# ----------------------------------------------------------------------------------------
lonr_files = { # this is kind of ugly, but it's the cleanest way I can think of to have both this code and the R code know what they're called
'phy.outfname' : 'phy_out.txt',
'phy.treefname' : 'phy_tree.nwk',
'outseqs.fname' : 'outseqs.fasta',
'edgefname' : 'edges.tab',
'names.fname' : 'names.tab',
'lonrfname' : 'lonr.csv',
}
# ----------------------------------------------------------------------------------------
def build_lonr_tree(edgefos, debug=False):
# NOTE have to build the tree from the edge file, since the lonr code seems to add nodes that aren't in the newick file (which is just from phylip).
all_nodes = set([e['from'] for e in edgefos] + [e['to'] for e in edgefos])
effective_root_nodes = set([e['from'] for e in edgefos]) - set([e['to'] for e in edgefos]) # "effective" because it can be in an unrooted tree. Not sure if there's always exactly one node that has no inbound edges though
if len(effective_root_nodes) != 1:
raise Exception('too many effective root nodes: %s' % effective_root_nodes)
root_label = list(effective_root_nodes)[0] # should be '1' for dnapars
if debug:
print ' chose \'%s\' as root node' % root_label
tns = dendropy.TaxonNamespace(all_nodes)
root_node = dendropy.Node(taxon=tns.get_taxon(root_label)) # NOTE this sets node.label and node.taxon.label to the same thing, which may or may not be what we want # label=root_label, (if you start setting the node labels again, you also have to translate them below)
dtree = dendropy.Tree(taxon_namespace=tns, seed_node=root_node, is_rooted=True)
remaining_nodes = copy.deepcopy(all_nodes) - set([root_label]) # a.t.m. I'm not actually using <all_nodes> after this, but I still want to keep them separate in case I start using it
weight_or_distance_key = 'distance' # maybe I should be using the 'weight' column? I think they're just proportional though, so it shouldn't matter (same for the line below)
root_edgefos = [efo for efo in edgefos if efo['from'] == root_label]
for efo in root_edgefos:
dtree.seed_node.new_child(taxon=tns.get_taxon(efo['to']), edge_length=efo[weight_or_distance_key]) # label=efo['to'], (if you start setting the node labels again, you also have to translate them below)
remaining_nodes.remove(efo['to'])
while len(remaining_nodes) > 0:
n_removed = 0 # I think I don't need this any more (it only happened before I remembered to remove the root node), but it doesn't seem like it'll hurt
for lnode in dtree.leaf_node_iter():
children = [efo for efo in edgefos if efo['from'] == lnode.taxon.label]
if debug > 1 and len(children) > 0:
print ' adding children to %s:' % lnode.taxon.label
for chfo in children:
lnode.new_child(taxon=tns.get_taxon(chfo['to']), edge_length=chfo[weight_or_distance_key]) # label=chfo['to'], (if you start setting the node labels again, you also have to translate them below)
remaining_nodes.remove(chfo['to'])
n_removed += 1
if debug > 1:
print ' %s' % chfo['to']
if debug > 1:
print ' remaining: %d' % len(remaining_nodes)
if len(remaining_nodes) > 0 and n_removed == 0: # if there's zero remaining, we're just about to break anyway
if debug > 1:
print ' didn\'t remove any, so breaking: %s' % remaining_nodes
break
return dtree
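# ----------------------------------------------------------------------------------------
# minimal usage sketch for build_lonr_tree() (hypothetical demo fcn, not called anywhere; the
# edgefos mimic rows of lonr's edges.tab after the type conversions in parse_lonr() below):
def _example_build_lonr_tree():
    edgefos = [{'from' : '1', 'to' : 'L1', 'weight' : 1., 'distance' : 2},
               {'from' : '1', 'to' : '2', 'weight' : 1., 'distance' : 1},
               {'from' : '2', 'to' : 'L2', 'weight' : 1., 'distance' : 3}]
    dtree = build_lonr_tree(edgefos, debug=True)
    print dtree.as_string(schema='newick').strip()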
# ----------------------------------------------------------------------------------------
def parse_lonr(outdir, input_seqfos, naive_seq_name, reco_info=None, debug=False):
def get_node_type_from_name(name, debug=False): # internal nodes in simulated trees should be labeled like 'mrca-<stuff>' (has to correspond to what bcr-phylo-benchmark did)
if 'mrca' in name:
return 'internal'
elif 'leaf' in name:
return 'leaf'
else:
if debug:
print ' not sure of node type for \'%s\'' % name
return None
# get lonr names (lonr replaces them with shorter versions, I think because of phylip)
lonr_names, input_names = {}, {}
with open(outdir + '/' + lonr_files['names.fname']) as namefile: # headers: "head head2"
reader = csv.DictReader(namefile, delimiter='\t')
for line in reader:
if line['head'][0] != 'L' and line['head'] != naive_seq_name: # internal node
dummy_int = int(line['head']) # check that it's just a (string of a) number
assert line['head2'] == '-'
continue
input_names[line['head']] = line['head2'] # head2 is our names
lonr_names[line['head2']] = line['head']
def final_name(lonr_name):
return input_names.get(lonr_name, lonr_name)
# read edge info (i.e., implicitly, the tree that lonr.r used)
edgefos = [] # headers: "from to weight distance"
with open(outdir + '/' + lonr_files['edgefname']) as edgefile:
reader = csv.DictReader(edgefile, delimiter='\t')
for line in reader:
line['distance'] = int(line['distance'])
line['weight'] = float(line['weight'])
edgefos.append(line)
dtree = build_lonr_tree(edgefos, debug=debug)
# switch leaves to input names
for node in dtree.leaf_node_iter():
node.taxon.label = input_names[node.taxon.label]
assert node.label is None # (if you start setting the node labels again, you also have to translate them here)
# node.label = node.taxon.label # (if you start setting the node labels again, you also have to translate them here)
if debug:
print utils.pad_lines(get_ascii_tree(dendro_tree=dtree, width=250))
nodefos = {node.taxon.label : {} for node in dtree.postorder_node_iter()} # info for each node (internal and leaf), destined for output
# read the sequences for both leaves and inferred (internal) ancestors
seqfos = {final_name(sfo['name']) : sfo['seq'] for sfo in utils.read_fastx(outdir + '/' + lonr_files['outseqs.fname'])}
input_seqfo_dict = {sfo['name'] : sfo['seq'] for sfo in input_seqfos} # just to make sure lonr didn't modify the input sequences
for node in dtree.postorder_node_iter():
label = node.taxon.label
if label not in seqfos:
raise Exception('unexpected sequence name %s' % label)
if node.is_leaf() or label == naive_seq_name:
if label not in input_seqfo_dict:
raise Exception('leaf node \'%s\' not found in input seqs' % label)
if seqfos[label] != input_seqfo_dict[label]:
print 'input: %s' % input_seqfo_dict[label]
print ' lonr: %s' % utils.color_mutants(input_seqfo_dict[label], seqfos[label], align=True)
raise Exception('lonr leaf sequence doesn\'t match input sequence (see above)')
nodefos[label]['seq'] = seqfos[label]
# read actual lonr info
lonrfos = []
if debug:
print ' pos mutation lonr syn./a.b.d. parent child'
with open(outdir + '/' + lonr_files['lonrfname']) as lonrfile: # heads: "mutation,LONR,mutation.type,position,father,son,flag"
reader = csv.DictReader(lonrfile)
for line in reader:
assert len(line['mutation']) == 2
assert line['mutation.type'] in ('S', 'R')
assert line['flag'] in ('TRUE', 'FALSE')
mutation = line['mutation'].upper() # dnapars has it upper case already, but neighbor has it lower case
parent_name = final_name(line['father'])
child_name = final_name(line['son'])
parent_seq = nodefos[parent_name]['seq']
pos = int(line['position']) - 1 # switch from one- to zero-indexing
child_seq = nodefos[child_name]['seq']
if parent_seq[pos] != mutation[0] or child_seq[pos] != mutation[1]:
print 'parent: %s' % parent_seq
print ' child: %s' % utils.color_mutants(parent_seq, child_seq, align=True)
raise Exception('mutation info (%s at %d) doesn\'t match sequences (see above)' % (mutation, pos))
lonrfos.append({
'mutation' : mutation,
'lonr' : float(line['LONR']),
'synonymous' : line['mutation.type'] == 'S',
'position' : pos,
'parent' : parent_name,
'child' : child_name,
'affected_by_descendents' : line['flag'] == 'TRUE',
})
if debug:
lfo = lonrfos[-1]
print ' %3d %2s %5.2f %s / %s %4s %-20s' % (lfo['position'], lfo['mutation'], lfo['lonr'], 'x' if lfo['synonymous'] else ' ', 'x' if lfo['affected_by_descendents'] else ' ', lfo['parent'], lfo['child'])
# check for duplicate nodes (not sure why lonr.r creates these, but we should probably collapse them at some point)
# in simulation we sample internal nodes, but lonr.r's tree construction forces them to be leaves; they then frequently end up immediately adjacent to internal nodes in lonr.r's tree, so we try to collapse them
duplicate_groups = utils.group_seqs_by_value(nodefos.keys(), keyfunc=lambda q: nodefos[q]['seq'])
duplicate_groups = [g for g in duplicate_groups if len(g) > 1]
if len(duplicate_groups) > 0:
n_max = 15
dbg_str = ', '.join([' '.join(g) for g in duplicate_groups[:n_max]]) # only print the first 15 of 'em, if there's more
if len(duplicate_groups) > n_max:
dbg_str += utils.color('blue', ' [...]')
print ' collapsing %d groups of nodes with duplicate sequences (probably just internal nodes that were renamed by lonr.r): %s' % (len(duplicate_groups), dbg_str)
for dgroup in duplicate_groups:
non_phylip_names = [n for n in dgroup if get_node_type_from_name(n) is not None]
if len(non_phylip_names) == 0: # phylip internal node names are of the form str(<integer>), so just choose the first name alphabetically
name_to_use = sorted(dgroup)[0]
elif len(non_phylip_names) == 1:
name_to_use = non_phylip_names[0]
else:
raise Exception('expected either zero or one non-phylip names, but got: %s' % non_phylip_names)
names_to_remove = [n for n in dgroup if n != name_to_use]
for rname in names_to_remove: # only info in here a.t.m. is the sequence
del nodefos[rname]
# NOTE not collapsing nodes in tree to match <nodefos> (see comment on next line)
# collapse_nodes(dtree, name_to_use, rname, allow_failure=True, debug=True) # this turned out not to be worth the effort (it doesn't really work, since the tree is too messed up), so we just add the duplicate info to the return dict instead
for lfo in lonrfos:
for key in ('parent', 'child'):
if lfo[key] in names_to_remove:
lfo[key] = name_to_use
return {'tree' : dtree.as_string(schema='newick'), 'nodes' : nodefos, 'values' : lonrfos}
# ----------------------------------------------------------------------------------------
def run_lonr(input_seqfos, naive_seq_name, workdir, tree_method, lonr_code_file=None, phylip_treefile=None, phylip_seqfile=None, seed=1, debug=False):
if lonr_code_file is None:
lonr_code_file = os.path.dirname(os.path.realpath(__file__)).replace('/python', '/bin/lonr.r')
if not os.path.exists(lonr_code_file):
raise Exception('lonr code file %s does not exist' % lonr_code_file)
if tree_method not in ('dnapars', 'neighbor'):
raise Exception('unexpected lonr tree method %s' % tree_method)
# # installation stuff
# rcmds = [
# 'source("https://bioconductor.org/biocLite.R")',
# 'biocLite("Biostrings")',
# 'install.packages("seqinr", repos="http://cran.rstudio.com/")',
# ]
# utils.run_r(rcmds, workdir)
input_seqfile = workdir + '/input-seqs.fa'
with open(input_seqfile, 'w') as iseqfile:
for sfo in input_seqfos:
iseqfile.write('>%s\n%s\n' % (sfo['name'], sfo['seq']))
existing_phylip_output_str = ''
if phylip_treefile is not None: # using existing phylip output, e.g. from cft
tree = get_dendro_tree(treefname=phylip_treefile)
edgefos = []
for node in tree.preorder_node_iter():
for edge in node.child_edge_iter():
edgefos.append({'from' : node.taxon.label, 'to' : edge.head_node.taxon.label, 'weight' : edge.length})
existing_edgefname = workdir + '/edges.csv'
existing_node_seqfname = workdir + '/inferred-node-seqs.fa'
with open(existing_edgefname, 'w') as edgefile:
writer = csv.DictWriter(edgefile, ('from', 'to', 'weight'))
writer.writeheader()
for line in edgefos:
writer.writerow(line)
with open(existing_node_seqfname, 'w') as node_seqfile:
writer = csv.DictWriter(node_seqfile, ('head', 'seq'))
writer.writeheader()
for sfo in utils.read_fastx(phylip_seqfile):
writer.writerow({'head' : sfo['name'], 'seq' : sfo['seq']})
existing_phylip_output_str = ', existing.edgefile="%s", existing.node.seqfile="%s"' % (existing_edgefname, existing_node_seqfname)
rcmds = [
'source("%s")' % lonr_code_file,
'set.seed(%d)' % seed,
'G.phy.outfname = "%s"' % lonr_files['phy.outfname'], # this is a pretty shitty way to do this, but the underlying problem is that there's too many files, but I don't want to parse them all into one or two files in R, so I need to pass all of 'em to the calling python script
'G.phy.treefname = "%s"' % lonr_files['phy.treefname'],
'G.outseqs.fname = "%s"' % lonr_files['outseqs.fname'],
'G.edgefname = "%s"' % lonr_files['edgefname'],
'G.names.fname = "%s"' % lonr_files['names.fname'],
'G.lonrfname = "%s"' % lonr_files['lonrfname'],
'compute.LONR(method="%s", infile="%s", workdir="%s/", outgroup="%s"%s)' % (tree_method, input_seqfile, workdir, naive_seq_name, existing_phylip_output_str),
]
outstr, errstr = utils.run_r(rcmds, workdir, extra_str=' ', return_out_err=True, debug=debug)
if debug:
print utils.pad_lines(outstr)
print utils.pad_lines(errstr)
os.remove(input_seqfile)
if phylip_treefile is not None:
os.remove(existing_edgefname)
os.remove(existing_node_seqfname)
# ----------------------------------------------------------------------------------------
def calculate_liberman_lonr(input_seqfos=None, line=None, reco_info=None, phylip_treefile=None, phylip_seqfile=None, tree_method=None, naive_seq_name='X-naive-X', seed=1, debug=False):
# NOTE see issues/notes in bin/lonr.r
if phylip_treefile is not None or phylip_seqfile is not None:
raise Exception('never got this (passing existing phylip output files to lonr.r) to work -- lonr.r kept crashing, although if you ran exactly the same phylip commands as lonr.r does, it would probably work.')
assert input_seqfos is None or line is None
if input_seqfos is None:
input_seqfos = [{'name' : line['unique_ids'][iseq], 'seq' : line['seqs'][iseq]} for iseq in range(len(line['unique_ids']))]
input_seqfos.insert(0, {'name' : naive_seq_name, 'seq' : line['naive_seq']})
if tree_method is None:
tree_method = 'dnapars' if len(input_seqfos) < 500 else 'neighbor'
workdir = utils.choose_random_subdir('/tmp/%s' % os.getenv('USER', default='partis-work'))
os.makedirs(workdir)
if debug:
print ' %s' % utils.color('green', 'lonr:')
run_lonr(input_seqfos, naive_seq_name, workdir, tree_method, phylip_treefile=phylip_treefile, phylip_seqfile=phylip_seqfile, seed=seed, debug=debug)
lonr_info = parse_lonr(workdir, input_seqfos, naive_seq_name, reco_info=reco_info, debug=debug)
for fn in lonr_files.values():
os.remove(workdir + '/' + fn)
os.rmdir(workdir)
return lonr_info
# ----------------------------------------------------------------------------------------
def get_tree_metric_lines(annotations, cpath, reco_info, use_true_clusters, min_overlap_fraction=0.5, only_use_best_partition=False, only_plot_uids_with_affinity_info=False, glfo=None, debug=False):
# collect inferred and true events
inf_lines_to_use, true_lines_to_use = None, None
if use_true_clusters: # use clusters from the true partition, rather than inferred one
assert reco_info is not None
true_partition = utils.get_partition_from_reco_info(reco_info)
print ' using %d true clusters to calculate inferred selection metrics (sizes: %s)' % (len(true_partition), ' '.join(str(l) for l in sorted([len(c) for c in true_partition], reverse=True)))
if len(annotations) != len(true_partition):
print '    %s true (%d) and inferred (%d) partitions have different lengths when trying to match up clusters for use_true_clusters' % (utils.wrnstr(), len(true_partition), len(annotations))
if debug:
print ' choosing N N N frac (N chosen)'
print ' from true & chosen = in common in common (w/out duplicates)'
inf_lines_to_use, true_lines_to_use = [], []
chosen_ustrs = set() # now that we're using the fraction instead of the raw total, we mostly shouldn't get multiple true clusters corresponding to the same inferred cluster, but maybe it'll still happen occasionally
for cluster in true_partition:
true_lines_to_use.append(utils.synthesize_multi_seq_line_from_reco_info(cluster, reco_info)) # note: duplicates (a tiny bit of) code in utils.print_true_events()
n_max_in_common, max_frac_in_common, ustr_to_use = None, None, None # look for the inferred cluster that has the most uids in common with this true cluster
for ustr in set(annotations) - chosen_ustrs: # order will be different in reco info and inferred clusters
n_in_common = len(set(utils.uids_and_dups(annotations[ustr])) & set(cluster)) # can't just look for the actual cluster since we collapse duplicates, but bcr-phylo doesn't (but maybe I should throw them out when parsing bcr-phylo output)
frac_in_common = n_in_common**2 / float(len(utils.uids_and_dups(annotations[ustr])) * len(cluster)) # and have to use frac instead of total to guard against inferred clusters that include several true clusters (reminder: these inferred clusters may have been run with --n-final-clusters 1 or something similar)
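# to illustrate: this frac is the product of the two overlap fractions n/|inferred| * n/|true|, e.g. 3 uids in common between an inferred cluster of size 4 and a true cluster of size 6 gives 3**2 / (4. * 6) = 0.375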
if max_frac_in_common is None or frac_in_common > max_frac_in_common:
ustr_to_use = ustr
n_max_in_common = n_in_common
max_frac_in_common = frac_in_common
if max_frac_in_common is None:
raise Exception('cluster \'%s\' not found in inferred annotations (probably because use_true_clusters was set)' % ':'.join(cluster))
if max_frac_in_common < min_overlap_fraction:
raise Exception('overlap fraction %.3f too small: for true cluster (size %d), highest was for inferred cluster with size %d (%d including duplicates). Maybe need to set --simultaneous-true-clonal-seqs (if you did set --simultaneous-true-clonal-seqs, you probably need to set --no-indels, i.e. a true cluster got split apart because of incorrect indel calls).' % (max_frac_in_common, len(cluster), len(annotations[ustr_to_use]['unique_ids']), len(utils.uids_and_dups(annotations[ustr_to_use]))))
if debug:
print ' %4d %4d %4d %4d %4.2f (%d)' % (len(set(annotations) - chosen_ustrs), len(cluster), len(utils.uids_and_dups(annotations[ustr_to_use])), n_max_in_common, max_frac_in_common, len(annotations[ustr_to_use]['unique_ids']))
if max_frac_in_common < 1:
print ' note: couldn\'t find an inferred cluster that corresponded exactly to the true cluster (best was %d & %d = %d (frac %.2f), where the inferred includes %d duplicates)' % (len(utils.uids_and_dups(annotations[ustr_to_use])), len(cluster), n_max_in_common, max_frac_in_common, utils.n_dups(annotations[ustr_to_use]))
if ustr_to_use in chosen_ustrs:
raise Exception('chose the same inferred cluster to correspond to two different true clusters')
chosen_ustrs.add(ustr_to_use)
inf_lines_to_use.append(annotations[ustr_to_use])
else: # use clusters from the inferred partition (whether from <cpath> or <annotations>), and synthesize clusters exactly matching these using single true annotations from <reco_info> (to repeat: these are *not* true clusters)
inf_lines_to_use = annotations.values() # we used to restrict it to clusters in the best partition, but I'm switching since I think whenever there are extra ones in <annotations> we always actually want their tree metrics (at the moment there will only be extra ones if either --calculate-alternative-annotations or --write-additional-cluster-annotations are set, but in the future it could also be the default)
if only_use_best_partition:
assert cpath is not None and cpath.i_best is not None
inf_lines_to_use = [l for l in inf_lines_to_use if l['unique_ids'] in cpath.partitions[cpath.i_best]]
if only_plot_uids_with_affinity_info:
assert False # should work fine as is, but needs to be checked and integrated with things
tmplines = []
for line in inf_lines_to_use:
iseqs_to_keep = [i for i, a in enumerate(line['affinities']) if a is not None]
if len(iseqs_to_keep) == 0:
continue
print ' keeping %d/%d' % (len(iseqs_to_keep), len(line['unique_ids']))
new_line = copy.deepcopy(line) # *really* don't want to modify the annotations from partitiondriver
utils.restrict_to_iseqs(new_line, iseqs_to_keep, glfo)
tmplines.append(new_line)
inf_lines_to_use = tmplines
if reco_info is not None:
for line in inf_lines_to_use:
true_line = utils.synthesize_multi_seq_line_from_reco_info(line['unique_ids'], reco_info)
true_lines_to_use.append(true_line)
return inf_lines_to_use, true_lines_to_use
# ----------------------------------------------------------------------------------------
def plot_tree_metrics(args, plotdir, metrics_to_calc, antn_list, is_simu=False, inf_annotations=None, ete_path=None, workdir=None, include_relative_affy_plots=False, queries_to_include=None,
paired=False, debug=False):
reqd_args = [('selection_metric_plot_cfg', None), ('slice_bin_fname', None), ('queries_to_include', None), ('label_tree_nodes', False), ('affinity_key', None)]
for marg, dval in [(a, d) for a, d in reqd_args if not hasattr(args, a)]: # "required" args, just so when i add an arg to bin/partis i don't also have to add it to dtr-run.py
setattr(args, marg, dval) # NOTE i can't actually test this atm since the individual tree metric fcn doesn't use this fcn for plotting (but it should)
assert not include_relative_affy_plots # would need updating
import plotting
import lbplotting
start = time.time()
print ' plotting to %s' % plotdir
if inf_annotations is not None:
assert is_simu
plot_cfg = args.selection_metric_plot_cfg
if plot_cfg is None:
plot_cfg = all_plot_cfg
if args.affinity_key is not None:
tmplines = [l for l in antn_list if args.affinity_key in l]
if len(tmplines) == 0:
print ' %s --affinity-key \'%s\' doesn\'t occur in any of the %d annotations' % (utils.wrnstr(), args.affinity_key, len(antn_list))
for atn in tmplines:
atn['affinities'] = atn[args.affinity_key]
has_affinities = any('affinities' in l for l in antn_list)
if has_affinities and any('affinities' not in l for l in antn_list): # if at least one line has affinities but not all of them do, add null values (this is kind of hacky, but it's far better than handling partially-missing affinities in all the differing plotting fcns)
for atn in [l for l in antn_list if 'affinities' not in l]:
atn['affinities'] = [None for _ in atn['unique_ids']]
has_trees = is_simu or any(tk in l['tree-info']['lb'] for l in antn_list for tk in ['tree', 'aa-tree'])
if is_simu and (not has_affinities or all(affy is None for affy in antn_list[0]['affinities'])): # if it's bcr-phylo simulation we should have affinities for everybody, otherwise for nobody
print ' %s no affinity information in this simulation, so can\'t plot lb/affinity' % utils.color('yellow', 'note')
return
utils.prep_dir(plotdir, wildlings=['*.svg', '*.html'], allow_other_files=True, subdirs=lb_metrics.keys())
fnames = lbplotting.add_fn(None, init=True)
if has_affinities:
affy_fnames, slice_fnames = [[]], [[]]
for mtr in [m for m in metrics_to_calc if m in affy_metrics]:
if 'lb-vs-affy' in plot_cfg:
lbplotting.plot_lb_vs_affinity(plotdir, antn_list, mtr, only_csv=args.only_csv_plots, fnames=affy_fnames, separate_rows=True, is_true_line=is_simu, debug=debug)
if 'slice' in plot_cfg:
lbplotting.make_lb_vs_affinity_slice_plots(plotdir, antn_list, mtr, only_csv=args.only_csv_plots, fnames=slice_fnames, separate_rows=True, is_true_line=is_simu, paired=paired, n_bin_cfg_fname=args.slice_bin_fname, debug=debug)
# lbplotting.make_lb_scatter_plots('affinity-ptile', plotdir, mtr, antn_list, yvar=mtr+'-ptile', fnames=fnames, is_true_line=is_simu)
fnames += [['header', 'affinity metrics']] + affy_fnames + slice_fnames
if 'joy' in plot_cfg and not args.only_csv_plots:
fnames.append([])
for mtr in metrics_to_calc:
lbplotting.make_lb_affinity_joyplots(plotdir + '/joyplots', antn_list, mtr, fnames=fnames)
if 'lb-vs-daffy' in plot_cfg:
daffy_fnames = [[]]
for mtr in [m for m in metrics_to_calc if m in daffy_metrics]:
lbplotting.plot_lb_vs_ancestral_delta_affinity(plotdir + '/' + mtr, antn_list, mtr, is_true_line=is_simu, only_csv=args.only_csv_plots, fnames=daffy_fnames, separate_rows=True, debug=debug)
fnames += [['header', 'delta-affinity metrics']] + daffy_fnames
if ('distr' in plot_cfg or not has_affinities) and not args.only_csv_plots:
for mtr in metrics_to_calc:
lbplotting.plot_lb_distributions(mtr, plotdir, antn_list, is_true_line=is_simu, fnames=fnames, only_overall=False, n_iclust_plot_fnames=None if has_affinities else 8) #, stats='mean:max')
lbplotting.add_fn(fnames, new_row=True)
if not args.only_csv_plots: # all the various scatter plots are really slow
if 'lb-scatter' in plot_cfg:
for xv, yv in [(xv, yv) for xv, yv in [('cons-dist-aa', 'aa-lbi'), ('aa-lbi', 'lbi'), ('sum-cons-dist-aa', 'sum-aa-lbi'), ('sum-aa-lbi', 'sum-lbi')] if xv in metrics_to_calc and yv in metrics_to_calc]:
lbplotting.make_lb_scatter_plots(xv, plotdir, yv, antn_list, fnames=fnames, is_true_line=is_simu, colorvar='affinity' if has_affinities and 'cons-dist' in xv else None, add_jitter='cons-dist' in xv, n_iclust_plot_fnames=None if has_affinities else 8, queries_to_include=args.queries_to_include, add_stats='correlation')
if ete_path is not None and has_trees and 'tree' in plot_cfg:
lbplotting.plot_lb_trees(metrics_to_calc, plotdir, antn_list, ete_path, workdir, is_true_line=is_simu, queries_to_include=args.queries_to_include, fnames=fnames, label_all_nodes=args.label_tree_nodes)
subdirs = [d for d in os.listdir(plotdir) if os.path.isdir(plotdir + '/' + d)]
plotting.make_html(plotdir, fnames=fnames, new_table_each_row=True, htmlfname=plotdir + '/overview.html', extra_links=[(subd, '%s/' % subd) for subd in subdirs], bgcolor='#FFFFFF', title='all plots:')
if is_simu and not args.only_csv_plots and 'true-vs-inf-metrics' in plot_cfg:
assert inf_annotations is not None
for mtr in [m for m in metrics_to_calc if m in lb_metrics]:
lbplotting.plot_true_vs_inferred_lb(plotdir + '/' + mtr, antn_list, inf_annotations, mtr, fnames=fnames)
lbplotting.plot_cons_seq_accuracy(plotdir, antn_list, fnames=fnames)
if 'tree-mut-stats' in plot_cfg:
plotting.plot_tree_mut_stats(plotdir + '/hmm/tree-mut-stats', antn_list, is_simu) # only_leaves=True
print ' selection metric plotting time: %.1f sec' % (time.time() - start)
# ----------------------------------------------------------------------------------------
def get_tree_for_inf_line(line, treefname=None, cpath=None, annotations=None, use_true_clusters=False, ignore_existing_internal_node_labels=False, debug=False):
# figure out how we want to get the inferred tree
if treefname is not None:
uids_in_common = set()
for treestr in get_treestrs_from_file(treefname):
dtree = get_dendro_tree(treestr=treestr, ignore_existing_internal_node_labels=ignore_existing_internal_node_labels, debug=debug)
uids_in_common = set([n.taxon.label for n in dtree.preorder_node_iter()]) & set(line['unique_ids'])
if len(uids_in_common) > 0: # take the first one with any in common
origin = 'treefname'
break
if len(uids_in_common) == 0:
dtree = None
origin = 'no-uids'
print ' %s no uids in common between line and any trees from %s (line ids: %s)' % (utils.wrnstr(), treefname, ' '.join(line['unique_ids']))
elif False: # use_liberman_lonr_tree (disabled: see issues/notes in bin/lonr.r; note also that <reco_info> isn't in scope in this fcn, so the line below would need fixing before re-enabling)
lonr_info = calculate_liberman_lonr(line=line, reco_info=reco_info, debug=debug)
dtree = get_dendro_tree(treestr=lonr_info['tree'])
# line['tree-info']['lonr'] = lonr_info
origin = 'lonr'
elif cpath is not None and cpath.i_best is not None and not use_true_clusters and line['unique_ids'] in cpath.partitions[cpath.i_best]: # if <use_true_clusters> is set, then the clusters in <inf_lines_to_use> won't correspond to the history in <cpath>, so this won't work NOTE now that I've added the direct check if the unique ids are in the best partition, i can probably remove the use_true_clusters check, but I don't want to mess with it a.t.m.
assert annotations is not None
i_only_cluster = cpath.partitions[cpath.i_best].index(line['unique_ids'])
cpath.make_trees(annotations=annotations, i_only_cluster=i_only_cluster, get_fasttrees=True, debug=False)
dtree = cpath.trees[i_only_cluster] # as we go through the loop, the <cpath> is presumably filling all of these in
origin = 'cpath'
else:
seqfos = [{'name' : uid, 'seq' : seq} for uid, seq in zip(line['unique_ids'], line['seqs'])]
dtree = get_fasttree_tree(seqfos, naive_seq=line['naive_seq'], debug=debug)
origin = 'fasttree'
return {'tree' : dtree, 'origin' : origin}
# ----------------------------------------------------------------------------------------
def check_lb_values(line, lbvals):
for metric in [m for m in lbvals if m in lb_metrics]:
missing = set(line['unique_ids']) - set(lbvals[metric])
if len(missing) > 0: # we expect to get extra ones in the tree, for inferred ancestral nodes for which we don't have sequences, but missing ones probably indicate something's up
# raise Exception('uids in annotation not the same as lb info keys\n missing: %s\n extra: %s' % (' '.join(set(line['unique_ids']) - set(lbvals[metric])), ' '.join(set(lbvals[metric]) - set(line['unique_ids']))))
extra = set(lbvals[metric]) - set(line['unique_ids'])
common = set(line['unique_ids']) & set(lbvals[metric])
print ' %s uids in annotation not the same as lb info keys for \'%s\': %d missing from lb info %d extra in lb info (%d in common)' % (utils.color('red', 'error'), metric, len(missing), len(extra), len(common))
if len(missing) + len(extra) < 35:
print ' missing from lb info: %s\n extra in lb info: %s\n common: %s' % (' '.join(missing), ' '.join(extra), ' '.join(common))
# NOTE this is not tested, but might be worth using in the future
# # ----------------------------------------------------------------------------------------
# def get_trees_for_annotations(annotations, cpath=None, workdir=None, min_cluster_size=default_min_selection_metric_cluster_size, cluster_indices=None, debug=False): # NOTE this duplicates some code in the following function (but I want them separate since I don't really care about this fcn much)
# print 'getting trees'
# inf_lines_to_use = annotations.values()
# n_before = len(inf_lines_to_use)
# inf_lines_to_use = sorted([l for l in inf_lines_to_use if len(l['unique_ids']) >= min_cluster_size], key=lambda l: len(l['unique_ids']), reverse=True)
# n_after = len(inf_lines_to_use) # after removing the small ones
# tree_origin_counts = {n : {'count' : 0, 'label' : l} for n, l in (('treefname', 'read from %s' % treefname), ('cpath', 'made from cpath'), ('fasttree', 'ran fasttree'), ('lonr', 'ran liberman lonr'))}
# print ' calculating selection metrics for %d cluster%s with size%s: %s' % (n_after, utils.plural(n_after), utils.plural(n_after), ' '.join(str(len(l['unique_ids'])) for l in inf_lines_to_use))
# print ' skipping %d smaller than %d' % (n_before - n_after, min_cluster_size)
# if cluster_indices is not None:
# if min(cluster_indices) < 0 or max(cluster_indices) >= len(inf_lines_to_use):
# raise Exception('invalid cluster indices %s for partition with %d clusters' % (cluster_indices, len(inf_lines_to_use)))
# print ' skipped all iclusts except %s (size%s %s)' % (' '.join(str(i) for i in cluster_indices), utils.plural(len(cluster_indices)), ' '.join(str(len(inf_lines_to_use[i]['unique_ids'])) for i in cluster_indices))
# n_already_there = 0
# for iclust, line in enumerate(inf_lines_to_use):
# if cluster_indices is not None and iclust not in cluster_indices:
# continue
# if debug:
# print ' %s sequence cluster' % utils.color('green', str(len(line['unique_ids'])))
# if 'tree-info' in line: # NOTE we used to continue here, but now I've decided we really want to overwrite what's there (although I'm a little worried that there was a reason I'm forgetting not to overwrite them)
# if debug:
# print ' %s overwriting tree that was already in <line>' % utils.color('yellow', 'warning')
# n_already_there += 1
# treefo = get_tree_for_inf_line(line, cpath=cpath, annotations=annotations, debug=debug)
# if treefo is None:
# continue
# tree_origin_counts[treefo['origin']]['count'] += 1
# line['tree-info'] = {} # NOTE <treefo> has a dendro tree, but what we put in the <line> (at least for now) is a newick string
# line['tree-info']['tree'] = treefo['tree'].as_string(schema='newick')
# print ' tree origins: %s' % ', '.join(('%d %s' % (nfo['count'], nfo['label'])) for n, nfo in tree_origin_counts.items() if nfo['count'] > 0)
# if n_already_there > 0:
# print ' %s overwriting %d / %d that already had trees' % (utils.color('yellow', 'warning'), n_already_there, n_after)
# ----------------------------------------------------------------------------------------
def get_aa_lb_metrics(line, nuc_dtree, lb_tau, lbr_tau_factor=None, only_calc_metric=None, dont_normalize_lbi=False, extra_str=None, iclust=None, debug=False): # and add them to <line>
utils.add_seqs_aa(line)
if max(get_leaf_depths(nuc_dtree).values()) > 1: # not really sure why this has to happen before converting to aa, but it seems necessary to avoid getting a huge branch below the root (and for consistency: if we're also calculating [nuc-]lbi, the nuc tree has already been rescaled by the time we get here)
if line is None:
raise Exception('tree needs rescaling in lb calculation (metrics will be wrong): found leaf depth greater than 1 (even when less than 1 they can be wrong, but we can be fairly certain that your BCR sequences don\'t have a real mutation frequency greater than 1, so this is the case we can actually check). If you pass in annotations we can rescale to the observed mutation frequency.')
print ' %s leaf depths greater than 1, so rescaling by sequence length' % utils.color('yellow', 'warning')
nuc_dtree.scale_edges(1. / numpy.mean([len(s) for s in line['seqs']])) # using treeutils.rescale_tree() breaks, it seems because the update_bipartitions() call removes nodes near root on unrooted trees
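# note: dividing edge lengths by the mean sequence length converts them from (presumably) numbers of mutations to per-site mutation frequencies, which brings leaf depths back below 1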
aa_dtree = get_aa_tree(nuc_dtree, line, extra_str=extra_str, debug=debug)
aa_lb_info = calculate_lb_values(aa_dtree, lb_tau, lbr_tau_factor=lbr_tau_factor, only_calc_metric=only_calc_metric, annotation=line, dont_normalize=dont_normalize_lbi, extra_str=extra_str, iclust=iclust, dbgstr=' on aa tree', debug=debug)
if 'tree-info' not in line:
line['tree-info'] = {'lb' : {}}
line['tree-info']['lb']['aa-tree'] = aa_dtree.as_string(schema='newick')
for nuc_metric in [k for k in aa_lb_info if k != 'tree']:
line['tree-info']['lb']['aa-'+nuc_metric] = aa_lb_info[nuc_metric]
# ----------------------------------------------------------------------------------------
def calculate_tree_metrics(args, metrics_to_calc, annotations, lb_tau, lbr_tau_factor=None, cpath=None, treefname=None, reco_info=None, use_true_clusters=False, base_plotdir=None,
train_dtr=False, dtr_cfg=None, ete_path=None, workdir=None, true_lines_to_use=None, outfname=None, only_use_best_partition=False, glfo=None, debug=False):
min_cluster_size = args.min_selection_metric_cluster_size # default_min_selection_metric_cluster_size
print 'getting selection metrics: %s' % ' '.join(metrics_to_calc)
if reco_info is not None:
if not use_true_clusters:
print ' note: getting selection metrics on simulation without setting <use_true_clusters> (i.e. probably without setting --simultaneous-true-clonal-seqs)'
for tmpline in reco_info.values():
assert len(tmpline['unique_ids']) == 1 # at least for the moment, we're splitting apart true multi-seq lines when reading in seqfileopener.py
if args.dtr_path is not None:
assert not args.dont_normalize_lbi # it's trained on normalized lbi, so results are garbage if you don't normalize
dtr_cfgvals, trainfo, skmodels, pmml_models, missing_models = init_dtr(train_dtr, args.dtr_path, cfg_fname=dtr_cfg)
if true_lines_to_use is not None: # i.e. being called by bin/dtr-run.py
assert reco_info is None
inf_lines_to_use = None
else: # called from python/partitiondriver.py
inf_lines_to_use, true_lines_to_use = get_tree_metric_lines(annotations, cpath, reco_info, use_true_clusters, only_use_best_partition=only_use_best_partition, glfo=glfo) # NOTE these continue to be modified (by removing clusters we don't want) further down, and then they get passed to the plotting functions
# get tree and calculate metrics for inferred lines
if inf_lines_to_use is not None and true_lines_to_use is None: # eh, maybe i can get away with not running inferred stuff on true lines?
n_before = len(inf_lines_to_use)
inf_lines_to_use = sorted([l for l in inf_lines_to_use if len(l['unique_ids']) >= min_cluster_size], key=lambda l: len(l['unique_ids']), reverse=True)
n_after = len(inf_lines_to_use) # after removing the small ones
tree_origin_counts = {n : {'count' : 0, 'label' : l} for n, l in (('treefname', 'read from %s' % treefname), ('cpath', 'made from cpath'), ('fasttree', 'ran fasttree'), ('lonr', 'ran liberman lonr'))}
print ' calculating selection metrics for %d cluster%s with size%s: %s' % (n_after, utils.plural(n_after), utils.plural(n_after), ' '.join(str(len(l['unique_ids'])) for l in inf_lines_to_use))
print ' skipping %d smaller than %d' % (n_before - n_after, min_cluster_size)
if args.cluster_indices is not None:
if min(args.cluster_indices) < 0 or max(args.cluster_indices) >= len(inf_lines_to_use):
raise Exception('invalid cluster indices %s for partition with %d clusters' % (args.cluster_indices, len(inf_lines_to_use)))
print ' skipped all iclusts except %s (size%s %s)' % (' '.join(str(i) for i in args.cluster_indices), utils.plural(len(args.cluster_indices)), ' '.join(str(len(inf_lines_to_use[i]['unique_ids'])) for i in args.cluster_indices))
n_already_there, n_skipped_uid = 0, 0
final_inf_lines = []
for iclust, line in enumerate(inf_lines_to_use):
if args.cluster_indices is not None and iclust not in args.cluster_indices:
continue
if debug:
print ' %s sequence cluster' % utils.color('green', str(len(line['unique_ids'])))
if 'tree-info' in line: # NOTE we used to continue here, but now I've decided we really want to overwrite what's there (although I'm a little worried that there was a reason I'm forgetting not to overwrite them)
if debug:
print ' %s overwriting selection metric info that was already in <line>' % utils.color('yellow', 'warning')
n_already_there += 1
line['tree-info'] = {'lb' : {}} # NOTE <treefo> has a dendro tree, but what we put in the <line> (at least for now) is a newick string
if 'cons-dist-aa' in metrics_to_calc:
add_cdists_to_lbfo(line, line['tree-info']['lb'], 'cons-dist-aa', debug=debug) # this adds the values both directly to the <line>, and to <line['tree-info']['lb']>, but the former won't end up in the output file unless the corresponding keys are specified as extra annotation columns (this distinction/duplication is worth having, although it's not ideal)
# get the tree if any of the requested metrics need it
if any(m in metrics_to_calc for m in ['lbi', 'lbr', 'aa-lbi', 'aa-lbr']):
treefo = get_tree_for_inf_line(line, treefname=treefname, cpath=cpath, annotations=annotations, use_true_clusters=use_true_clusters, debug=debug)
if treefo['tree'] is None and treefo['origin'] == 'no-uids':
n_skipped_uid += 1
continue
tree_origin_counts[treefo['origin']]['count'] += 1
if any(m in metrics_to_calc for m in ['lbi', 'lbr']): # have to (or at least easier to) calc both even if we only need one (although i think this is only because of the lbr_tau_factor shenanigans, which maybe we don't need any more?)
lbfo = calculate_lb_values(treefo['tree'], lb_tau, lbr_tau_factor=lbr_tau_factor, annotation=line, dont_normalize=args.dont_normalize_lbi, extra_str='inf tree', iclust=iclust, debug=debug)
check_lb_values(line, lbfo) # would be nice to remove this eventually, but I keep running into instances where dendropy is silently removing nodes
line['tree-info']['lb'].update(lbfo)
if any(m in metrics_to_calc for m in ['aa-lbi', 'aa-lbr']):
get_aa_lb_metrics(line, treefo['tree'], lb_tau, lbr_tau_factor=lbr_tau_factor, dont_normalize_lbi=args.dont_normalize_lbi, extra_str='(AA inf tree, iclust %d)'%iclust, iclust=iclust, debug=debug)
if args.dtr_path is not None and not train_dtr: # don't want to train on data (NOTE this would probably also need all the lb metrics calculated, but i don't care atm)
calc_dtr(False, line, line['tree-info']['lb'], treefo['tree'], None, pmml_models, dtr_cfgvals) # adds predicted dtr values to lbfo (hardcoded False and None are to make sure we don't train on data)
final_inf_lines.append(line)
print ' tree origins: %s' % ', '.join(('%d %s' % (nfo['count'], nfo['label'])) for n, nfo in tree_origin_counts.items() if nfo['count'] > 0)
if n_skipped_uid > 0:
print ' skipped %d/%d clusters that had no uids in common with tree in %s' % (n_skipped_uid, n_after, treefname)
if n_already_there > 0:
print ' %s replaced tree info in %d / %d that already had it' % (utils.color('yellow', 'warning'), n_already_there, n_after)
inf_lines_to_use = final_inf_lines # replace it with a new list that only has the clusters we really want
# calculate lb values for true lines/trees
if true_lines_to_use is not None: # note that if <base_plotdir> *isn't* set, we don't actually do anything with the true lb values
n_true_before = len(true_lines_to_use)
true_lines_to_use = sorted([l for l in true_lines_to_use if len(l['unique_ids']) >= min_cluster_size], key=lambda l: len(l['unique_ids']), reverse=True)
n_true_after = len(true_lines_to_use)
print ' also doing %d true cluster%s with size%s: %s' % (n_true_after, utils.plural(n_true_after), utils.plural(n_true_after), ' '.join(str(len(l['unique_ids'])) for l in true_lines_to_use))
print ' skipping %d smaller than %d' % (n_true_before - n_true_after, min_cluster_size)
final_true_lines = []
for iclust, true_line in enumerate(true_lines_to_use):
if args.cluster_indices is not None and iclust not in args.cluster_indices:
continue
true_dtree = get_dendro_tree(treestr=true_line['tree'])
true_lb_info = calculate_lb_values(true_dtree, lb_tau, lbr_tau_factor=lbr_tau_factor, annotation=true_line, dont_normalize=args.dont_normalize_lbi, extra_str='true tree', iclust=iclust, debug=debug)
true_line['tree-info'] = {'lb' : true_lb_info}
check_lb_values(true_line, true_line['tree-info']['lb']) # would be nice to remove this eventually, but I keep running into instances where dendropy is silently removing nodes
if any(m in metrics_to_calc for m in ['aa-lbi', 'aa-lbr']):
get_aa_lb_metrics(true_line, true_dtree, lb_tau, lbr_tau_factor=lbr_tau_factor, dont_normalize_lbi=args.dont_normalize_lbi, extra_str='(AA true tree, iclust %d)'%iclust, iclust=iclust, debug=debug)
if 'cons-dist-aa' in metrics_to_calc:
add_cdists_to_lbfo(true_line, true_line['tree-info']['lb'], 'cons-dist-aa', debug=debug) # see comment in previous call above
if args.dtr_path is not None:
calc_dtr(train_dtr, true_line, true_lb_info, true_dtree, trainfo, pmml_models, dtr_cfgvals) # either adds training values to trainfo, or adds predicted dtr values to lbfo
final_true_lines.append(true_line)
true_lines_to_use = final_true_lines # replace it with a new list that only has the clusters we really want
if args.dtr_path is not None: # it would be nice to eventually merge these two blocks, i.e. use the same code to plot dtr and lbi/lbr
if train_dtr:
print ' training decision trees into %s' % args.dtr_path
if dtr_cfgvals['n_train_per_family'] is not None:
print ' n_train_per_family: using only %d from each family for among-families dtr' % dtr_cfgvals['n_train_per_family']
for cg in cgroups:
for tvar in dtr_targets[cg]:
train_dtr_model(trainfo[cg][tvar], args.dtr_path, dtr_cfgvals, cg, tvar)
elif base_plotdir is not None:
assert true_lines_to_use is not None
plstart = time.time()
assert ete_path is None or workdir is not None # need the workdir to make the ete trees
import plotting
import lbplotting
# if 'affinities' not in annotations[0] or all(affy is None for affy in annotations[0]['affinities']): # if it's bcr-phylo simulation we should have affinities for everybody, otherwise for nobody
# return
print ' plotting to %s' % base_plotdir
true_plotdir = base_plotdir + '/true-tree-metrics'
lbmlist = sorted(m for m in dtr_metrics if m not in missing_models) # sorted() is just so the order in the html file matches that in the lb metric one
utils.prep_dir(true_plotdir, wildlings=['*.svg', '*.html'], allow_other_files=True, subdirs=lbmlist)
fnames = []
for lbm in lbmlist:
if 'delta-affinity' in lbm:
lbplotting.plot_lb_vs_ancestral_delta_affinity(true_plotdir+'/'+lbm, true_lines_to_use, lbm, is_true_line=True, only_csv=args.only_csv_plots, fnames=fnames, debug=debug)
else:
for affy_key in (['affinities', 'relative_affinities'] if args.include_relative_affy_plots else ['affinities']):
lbplotting.plot_lb_vs_affinity(true_plotdir, true_lines_to_use, lbm, is_true_line=True, only_csv=args.only_csv_plots, fnames=fnames, affy_key=affy_key)
if not args.only_csv_plots:
plotting.make_html(true_plotdir, fnames=fnames, extra_links=[(subd, '%s/%s/' % (true_plotdir, subd)) for subd in lbmlist])
print ' dtr plotting time %.1fs' % (time.time() - plstart)
elif base_plotdir is not None:
assert ete_path is None or workdir is not None # need the workdir to make the ete trees
if true_lines_to_use is None: # don't plot inferred metrics on simulation (saves time + complication, and we hardly ever actually want them)
plstr, antn_list, is_simu, inf_annotations = 'inferred', inf_lines_to_use, False, None
else:
plstr, antn_list, is_simu, inf_annotations = 'true', true_lines_to_use, True, inf_lines_to_use
plot_tree_metrics(args, '%s/%s-tree-metrics' % (base_plotdir, plstr), metrics_to_calc, antn_list, is_simu=is_simu, inf_annotations=inf_annotations, ete_path=ete_path, workdir=workdir, debug=debug)
if outfname is not None:
print ' writing selection metrics to %s' % outfname
utils.prep_dir(None, fname=outfname, allow_other_files=True)
def dumpfo(tl):
dfo = {'unique_ids' : tl['unique_ids']}
dfo.update(tl['tree-info'])
return dfo
with open(outfname, 'w') as tfile:
json.dump([dumpfo(l) for l in inf_lines_to_use if 'tree-info' in l], tfile)
# ----------------------------------------------------------------------------------------
def init_dtr(train_dtr, dtr_path, cfg_fname=None):
# ----------------------------------------------------------------------------------------
def read_cfg():
if cfg_fname is None: # just use the defaults
dtr_cfgvals = {}
else: # read cfg values from a file
with open(cfg_fname) as yfile:
dtr_cfgvals = yaml.load(yfile, Loader=Loader)
if 'vars' in dtr_cfgvals: # format is slightly different in the file (in the file we don't require the explicit split between per-seq and per-cluster variables)
allowed_vars = set(v for cg in cgroups for pc in dtr_vars[cg] for v in dtr_vars[cg][pc])
cfg_vars = set(v for cg in cgroups for v in dtr_cfgvals['vars'][cg])
bad_vars = cfg_vars - allowed_vars
if len(bad_vars) > 0:
raise Exception('unexpected dtr var%s (%s) in cfg file %s' % (utils.plural(len(bad_vars)), ', '.join(bad_vars), cfg_fname))
for cg in cgroups:
dtr_cfgvals['vars'][cg] = {pc : [v for v in dtr_vars[cg][pc] if v in dtr_cfgvals['vars'][cg]] for pc in pchoices} # loop over the allowed vars here so the order is always the same
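# for reference, a cfg file might look something like this (hypothetical values, in yaml):
#   vars:
#     within-families: [lbi, cons-dist-aa]
#     among-families: [lbi]
#   n_train_per_family: 10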
for tk in set(default_dtr_options) - set(dtr_cfgvals): # set any missing ones to the defaults
if tk == 'vars':
dtr_cfgvals[tk] = dtr_vars
elif tk == 'n_jobs':
dtr_cfgvals[tk] = utils.auto_n_procs() # isn't working when I put it up top, not sure why
else:
dtr_cfgvals[tk] = default_dtr_options[tk]
return dtr_cfgvals
# ----------------------------------------------------------------------------------------
def read_model(cg, tvar):
if 'pypmml' not in sys.modules:
import pypmml
picklefname, pmmlfname = dtrfname(dtr_path, cg, tvar), dtrfname(dtr_path, cg, tvar, suffix='pmml')
if os.path.exists(picklefname): # pickle file (i.e. with entire model class written to disk, but *must* be read with the same version of sklearn that was used to write it) [these should always be there, since on old ones they were all we had, and on new ones we write both pickle and pmml]
if os.path.exists(pmmlfname): # pmml file (i.e. just with the info to make predictions, but can be read with other software versions)
pmml_models[cg][tvar] = sys.modules['pypmml'].Model.fromFile(pmmlfname)
else: # if the pmml file isn't there, this must be old files, so we read the pickle, convert to pmml, then read that new pmml file
if 'joblib' not in sys.modules: # just so people don't need to install it unless they're training (also scons seems to break it https://stackoverflow.com/questions/24453387/scons-attributeerror-builtin-function-or-method-object-has-no-attribute-disp)
import joblib
with open(picklefname) as dfile:
skmodels[cg][tvar] = sys.modules['joblib'].load(dfile)
write_pmml(pmmlfname, skmodels[cg][tvar], get_dtr_varnames(cg, dtr_cfgvals['vars']), tvar)
pmml_models[cg][tvar] = sys.modules['pypmml'].Model.fromFile(pmmlfname)
else:
if cg == 'among-families' and tvar == 'delta-affinity': # this is the only one that should be missing, since we added it last
missing_models.append('-'.join([cg, tvar, metric_method])) # awkward, but we need the full joined name (rather than cg and tvar separately) later on
print ' %s %s doesn\'t exist, skipping (%s)' % (cg, tvar, dtrfname(dtr_path, cg, tvar))
return
raise Exception('model file doesn\'t exist: %s' % picklefname)
# ----------------------------------------------------------------------------------------
dtr_cfgvals = read_cfg()
skmodels = {cg : {tv : None for tv in dtr_targets[cg]} for cg in cgroups}
pmml_models = {cg : {tv : None for tv in dtr_targets[cg]} for cg in cgroups}
missing_models = []
trainfo = None
if train_dtr:
trainfo = {cg : {tv : {'in' : [], 'out' : []} for tv in dtr_targets[cg]} for cg in cgroups} # , 'weights' : []}
else:
rstart = time.time()
for cg in cgroups:
for tvar in dtr_targets[cg]:
read_model(cg, tvar)
print ' read decision trees from %s (%.1fs)' % (dtr_path, time.time() - rstart)
return dtr_cfgvals, trainfo, skmodels, pmml_models, missing_models
# ----------------------------------------------------------------------------------------
def calc_dtr(train_dtr, line, lbfo, dtree, trainfo, pmml_models, dtr_cfgvals, skmodels=None): # either add training values for <line>, or predict on it
# ----------------------------------------------------------------------------------------
def add_dtr_training_vals(cg, tvar, dtr_invals): # transfer dtr input values to tfo['in'], and add output (affinity stuff) values to tfo['out']
# trainfo[XXX]['weights'] += line['affinities']
def get_delta_affinity_vals():
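# sign convention (as far as I can tell from get_n_ancestors_to_affy_change()): the training target is -n_steps, i.e. minus the number of generations you have to go up before affinity changes, so sequences nearer to an affinity change get larger (less negative) targets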
tmpvals = {s : [] for s in tfo}
for iseq, uid in enumerate(line['unique_ids']):
if iseq==0:
print '%s dtr training target should be updated to include get_n_descendents_to_affy_increase()' % utils.color('yellow', 'warning')
n_steps = get_n_ancestors_to_affy_change(None, dtree.find_node_with_taxon_label(uid), dtree, line)
if n_steps is None: # can't train on None-type values
continue
tmpvals['in'].append(dtr_invals[cg][iseq])
tmpvals['out'].append(-n_steps)
return tmpvals
tfo = trainfo[cg][tvar]
if cg == 'within-families':
if tvar == 'affinity':
tfo['in'] += dtr_invals[cg]
max_affy = max(line['affinities'])
tfo['out'] += [a / max_affy for a in line['affinities']]
elif tvar == 'delta-affinity':
tmpvals = get_delta_affinity_vals()
tfo['in'] += tmpvals['in']
tfo['out'] += tmpvals['out']
else:
assert False
elif cg == 'among-families':
if dtr_cfgvals['n_train_per_family'] is None:
assert tvar == 'affinity' # not worth doing the delta-affinity one in this case
tfo['in'] += dtr_invals[cg]
tfo['out'] += line['affinities']
else:
if tvar == 'affinity':
i_to_keep = numpy.random.choice(range(len(line['unique_ids'])), size=dtr_cfgvals['n_train_per_family'], replace=False)
tfo['in'] += [dtr_invals[cg][i] for i in i_to_keep]
tfo['out'] += [line['affinities'][i] for i in i_to_keep]
elif tvar == 'delta-affinity':
tmpvals = get_delta_affinity_vals()
if len(tmpvals['in']) == 0: # no affinity increases
return
i_to_keep = numpy.random.choice(range(len(tmpvals['in'])), size=dtr_cfgvals['n_train_per_family'], replace=False)
tfo['in'] += [tmpvals['in'][i] for i in i_to_keep]
tfo['out'] += [tmpvals['out'][i] for i in i_to_keep]
else:
assert False
else:
assert False
# ----------------------------------------------------------------------------------------
utils.add_naive_seq_aa(line)
utils.add_seqs_aa(line)
for mtmp in ['cons-dist-nuc', 'cons-dist-aa']:
add_cdists_to_lbfo(line, lbfo, mtmp)
dtr_invals = {cg : get_dtr_vals(cg, dtr_cfgvals['vars'], line, lbfo, dtree) for cg in cgroups} # all dtr input variable values, before we fiddle with them for the different dtrs
if train_dtr: # train and write new model
for cg in cgroups:
for tvar in dtr_targets[cg]:
add_dtr_training_vals(cg, tvar, dtr_invals)
else: # read existing model
for cg in cgroups:
for tvar in dtr_targets[cg]:
if pmml_models[cg][tvar] is None: # only way this can happen atm is old dirs that don't have among-families delta-affinity
continue
outfo = {}
for iseq, uid in enumerate(line['unique_ids']):
pmml_invals = {var : val for var, val in zip(get_dtr_varnames(cg, dtr_cfgvals['vars']), dtr_invals[cg][iseq])} # convert from format for sklearn to format for pmml
outfo[uid] = pmml_models[cg][tvar].predict(pmml_invals)['predicted_%s'%tvar]
# if skmodels[cg][tvar] is not None: # leaving this here cause maybe we'll want to fall back to it or something if pmml ends up having problems
# sk_val = skmodels[cg][tvar].predict([dtr_invals[cg][iseq]])
# assert utils.is_normed(sk_val / outfo[uid])
lbfo['-'.join([cg, tvar, 'dtr'])] = outfo # NOTE it would be nice to automate this '-'.join() conversion, it happens in a few places already
# ----------------------------------------------------------------------------------------
# differences to calculate_tree_metrics(): this fcn
# 1) can run a bunch of metrics that the other can't
# 2) mostly focuses on running one metric at a time (as opposed to running all the ones that we typically want on data)
# 3) doesn't plot as many things
# 4) only runs on simulation (as opposed to making two sets of things, for simulation and data)
def calculate_individual_tree_metrics(metric_method, annotations, base_plotdir=None, ete_path=None, workdir=None, lb_tau=None, lbr_tau_factor=None, only_csv=False, min_cluster_size=None, include_relative_affy_plots=False,
dont_normalize_lbi=False, cluster_indices=None, only_look_upwards=False, debug=False):
# ----------------------------------------------------------------------------------------
def get_combo_lbfo(varlist, iclust, line, lb_tau, lbr_tau_factor, is_aa_lb=False): #, add_to_line=False):
if 'shm-aa' in varlist and 'seqs_aa' not in line:
utils.add_naive_seq_aa(line)
utils.add_seqs_aa(line)
lbfo = {}
for mtmp in [m for m in varlist if 'cons-dist-' in m]:
add_cdists_to_lbfo(line, lbfo, mtmp)
dtree = get_dendro_tree(treestr=line['tree'])
lbvars = set(varlist) & set(['lbi', 'lbr']) # although if is_aa_lb is set, we're really calculating aa-lbi/aa-lbr
if lb_tau is None or lbr_tau_factor is None:
print ' %s using default lb_tau %.3f and lbr_tau_factor %.3f to calculate individual tree metric %s' % (utils.color('yellow', 'warning'), default_lb_tau, default_lbr_tau_factor, metric_method)
lb_tau, lbr_tau_factor = default_lb_tau, default_lbr_tau_factor
tmp_tau, tmp_factor = lb_tau, lbr_tau_factor # awkward hack, necessary so the calculation fcn can enforce that either a) we're calculating both metrics, in which case we probably want the factor applied, or b) we're only calculating one and not normalizing (i.e. we're probably calculating the bounds)
if len(lbvars) == 2:
only_calc_metric = None
elif len(lbvars) == 1:
only_calc_metric = list(lbvars)[0]
if only_calc_metric == 'lbr':
tmp_tau *= lbr_tau_factor
tmp_factor = None
else:
raise Exception('unexpected combination of variables %s' % varlist)
if is_aa_lb: # NOTE this adds the metrics to <line>
get_aa_lb_metrics(line, dtree, tmp_tau, lbr_tau_factor=tmp_factor, only_calc_metric=only_calc_metric, dont_normalize_lbi=dont_normalize_lbi, extra_str='true tree', iclust=iclust, debug=debug)
# lbfo.update(line['tree-info']['lb'])
else:
tmp_lb_info = calculate_lb_values(dtree, tmp_tau, only_calc_metric=only_calc_metric, lbr_tau_factor=tmp_factor, annotation=line, dont_normalize=dont_normalize_lbi, extra_str='true tree', iclust=iclust, debug=debug)
for lbm in [m for m in lb_metrics if m in varlist]: # this skips the tree, which I guess isn't a big deal
lbfo[lbm] = {u : tmp_lb_info[lbm][u] for u in line['unique_ids']} # remove the ones that aren't in <line> (since we don't have sequences for them, so also no consensus distance)
# if add_to_line:
# line['tree-info'] = {'lb' : lbfo}
return dtree, lbfo
# ----------------------------------------------------------------------------------------
def add_to_treefo(lbfo):
if 'tree-info' in line:
wstr = (' %s replacing existing info'%utils.wrnstr()) if metric_method in line['tree-info']['lb'] else ''
print ' add %s to existing lb keys: %s%s' % (metric_method, ' '.join(k for k in line['tree-info']['lb']), wstr)
line['tree-info']['lb'][metric_method] = lbfo
else:
print ' add new metric %s' % metric_method
line['tree-info'] = {'lb' : {metric_method : lbfo}}
# ----------------------------------------------------------------------------------------
if min_cluster_size is None:
min_cluster_size = default_min_selection_metric_cluster_size
n_before = len(annotations)
annotations = sorted([l for l in annotations if len(l['unique_ids']) >= min_cluster_size], key=lambda l: len(l['unique_ids']), reverse=True)
n_after = len(annotations)
print ' %s getting individual metric for %d true cluster%s with size%s: %s' % (utils.color('blue', metric_method), n_after, utils.plural(n_after), utils.plural(n_after), ' '.join(str(len(l['unique_ids'])) for l in annotations))
if n_before - n_after > 0:
print ' skipping %d smaller than %d' % (n_before - n_after, min_cluster_size)
pstart = time.time()
metric_antns = [] # just to keep track of the ones corresponding to <cluster_indices> (if set)
for iclust, line in enumerate(annotations):
if cluster_indices is not None and iclust not in cluster_indices:
continue
metric_antns.append(line)
if 'tree-info' in line and 'lb' in line['tree-info'] and metric_method in line['tree-info']['lb']:
print ' %s already in annotation, not doing anything' % metric_method
continue
if metric_method == 'shm':
metric_info = {u : -utils.per_seq_val(line, 'n_mutations', u) for u in line['unique_ids']}
add_to_treefo(metric_info)
elif metric_method == 'fay-wu-h': # NOTE this isn't actually tree info, but I'm comparing it to things calculated with a tree, so putting it in the same place at least for now
fwh = -utils.fay_wu_h(line)
add_to_treefo({u : fwh for i, u in enumerate(line['unique_ids'])}) # kind of weird to set it individually for each sequence when they all have the same value (i.e. it's a per-family metric), but I don't want to do actual per-family comparisons any more, and this way we can at least look at it
elif metric_method in ['cons-dist-nuc', 'cons-dist-aa']:
lbfo = {}
add_cdists_to_lbfo(line, lbfo, metric_method)
add_to_treefo(lbfo[metric_method])
elif metric_method == 'delta-lbi':
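# delta-lbi is each node's lbi minus its parent's lbi, i.e. the change in lbi along the branch directly above the node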
dtree, lbfo = get_combo_lbfo(['lbi'], iclust, line, lb_tau, lbr_tau_factor)
delta_lbfo = {}
for uid in line['unique_ids']:
node = dtree.find_node_with_taxon_label(uid)
if node is dtree.seed_node:
continue # maybe I should add it as something? not sure
delta_lbfo[uid] = lbfo['lbi'][uid] - lbfo['lbi'][node.parent_node.taxon.label] # I think the parent should always be in here, since I think we should calculate lbi for every node in the tree
add_to_treefo(delta_lbfo)
elif 'aa-lb' in metric_method: # aa versions of lbi and lbr
_, lbfo = get_combo_lbfo([metric_method.replace('aa-', '', 1)], iclust, line, lb_tau, lbr_tau_factor, is_aa_lb=True) # use replace() rather than lstrip(), since lstrip() strips a set of characters rather than a prefix (although for 'aa-lbi'/'aa-lbr' they happen to give the same result)
# NOTE do *not* call add_to_treefo() since they're already added to <line>
elif metric_method in ['lbi', 'lbr']: # last cause i'm adding them last, but would probably be cleaner to handle it differently (i'm just tired of having to run the full (non-individual) tree metric fcn to get them)
_, lbfo = get_combo_lbfo([metric_method], iclust, line, lb_tau, lbr_tau_factor) #, add_to_line=True)
add_to_treefo(lbfo[metric_method])
elif metric_method == 'cons-lbi': # now uses aa-lbi as a tiebreaker for cons-dist-aa, but used to be old z-score style combination of (nuc-)lbi and cons-dist
def tiefcn(uid):
cdist, aalbi = lbfo['cons-dist-aa'][uid], lbfo['aa-lbi'][uid]
return cdist + aalbi / max_aa_lbi
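# design note: aa-lbi / max_aa_lbi is in (0, 1], so (assuming cons-dist-aa is integer-valued, which it appears to be) adding it essentially just breaks ties among sequences with equal cons-dist-aa, without reordering different cons-dist values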
_, lbfo = get_combo_lbfo(['cons-dist-aa', 'lbi'], iclust, line, lb_tau, lbr_tau_factor, is_aa_lb=True)
max_aa_lbi = max(lbfo['aa-lbi'].values())
add_to_treefo({u : tiefcn(u) for u in line['unique_ids']})
else:
assert False
if time.time() - pstart > 60:
print ' tree quantity calculation/prediction time: %.1fs' % (time.time() - pstart)
if base_plotdir is not None:
plstart = time.time()
assert ete_path is None or workdir is not None # need the workdir to make the ete trees
import plotting
import lbplotting
if 'affinities' not in metric_antns[0] or all(affy is None for affy in metric_antns[0]['affinities']): # if it's bcr-phylo simulation we should have affinities for everybody, otherwise for nobody
return
true_plotdir = base_plotdir + '/true-tree-metrics'
utils.prep_dir(true_plotdir, wildlings=['*.svg', '*.html'], allow_other_files=True, subdirs=[metric_method])
fnames = []
if metric_method in daffy_metrics:
lbplotting.plot_lb_vs_ancestral_delta_affinity(true_plotdir+'/'+metric_method, metric_antns, metric_method, is_true_line=True, only_csv=only_csv, fnames=fnames, debug=debug, only_look_upwards=only_look_upwards)
else:
for affy_key in (['affinities', 'relative_affinities'] if include_relative_affy_plots else ['affinities']):
lbplotting.plot_lb_vs_affinity(true_plotdir, metric_antns, metric_method, is_true_line=True, only_csv=only_csv, fnames=fnames, affy_key=affy_key)
if ete_path is not None:
lbplotting.plot_lb_trees([metric_method], true_plotdir, metric_antns, ete_path, workdir, is_true_line=True, fnames=fnames)
if not only_csv:
plotting.make_html(true_plotdir, fnames=fnames, extra_links=[(metric_method, '%s/%s/' % (true_plotdir, metric_method)),], bgcolor='#FFFFFF')
print ' non-lb metric plotting time %.1fs' % (time.time() - plstart)
# ----------------------------------------------------------------------------------------
def run_laplacian_spectra(treestr, workdir=None, plotdir=None, plotname=None, title=None, debug=False):
# - https://www.ncbi.nlm.nih.gov/pubmed/26658901/
# - instructions here: https://besjournals.onlinelibrary.wiley.com/doi/full/10.1111/2041-210X.12526
# I think this is what ended up working (though probably not in docker):
# apt-get install libgmp-dev libmpfr-dev
# > install.packages("RPANDA",dependencies=TRUE)
# but then I needed to modify the code, so I downloaded the source from CRAN, swapped in the spectR.R that eric sent, and installed with:
# R CMD INSTALL -l packages/RPANDA/lib packages/RPANDA/ # NOTE needs to happen whenever you modify the R source
# condensation of docs from the above paper:
# - > res<-spectR(Phyllostomidae) # compute eigenvalues (and some metrics describing the distribution, e.g. skewness, kurtosis, eigengap)
# - > plot_spectR(res) # make plots for eigenvalue spectrum
# - if eigengap (largest gap between sorted eigenvalues) is e.g. between 3 and 4, then the tree can be separated into three regions, and you use the BIC stuff to find those regions
# - > res<-BICompare(Phyllostomidae,3)
# - > plot_BICompare(Phyllostomidae,res)
# - > res<-JSDtree(Phyllostomidae_genera) # pairwise jensen-shannon distances between the 25 phylogenies
# - > JSDtree_cluster(res) # plots heatmap and hierarchical cluster
if debug:
print utils.pad_lines(get_ascii_tree(treestr=treestr))
print treestr
if workdir is None:
workdir = utils.choose_random_subdir('/tmp/%s' % os.getenv('USER', default='partis-work'))
eigenfname = '%s/eigenvalues.txt' % workdir
os.makedirs(workdir)
cmdlines = [
'library(ape, quiet=TRUE)',
# 'library(RPANDA, quiet=TRUE)', # old way, before I had to modify the source code because the CRAN version removes all eigenvalues <1 (for method="standard" -- with method="normal" it's <0, which is probably better, but it also seems to smoosh all the eigenvalues to be almost exactly 1)
'library("RPANDA", lib.loc="%s/packages/RPANDA/lib", quiet=TRUE)' % os.path.dirname(os.path.realpath(__file__)).replace('/python', ''),
'tree <- read.tree(text = "%s")' % treestr,
# 'print(tree)',
'specvals <- spectR(tree, method=c("standard"))', # compute eigenvalues (and some metrics describing the distribution, e.g. skewness, kurtosis, eigengap)
# 'print(specvals)',
'capture.output(specvals$eigenvalues, file="%s")' % eigenfname,
]
outstr, errstr = utils.run_r(cmdlines, workdir, return_out_err=True) # if it crashes, call it without return_out_err, so it prints stuff as it goes
errstr = '\n'.join([l.strip() for l in errstr.split('\n') if 'This is vegan' not in l])
for oestr in (outstr, errstr):
if oestr.strip() == '':
continue
print utils.pad_lines(oestr)
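# Parsing sketch (an assumption about R's output format, not stated in the
# original): capture.output() on a numeric vector writes index-prefixed lines
# like
#   [1] 12.304 8.771 3.052
#   [4] 1.998 ...
# so the loop below treats bracketed tokens as 1-based indices of the first
# value on each line (and sanity checks them), and reads every other token as
# an eigenvalue.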
eigenvalues = []
with open(eigenfname) as efile:
for line in efile:
for tstr in line.split():
if '[' in tstr:
if int(tstr.strip('[]')) != len(eigenvalues) + 1:
raise Exception('couldn\'t process line:\n%s' % line)
else:
eigenvalues.append(float(tstr))
os.remove(eigenfname)
os.rmdir(workdir)
if plotdir is not None:
import plotting
plotting.plot_laplacian_spectra(plotdir, plotname, eigenvalues, title)
# ----------------------------------------------------------------------------------------
def combine_selection_metrics(lp_infos, min_cluster_size=default_min_selection_metric_cluster_size, plotdir=None, ig_or_tr='ig', args=None, is_simu=False): # don't really like passing <args> like this, but it's the easiest cfg convention atm
# ----------------------------------------------------------------------------------------
def gsval(mfo, tch, vname, no_fail=False):
cln, iseq = mfo[tch], mfo[tch+'_iseq']
return utils.antnval(cln, vname, iseq=iseq, use_default=no_fail)
# ----------------------------------------------------------------------------------------
def gsvstr(val, vname):
if val is None:
return '?' #str(val)
if vname in args.selection_metrics_to_calculate:
return '%.2f' % val
elif vname == 'affinities':
return ('%.1f' % val) if val > 1 else str(utils.round_to_n_digits(val, 2)) # could probably round for the first case as well
elif type(val) == float:
return '%.3f' % val
else:
return str(val)
# ----------------------------------------------------------------------------------------
def sumv(mfo, kstr):
if kstr == 'seq_mtps': # NOTE this is the sum of utils.get_multiplicity() over identical sequences
def vfcn(c): return mtpys[c][gsval(mfo, c, 'input_seqs_aa')]
else:
def vfcn(c): return gsval(mfo, c, kstr)
kvals = [vfcn(c) for c in 'hl']
return None if None in kvals else sum(kvals)
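# e.g. sumv(mfo, 'shm-aa') adds the heavy and light chain values, so a pair
# with 5 and 3 aa mutations gives 8; if either chain is missing the value it
# propagates None instead of a partial sum.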
# ----------------------------------------------------------------------------------------
def sum_nuc_shm_pct(mpfo):
total_len = sum(len(gsval(mpfo, c, 'seqs')) - gsval(mpfo, c, 'seqs').count(utils.ambig_base) for c in 'hl')
return 100 * sumv(mpfo, 'n_mutations') / float(total_len)
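# Worked example (hypothetical pair): an h seq of 300 nt with 4 mutations plus
# an l seq of 320 nt (20 of them ambiguous) with 2 mutations gives
# 100 * 6 / 600. = 1.0 (%).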
# ----------------------------------------------------------------------------------------
def get_did(uid, return_contigs=False):
return utils.get_droplet_id(uid, args.droplet_id_separators, args.droplet_id_indices, return_contigs=return_contigs)
# ----------------------------------------------------------------------------------------
def get_joint_did(mfo):
return utils.get_single_entry(list(set([get_did(gsval(mfo, c, 'unique_ids')) for c in 'hl'])))
# ----------------------------------------------------------------------------------------
def get_didstr(dids, cids, mpfo):
hid, lid = [gsval(mpfo, c, 'unique_ids') for c in 'hl'] # derive the uids from <mpfo> itself, rather than relying on loop variables leaking in from the enclosing scope
if len(set(dids)) == 1: # make sure they're from the same droplet
didstr = dids[0]
if any('chosens' in mpfo[c] and gsval(mpfo, c, 'chosens') for c in 'hl'):
didstr = utils.color('blue_bkg', didstr, width=20)
if args.queries_to_include is not None and any(u in args.queries_to_include for u in (hid, lid)):
didstr = utils.color('red', didstr, width=20)
else:
print ' %s paired seqs %s %s have different droplet ids (i.e. they were probably mis-paired) %s' % (utils.color('red', 'error'), hid, lid, dids)
didstr = 'see error'
cids = ['-' if c in utils.loci else c for c in cids] # previously chosen unobserved cons seqs just have e.g. igh as the contig id, which we don't want to look at in the output
return didstr, cids
# ----------------------------------------------------------------------------------------
def read_cfgfo():
def iconvert(tcfg, vname):
imax = max(tcfg.keys() + [cfgfo.get('n-families', 0) - 1])
def_val = False if tcfg.values()[0] is True else 0
nvals = [tcfg.get(i, def_val) for i in range(imax+1)]
# if 'n-families' in cfgfo and cfgfo['n-families'] != imax + 1: # i tried setting n-families automatically, but in practice it just tends to break things if you make it guess
# print ' %s \'n-families\' not equal to imax + 1 for %s' % (utils.wrnstr(), vname)
# cfgfo['n-families'] = max(imax + 1, cfgfo['n-families'])
return nvals
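# e.g. (hypothetical cfg values): with 'n-families' set to 4,
# iconvert({1 : 2, 3 : 5}, vname) returns [0, 2, 0, 5], while a bool-valued
# dict like {2 : True} with 'n-families' 3 gives [False, False, True].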
allowed_keys = set(['n-families', 'n-per-family', 'include-unobs-cons-seqs', 'include-unobs-naive-seqs', 'vars', 'cell-types', 'cell-type-key', 'max-ambig-positions', 'min-umis', 'min-median-nuc-shm-%', 'min-hdist-to-already-chosen', 'droplet-ids', 'similar-to-droplet-ids', 'meta-info-print-keys', 'include_previously_chosen'])
if debug:
print ' ab choice cfg:'
outstr, _ = utils.simplerun('cat %s'%args.ab_choice_cfg, return_out_err=True)
print utils.pad_lines(outstr)
with open(args.ab_choice_cfg) as cfile:
cfgfo = yaml.load(cfile, Loader=Loader)
if len(set(cfgfo) - allowed_keys) > 0:
raise Exception('unexpected key[s] in ab choice cfg: %s (choose from: %s)' % (' '.join(set(cfgfo) - allowed_keys), ' '.join(allowed_keys)))
for sortvar, vcfg in cfgfo['vars'].items():
if vcfg['sort'] not in ['low', 'high']:
raise Exception('value of sort var \'%s\' must be \'low\' or \'high\' (got \'%s\')' %(sortvar, vcfg['sort']))
if 'i' in vcfg:
vcfg['n'] = iconvert(vcfg['i'], sortvar)
if 'n' in vcfg and len(vcfg['n']) != cfgfo['n-families']:
raise Exception('length of n per family list %d for sort var %s doesn\'t match n-families %d' % (len(vcfg['n']), sortvar, cfgfo['n-families']))
if 'n-per-family' in cfgfo and any('n' in vcfg for vcfg in cfgfo['vars'].values()):
raise Exception('\'n-per-family\' was set, but also found key \'n\' in sort var[s] \'%s\' (can only specify number to take in one place)' % (' '.join(v for v, vcfg in cfgfo['vars'].items() if 'n' in vcfg)))
for stype in ['cons', 'naive']:
tkey = 'include-unobs-%s-seqs'%stype
if tkey not in cfgfo:
cfgfo[tkey] = [False for _ in range(cfgfo['n-families'])]
else:
if hasattr(cfgfo[tkey], 'keys'): # if it's a dict like {i: N}
cfgfo[tkey] = iconvert(cfgfo[tkey], tkey)
if cfgfo[tkey] in [True, False]: # if it's a single value, expand it to the right length
cfgfo[tkey] = [cfgfo[tkey] for _ in range(cfgfo['n-families'])]
else:
if len(cfgfo[tkey]) != cfgfo['n-families']:
raise Exception('length of value for %s %d not equal to n-families %d' % (tkey, len(cfgfo[tkey]), cfgfo['n-families']))
if any(v not in [True, False] for v in cfgfo[tkey]):
raise Exception('values for %s must be bools but got: %s' % (tkey, ' '.join(str(v) for v in set(cfgfo[tkey]))))
return cfgfo
# ----------------------------------------------------------------------------------------
def get_unobs_mfo(stype, metric_pairs, tdbg=False):
assert stype in ['cons', 'naive'] # should be checked elsewhere, but not sure if it is
# ----------------------------------------------------------------------------------------
def use_iseqs(tch, mtmp, threshold=0.75): # if any observed seqs in the family have shm indels, we need to figure out whether the indel should be included in the cons seq
if stype == 'naive': # inferred naive should never have indels in it
return False
hsil = mtmp[tch]['has_shm_indels']
tstr = '(%d / %d = %.2f)' % (hsil.count(True), len(hsil), hsil.count(True) / float(len(hsil)))
if hsil.count(True) / float(len(hsil)) > threshold:
print ' %s more than %.2f %s of %s seqs have indels, so using *input* cons seq (note that if there\'s more than one indel, this may well be wrong, since you probably only want indels that are in a majority of the family [which is probably not all of them])' % (utils.color('yellow', 'warning'), threshold, tstr, tch)
return True
else:
if any(hsil): # if none of them have indels, don't print anything
print ' less than %.2f %s of %s seqs have indels, so not using input seqs for cons seq' % (threshold, tstr, tch)
return False
# ----------------------------------------------------------------------------------------
def getcseqs(tch, use_input_seqs, aa=False, aa_ref_seq=None):
if stype == 'cons':
return utils.cons_seq_of_line(mtmp[tch], aa=aa, use_input_seqs=use_input_seqs, codon_len=1 if aa else 3, aa_ref_seq=aa_ref_seq) # if we're not using input seqs and it's aa (so codon_len is 1) then it *should* be the same as the one that's already in the line
else:
return gsval(mtmp, tch, 'naive_seq'+('_aa' if aa else ''))
# ----------------------------------------------------------------------------------------
def tcsk(c, aastr): # shorthand for use within this fcn
return cskey(c, consfo, aastr=='aa')
# ----------------------------------------------------------------------------------------
mtmp = metric_pairs[0]
uis = {c : use_iseqs(c, mtmp) for c in 'hl'} # if any observed seqs in the family have shm indels, we need to figure out whether the indel should be included in the cons seq
consfo = {c : mtmp[c] for c in 'hl'}
consfo.update({'iclust' : iclust, 'seqtype' : stype})
consfo.update({c+'_use_input_seqs' : uis[c] for c in 'hl'})
consfo.update({tcsk(c, 'aa') : getcseqs(c, uis[c], aa=True) for c in 'hl'})
consfo.update({tcsk(c, 'nuc') : getcseqs(c, uis[c], aa=False, aa_ref_seq=consfo[tcsk(c, 'aa')]) for c in 'hl'})
if any(utils.ltranslate(consfo[tcsk(c, 'nuc')]) != consfo[tcsk(c, 'aa')] for c in 'hl'):
print ' %s nuc %s seq translation differs from aa %s seq:' % (utils.color('yellow', 'warning'), stype, stype)
print ' aa: %s %s' % tuple([consfo[tcsk(c, 'aa')] for c in 'hl'])
print ' nuc trans.: %s %s' % tuple([utils.color_mutants(consfo[tcsk(c, 'aa')], utils.ltranslate(consfo[tcsk(c, 'nuc')]), amino_acid=True) for c in 'hl'])
return consfo
# ----------------------------------------------------------------------------------------
def cskey(c, m, aa=False):
assert m['seqtype'] != 'observed'
return '%s_%sseq_%s' % (c, m['seqtype'][0], 'aa' if aa else 'nuc')
# ----------------------------------------------------------------------------------------
def ctkey():
return cfgfo.get('cell-type-key', 'cell-types') # allows multiple versions of cell type to be in annotation
# ----------------------------------------------------------------------------------------
def getseq(mfo, tch, aa=False):
if mfo['seqtype'] == 'observed':
return gsval(mfo, tch, 'input_seqs'+('_aa' if aa else ''))
else:
return mfo[cskey(tch, mfo, aa=aa)]
# ----------------------------------------------------------------------------------------
def nambig(mfo, tch, antn=None):
if mfo['seqtype'] != 'observed':
assert antn is not None # need to pass in a real annotation if this wasn't observed
if antn is None:
antn = mfo[tch]
return utils.n_variable_ambig_aa(antn, getseq(mfo, tch, aa=True), getseq(mfo, tch, aa=False))
# ----------------------------------------------------------------------------------------
def mfseqs(mfo):
return tuple(getseq(mfo, c, aa=True) for c in 'hl')
# ----------------------------------------------------------------------------------------
def in_chosen_seqs(all_chosen_seqs, mfo): # NOTE all_chosen_seqs includes previously chosen ones
return mfseqs(mfo) in all_chosen_seqs
# ----------------------------------------------------------------------------------------
def too_close_to_chosen_seqs(all_chosen_seqs, mfo, hdist, ttdbg=False): # NOTE all_chosen_seqs includes previously chosen ones
if len(all_chosen_seqs) == 0:
return False
if ttdbg:
h_min, l_min = [min(local_hdist_aa(acseqs[i], mseq) for acseqs in all_chosen_seqs) for i, mseq in enumerate(mfseqs(mfo))]
print ' %d %d %s' % (h_min, l_min, utils.color('red', 'x') if sum([h_min, l_min]) < hdist else '')
return any(sum(local_hdist_aa(cseq, mseq) for mseq, cseq in zip(mfseqs(mfo), acseqs)) < hdist for acseqs in all_chosen_seqs)
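# e.g. (hypothetical numbers): with 'min-hdist-to-already-chosen' set to 3, a
# candidate whose h and l seqs each differ by 1 aa from some already-chosen
# pair (summed h+l distance 2 < 3) is rejected as too close.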
# ----------------------------------------------------------------------------------------
def add_unobs_seq(stype, metric_pairs, chosen_mfos, all_chosen_seqs, tdbg=False):
# get the consfo: first see if we observed the cons/naive seq (i.e. if there's any observed seqs with zero cdist)
def kfcn(m): return sumv(m, 'aa-cfrac')==0 if stype=='cons' else sumv(m, 'n_mutations')==0 # NOTE cons is by aa, but naive is by nuc (since the naive nuc seq is actually really meaningful, and i don't want to have an additional kinda-sorta inferred naive seq floating around)
obs_mfos = [m for m in metric_pairs if kfcn(m)]
if 'max-ambig-positions' in cfgfo:
obs_mfos = [m for m in obs_mfos if sum(nambig(m, c) for c in 'hl') <= cfgfo['max-ambig-positions']]
if len(obs_mfos) > 0: # if we observed the cons seq, use [one of] the observed ones
obs_mfos = sorted(obs_mfos, key=lambda m: sumv(m, 'seq_mtps'), reverse=True) # sort by mtpy
consfo = obs_mfos[0] # choose the first one
else: # if we didn't observe it (with some criteria), make consfo from scratch
print ' %s seq not observed' % stype
consfo = get_unobs_mfo(stype, metric_pairs)
n_ambig_bases = sum(nambig(consfo, c, antn=metric_pairs[0][c]) for c in 'hl')
if 'max-ambig-positions' in cfgfo and n_ambig_bases > cfgfo['max-ambig-positions']:
print ' %s seq: too many ambiguous bases in h+l (%d > %d)' % (stype, n_ambig_bases, cfgfo['max-ambig-positions'])
return
# apply some more criteria
if in_chosen_seqs(all_chosen_seqs, consfo):
print ' %s seq: seq identical to previously-chosen seq' % stype
return
if 'min-hdist-to-already-chosen' in cfgfo and too_close_to_chosen_seqs(all_chosen_seqs, consfo, cfgfo['min-hdist-to-already-chosen']):
print ' %s seq: too close to previously-chosen seq' % stype
return
# add to chosen info
chosen_mfos.append(consfo)
all_chosen_seqs.add(tuple(getseq(consfo, c, aa=True) for c in 'hl'))
if tdbg:
indelstr = ''
if any(consfo.get(c+'_use_input_seqs', False) for c in 'hl'):
indelstr = ' (using %s input seq[s] because of indels)' % ' '.join(c for c in 'hl' if consfo[c+'_use_input_seqs'])
zdstr = ''
if len(obs_mfos) > 0:
zdstr = ' (using observed seqs with aa-cdist zero %s)' % ' '.join(gsval(consfo, c, 'unique_ids') for c in 'hl')
print ' %s: added %s seq%s%s' % (utils.color('green', 'x'), stype, indelstr, zdstr)
# ----------------------------------------------------------------------------------------
def local_hdist_aa(s1, s2, defval=None, frac=False): # ick, this is ugly, but I think it makes sense for now
if len(s1) == len(s2):
hfcn = utils.hamming_fraction if frac else utils.hamming_distance
return hfcn(s1, s2, amino_acid=True)
elif defval is not None:
return defval
else:
return max([len(s1), len(s2)]) # NOTE it's kind of weird and arbitrary to return the max seq len if they're different lengths, but if they're different lengths we don't care anyway cause we're just looking for very similar sequences
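# Worked example (hypothetical seqs): local_hdist_aa('ACDE', 'ACDK') is 1 (one
# aa mismatch), while local_hdist_aa('ACDE', 'ACDEF') returns 5, the longer
# length, unless a <defval> is passed (see the NOTE above on why that's ok
# here).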
# ----------------------------------------------------------------------------------------
def choose_abs(metric_pairs, iclust, tdbg=False):
# ----------------------------------------------------------------------------------------
def get_n_choose(tcfg, key):
if key not in tcfg:
return None
if isinstance(tcfg[key], int): # take the same number from each family
return tcfg[key]
else: # specify a different number for each family
if len(tcfg[key]) != cfgfo['n-families']:
raise Exception('length of n per family list for key %s (%d) not equal to n-families (%d)' % (key, len(tcfg[key]), cfgfo['n-families']))
return tcfg[key][iclust]
# ----------------------------------------------------------------------------------------
def finished(tcfg=None, n_newly_chosen=None):
if tcfg is not None:
assert n_newly_chosen is not None
# this takes the top <n> by <sortvar> (not including any unobs cons seq)
if get_n_choose(tcfg, 'n') is not None and n_newly_chosen >= get_n_choose(tcfg, 'n'): # number to choose for this var in this family
print ' finished: %d newly chosen >= %d' % (n_newly_chosen, get_n_choose(tcfg, 'n'))
return True
# whereas this makes sure we have N from the family over all sort vars (including any unobs cons seq), while still sorting by <sortvar>. It probably does *not* make sense to specify both versions
is_finished = get_n_choose(cfgfo, 'n-per-family') is not None and len(chosen_mfos) >= get_n_choose(cfgfo, 'n-per-family')
if is_finished:
print ' finished: %s' % ('n-per-family not specified' if get_n_choose(cfgfo, 'n-per-family') is None else '%d per family >= %d' % (len(chosen_mfos), get_n_choose(cfgfo, 'n-per-family')))
return is_finished
# ----------------------------------------------------------------------------------------
def handle_droplet_sim_choice(refid, n_take, rmfo):
def sfcn(m): return sum(utils.hamming_distance(gsval(m, c, 'seqs_aa'), gsval(rmfo, c, 'seqs_aa'), amino_acid=True) for c in 'hl') # note: *not* input seqs, since they aren't in general all the same length
if tdbg:
altid = gsval(rmfo, 'h', 'alternate-uids', no_fail=True)
print ' nearest to %s%s:' % (refid, ' (%s)'%altid if altid is not None else '')
print ' hdist contig'
print ' sum h l droplet h l'
n_chsn = 0
for simfo in sorted(metric_pairs, key=sfcn):
if n_chsn >= n_take:
break
chsnstr = ' '
if sfcn(simfo) > 0 and not in_chosen_seqs(all_chosen_seqs, simfo):
chosen_mfos.append(simfo)
all_chosen_seqs.add(tuple(gsval(simfo, c, 'input_seqs_aa') for c in 'hl'))
n_chsn += 1
chsnstr = utils.color('green', 'x')
if tdbg:
dids, cids = zip(*[get_did(gsval(simfo, c, 'unique_ids'), return_contigs=True) for c in 'hl'])
didstr, cids = get_didstr(dids, cids, simfo)
print ' %2d %2d %2d %s %20s %s %s %s %s' % (sfcn(simfo),
utils.hamming_distance(gsval(rmfo, 'h', 'seqs_aa'), gsval(simfo, 'h', 'seqs_aa'), amino_acid=True),
utils.hamming_distance(gsval(rmfo, 'l', 'seqs_aa'), gsval(simfo, 'l', 'seqs_aa'), amino_acid=True),
chsnstr, didstr, cids[0], cids[1],
utils.color_mutants(gsval(rmfo, 'h', 'seqs_aa'), gsval(simfo, 'h', 'seqs_aa'), amino_acid=True),
utils.color_mutants(gsval(rmfo, 'l', 'seqs_aa'), gsval(simfo, 'l', 'seqs_aa'), amino_acid=True)
)
if tdbg:
print ' chose %d abs similar to droplet id %s' % (n_chsn, refid)
# ----------------------------------------------------------------------------------------
# run through a bunch of options for skipping seqs/families
if args.choose_all_abs:
return metric_pairs
if iclust >= cfgfo['n-families']:
return []
chosen_mfos = [] # includes unobs cons + naive seqs plus seqs chosen from all sortvars
if finished(): # return if we weren't supposed to get any from this family
return chosen_mfos
if tdbg:
print ' %s: choosing abs from joint cluster with size %d (marked with %s)' % (utils.color('green', 'iclust %d'%iclust), len(metric_pairs), utils.color('green', 'x'))
all_chosen_seqs = set() # just for keeping track of the seqs we've already chosen (note that this includes previously-chosen ones)
if any('chosens' in mfo[c] for mfo in metric_pairs for c in 'hl'): # add any previously-chosen seqs
for mfo in metric_pairs:
if any('chosens' in mfo[c] and gsval(mfo, c, 'chosens') for c in 'hl'):
assert [gsval(mfo, c, 'chosens') for c in 'hl'].count(True) == 2 # can't choose only one of a pair of abs
if cfgfo.get('include_previously_chosen'):
chosen_mfos.append(mfo)
all_chosen_seqs.add(tuple(gsval(mfo, c, 'input_seqs_aa') for c in 'hl'))
if tdbg:
print ' adding previously-chosen ab: %s' % ' '.join(gsval(mfo, c, 'unique_ids') for c in 'hl')
if 'droplet-ids' in cfgfo: # add some specific seqs
for mfo in metric_pairs:
did = get_joint_did(mfo)
if did in cfgfo['droplet-ids']:
chosen_mfos.append(mfo)
all_chosen_seqs.add(tuple(gsval(mfo, c, 'input_seqs_aa') for c in 'hl'))
if tdbg:
print ' chose ab with droplet id %s' % did
for ctk, ntk in [('cell-types', ctkey()), ('min-umis', 'umis')]:
if len(metric_pairs) > 0 and ctk in cfgfo and ntk not in metric_pairs[0]['h']:
print ' %s \'%s\' in cfgfo but \'%s\' info not in annotation' % (utils.color('yellow', 'warning'), ctk, ntk)
if 'cell-types' in cfgfo and len(metric_pairs) > 0 and ctkey() in metric_pairs[0]['h']:
def keepfcn(m): return all(gsval(m, c, ctkey()) in cfgfo['cell-types'] for c in 'hl') # kind of dumb to check both, they should be the same, but whatever it'll crash in the debug printing below if they're different
n_before = len(metric_pairs)
metric_pairs = [m for m in metric_pairs if keepfcn(m)]
if tdbg and n_before - len(metric_pairs) > 0:
print ' skipped %d with cell type not among %s' % (n_before - len(metric_pairs), cfgfo['cell-types'])
if 'min-umis' in cfgfo and len(metric_pairs) > 0 and 'umis' in metric_pairs[0]['h']:
def keepfcn(m):
if args.queries_to_include is not None and any(gsval(m, c, 'unique_ids') in args.queries_to_include for c in 'hl'):
return True
return sumv(m, 'umis') > cfgfo['min-umis'] # queries_to_include probably won't have umis set, but still want to keep them
n_before = len(metric_pairs)
metric_pairs = [m for m in metric_pairs if keepfcn(m)]
if tdbg and n_before - len(metric_pairs) > 0:
print ' skipped %d with umis less than %d' % (n_before - len(metric_pairs), cfgfo['min-umis'])
if 'min-median-nuc-shm-%' in cfgfo and len(metric_pairs) > 0:
median_shm = numpy.median([sum_nuc_shm_pct(m) for m in metric_pairs])
skip_family = median_shm < cfgfo['min-median-nuc-shm-%']
if tdbg:
print ' %s family: median h+l nuc shm %.2f%% %s than %.2f%%' % (utils.color('yellow', 'skipping entire') if skip_family else 'keeping', median_shm, 'less' if skip_family else 'greater', cfgfo['min-median-nuc-shm-%'])
if skip_family:
return []
if 'max-ambig-positions' in cfgfo: # max number of ambiguous amino acid positions summed over h+l
def keepfcn(m):
return sum(nambig(m, c) for c in 'hl') <= cfgfo['max-ambig-positions']
n_before = len(metric_pairs)
metric_pairs = [m for m in metric_pairs if keepfcn(m)]
if tdbg and n_before - len(metric_pairs):
print ' skipped %d with too many ambiguous bases (>%d)' % (n_before - len(metric_pairs), cfgfo['max-ambig-positions'])
if 'similar-to-droplet-ids' in cfgfo: # add seqs similar to some specific seqs
for refid, n_take in cfgfo['similar-to-droplet-ids']:
rmfos = [m for m in metric_pairs if get_joint_did(m)==refid]
if len(rmfos) > 0: # if <refid> is in the family
handle_droplet_sim_choice(refid, n_take, utils.get_single_entry(rmfos))
if len(metric_pairs) == 0:
return []
if finished():
return chosen_mfos
# maybe add the unobserved cons/naive seqs
for stype in ['cons', 'naive']:
if cfgfo['include-unobs-%s-seqs'%stype][iclust]:
add_unobs_seq(stype, metric_pairs, chosen_mfos, all_chosen_seqs, tdbg=tdbg) # well, doesn't necessarily add it, but at least checks to see if we should
if finished():
return chosen_mfos
# actually choose them, sorted by the various specified vars
for sortvar, vcfg in cfgfo['vars'].items():
n_prev_var_chosen, n_same_seqs, n_too_close, n_this_var_chosen = 0, 0, 0, 0
sorted_mfos = metric_pairs
sorted_mfos = sorted(sorted_mfos, key=lambda m: sumv(m, 'seq_mtps'), reverse=True)
sorted_mfos = sorted(sorted_mfos, key=lambda m: sumv(m, sortvar), reverse=vcfg['sort']=='high')
for mfo in sorted_mfos:
if finished(tcfg=vcfg, n_newly_chosen=n_this_var_chosen):
break
if mfo in chosen_mfos:
n_prev_var_chosen += 1
continue
if in_chosen_seqs(all_chosen_seqs, mfo):
n_same_seqs += 1
continue
if 'min-hdist-to-already-chosen' in cfgfo and too_close_to_chosen_seqs(all_chosen_seqs, mfo, cfgfo['min-hdist-to-already-chosen']):
n_too_close += 1
continue
if any(gsval(mfo, c, 'has_shm_indels') for c in 'hl'):
print ' %s choosing ab with shm indel: the consensus sequence may or may not reflect the indels (see above). uids: %s %s' % (utils.color('yellow', 'warning'), gsval(mfo, 'h', 'unique_ids'), gsval(mfo, 'l', 'unique_ids'))
chosen_mfos.append(mfo)
all_chosen_seqs.add(tuple(gsval(mfo, c, 'input_seqs_aa') for c in 'hl'))
n_this_var_chosen += 1 # number chosen from this sortvar
if tdbg:
print ' %s: chose %d%s%s%s' % (sortvar, n_this_var_chosen,
'' if n_prev_var_chosen==0 else ' (%d were in common with a previous var)'%n_prev_var_chosen,
'' if n_same_seqs==0 else ' (%d had seqs identical to previously-chosen ones)'%n_same_seqs,
'' if n_too_close==0 else ' (%d had seqs too close to previously-chosen ones)'%n_too_close)
return chosen_mfos
# ----------------------------------------------------------------------------------------
def add_plotval_uids(iclust_plotvals, iclust_mfos, metric_pairs):
def waschosen(m):
return 'chosen' if all(gsval(m, c, 'unique_ids') in iclust_chosen_ids for c in 'hl') else 'nope'
def ustr(m):
rstr = ''
if waschosen(m) == 'chosen': # if this is commented, i think i can simplify this fcn a lot? UPDATE need the extra text for cases where lots of dots are on top of each other
rstr = 'x'
if args.queries_to_include is not None and all(gsval(m, c, 'unique_ids') in args.queries_to_include for c in 'hl'):
common_chars = ''.join(c for c, d in zip(gsval(m, 'h', 'unique_ids'), gsval(m, 'l', 'unique_ids')) if c==d)
common_chars = common_chars.rstrip('-ig')
if len(common_chars) > 0:
rstr += ' ' + common_chars
else:
rstr += ' ' + ' '.join(gsval(m, c, 'unique_ids') for c in 'hl')
return None if rstr == '' else rstr
observed_mfos = [m for m in iclust_mfos if m['seqtype'] == 'observed']
iclust_chosen_ids = [gsval(m, c, 'unique_ids') for m in observed_mfos for c in 'hl']
iclust_plotvals['uids'] = [ustr(m) for m in metric_pairs]
iclust_plotvals['chosen'] = [waschosen(m) for m in metric_pairs]
# ----------------------------------------------------------------------------------------
def write_chosen_file(all_chosen_mfos, hash_len=8):
# ----------------------------------------------------------------------------------------
def getofo(mfo):
ofo = collections.OrderedDict([('iclust', mfo['iclust'])])
if mfo['seqtype'] == 'observed':
ofo.update([(c+'_id', gsval(mfo, c, 'unique_ids')) for c in 'hl'])
for kn in ['aa-cfrac', 'shm-aa', 'aa-cdist'] + [m for m in args.selection_metrics_to_calculate if m != 'cons-dist-aa']:
ofo.update([('sum_'+kn, sumv(mfo, kn))])
else:
def gid(mfo, c):
hstr = utils.uidhashstr(getseq(mfo, c, aa=True))[:hash_len]
return '%s-%s-%d-%s' % (hstr, mfo['seqtype'], mfo['iclust'], mfo[c]['loci'][0]) # NOTE would be nice to use subj here, but i don't have it added to input meta info (yet)
ofo.update([(c+'_id', gid(mfo, c)) for c in 'hl'])
ofo.update([(c+'_family_size', len(mfo[c]['unique_ids'])) for c in 'hl'])
ofo.update([(c+'_'+r+'_gene' , mfo[c][r+'_gene']) for r in utils.regions for c in 'hl'])
ofo.update([(c+'_locus', mfo[c]['loci'][0]) for c in 'hl'])
if mfo['seqtype'] == 'observed':
okeys = [('has_shm_indels', None), ('aa-cfrac', None), ('aa-cdist', None), ('shm-aa', None), ('seq_nuc', 'input_seqs'), ('seq_aa', 'input_seqs_aa')]
if any(ctkey() in mfo[c] for c in 'hl'):
okeys.insert(1, ('cell_type', ctkey()))
for ok, lk in okeys:
ofo.update([(c+'_'+ok, gsval(mfo, c, utils.non_none([lk, ok]))) for c in 'hl'])
else:
for tch in 'hl':
ofo[tch+'_seq_aa'] = getseq(mfo, tch, aa=True)
ofo[tch+'_seq_nuc'] = getseq(mfo, tch, aa=False)
ofo[tch+'_has_shm_indels'] = mfo[tch+'_use_input_seqs']
if mfo['seqtype'] == 'observed': # check that the aa seqs are actually translations of the nuc seqs (for unobs cons seqs, we expect them to differ) NOTE i don't know if this is really worthwhile long term, but it makes me feel warm and fuzzy atm that it's here
for tch in 'hl':
if utils.ltranslate(ofo[tch+'_seq_nuc']) != ofo[tch+'_seq_aa']:
print ' %s aa seq not translation of nuc seq for %s %s:' % (utils.color('yellow', 'warning'), tch, ofo[tch+'_id'])
utils.color_mutants(utils.ltranslate(ofo[tch+'_seq_nuc']), ofo[tch+'_seq_aa'], amino_acid=True, print_result=True, extra_str=' ')
return ofo
# ----------------------------------------------------------------------------------------
if debug:
print ' writing %d chosen abs to %s' % (len(all_chosen_mfos), args.chosen_ab_fname)
with open(args.chosen_ab_fname, 'w') as cfile:
outfos, fieldnames = [], None
for mfo in all_chosen_mfos:
outfos.append(getofo(mfo))
if fieldnames is None or len(outfos[-1].keys()) > len(fieldnames):
fieldnames = outfos[-1].keys()
if len(all_chosen_mfos) > 0:
writer = csv.DictWriter(cfile, fieldnames)
writer.writeheader()
for ofo in outfos:
writer.writerow(ofo)
# ----------------------------------------------------------------------------------------
def print_dbg(metric_pairs, iclust_mfos, print_nuc_seqs=True):
# ----------------------------------------------------------------------------------------
def init_xtras():
xtra_heads = [(ctkey(), ['cell', 'type']), ('umis', ['umis', 'h+l']), ('c_genes', ['c_gene', '']), ('affinities', ['affin', 'ity'])]
if 'meta-info-print-keys' in cfgfo:
xtra_heads += [(k, [l, '']) for k, l in cfgfo['meta-info-print-keys']]
xtra_heads += [(h, [h, 'sum']) for h in smheads]
xheads, xtrafo, xlens = [[], []], [], {}
for xn, xh in xtra_heads:
# if all(xn not in mpfo[c] and xn not in smheads for mpfo in metric_pairs for c in 'hl'):
if all(gsval(mpfo, c, xn, no_fail=True) is None for mpfo in metric_pairs for c in 'hl'):
continue
xtrafo.append(xn)
ctlens = [len(gsvstr(gsval(m, c, xn), xn)) for m in metric_pairs for c in 'hl']
xlens[xn] = max([len(h) for h in xh] + ctlens) + 1
xheads = [x + [utils.wfmt(s, xlens[xn])] for x, s in zip(xheads, xh)]
return xtrafo, xheads, xlens
# ----------------------------------------------------------------------------------------
def neut_col(cg, tlen):
if cg in [None, 'None']: return ' ' * tlen
cg = float(cg)
tcol, cgstr = ('blue', '-') if cg < 0 else (None, '%.0f' % cg)
if cg > 50: tcol = 'yellow'
if cg > 75: tcol = 'red'
return utils.color(tcol, cgstr, width=tlen)
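# Threshold sketch for neut_col() above (restating the logic, no new behavior):
#   missing value -> blank padding
#   cg < 0 -> blue '-'
#   0 <= cg <= 50 -> uncolored value
#   50 < cg <= 75 -> yellow
#   cg > 75 -> red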
# ----------------------------------------------------------------------------------------
def get_xstr(mpfo):
xstr = [] # don't try to condense these into a block, they're too different
if ctkey() in xtrafo:
ctval = utils.get_single_entry(list(set(gsval(mpfo, c, ctkey()) for c in 'hl')))
xstr += [utils.wfmt(utils.non_none([ctval, '?']), xlens[ctkey()])]
if 'umis' in xtrafo:
uvals = [gsval(mpfo, c, 'umis') for c in 'hl']
xstr += [utils.wfmt('?' if None in uvals else sum(uvals), xlens['umis'])]
if 'c_genes' in xtrafo:
cg = gsval(mpfo, 'h', 'c_genes')
xstr += [utils.wfmt('?' if cg in [None, 'None'] else cg.replace('IGH', ''), xlens['c_genes'])]
if 'affinities' in xtrafo:
affy = utils.get_single_entry(list(set([gsvstr(gsval(mpfo, c, 'affinities'), 'affinities') for c in 'hl'])))
xstr += [utils.wfmt(affy, xlens['affinities'])]
if 'meta-info-print-keys' in cfgfo:
for mk in [k for k, _ in cfgfo['meta-info-print-keys'] if k in xtrafo]:
mv = utils.get_single_entry(list(set([gsval(mpfo, c, mk) for c in 'hl'])))
if 'neut-' in mk: # colors that make sense for % neut values
mv = neut_col(mv, xlens[mk])
elif mk == 'alternate-uids':
mv = utils.wfmt('' if mv is None else mv, xlens[mk])
xstr += [mv]
for sh in smheads:
xstr += [utils.wfmt(gsvstr(sumv(mpfo, sh), sh), xlens.get(sh, 7))]
return xstr
# ----------------------------------------------------------------------------------------
def getcdist(mpfo, tch, frac=False): # can't just use gsval() for cases where we used the "input" (indel'd) cons seq (although note that there's probably some other places where the original/indel-reversed version is used)
defval = gsval(mpfo, tch, 'aa-c'+('frac' if frac else 'dist'))
return local_hdist_aa(gsval(mpfo, tch, 'input_seqs_aa'), cons_mfo[tch+'_cseq_aa'], defval=defval, frac=frac)
# ----------------------------------------------------------------------------------------
def cstr(c, s2=None, aa=False):
if not aa and not print_nuc_seqs: return ''
cseq = cons_mfo['%s_cseq_%s' % (c, 'aa' if aa else 'nuc')]
return utils.color_mutants(cseq, cseq if s2 is None else s2, amino_acid=aa, align_if_necessary=s2 is not None) # align if necessary for naive seq, i.e. from nstr()
# ----------------------------------------------------------------------------------------
def nstr(c, aa=False):
nseq = (h_atn if c=='h' else l_atn)['naive_seq'+('_aa' if aa else '')]
return cstr(c, s2=nseq, aa=aa)
# ----------------------------------------------------------------------------------------
smheads = [m for m in args.selection_metrics_to_calculate if m != 'cons-dist-aa']
xtrafo, xheads, xlens = init_xtras()
utils.non_clonal_clusters(h_atn, [hl for hl, _ in antn_pairs], dtype='lev', aa=True, labelstr=utils.locstr(h_atn['loci'][0]), extra_str=' ')
utils.non_clonal_clusters(l_atn, [ll for _, ll in antn_pairs], dtype='lev', aa=True, labelstr=utils.locstr(l_atn['loci'][0]), extra_str=' ')
lstr = '%s %s' % (utils.locstr(h_atn['loci'][0]), utils.locstr(l_atn['loci'][0]))
h_cshm, l_cshm = [lb_cons_seq_shm(l, aa=True) for l in [h_atn, l_atn]]
cshm_str = '%2d %2d' % (h_cshm, l_cshm)
sstr = ' %3d %3d %3d' % (len(metric_pairs), len(h_atn['unique_ids']), len(l_atn['unique_ids']))
gstrs = ['%s %s' % (utils.color_gene(h_atn[r+'_gene']), utils.color_gene(l_atn[r+'_gene']) if r!='d' else '') for r in utils.regions]
gstr_len = max(utils.len_excluding_colors(s) for s in gstrs) # don't really need this as long as it's the last column
gstrs = ['%s%s' % (g, ' '*(gstr_len - utils.len_excluding_colors(g))) for g in gstrs]
if any(m['seqtype']=='cons' for m in iclust_mfos): # if the unobserved consensus was added for this cluster, we need to use the cons seq from cons_mfo for either of h/l that had enough shm indels that we used input seqs to calculate the cons seq (i.e. for which h/l_use_input_seqs was set)
cons_mfo = utils.get_single_entry([m for m in iclust_mfos if m['seqtype']=='cons'])
else:
cons_mfo = get_unobs_mfo('cons', metric_pairs) # if we didn't choose a cons seq, we need to get the cons seqs/info (since both aa and nuc "chosen" cons seqs can differ from the one in the annotation: both if there's lots of shm indels, and the nuc because of codon_len=3)
print (' aa-cfrac (%%) aa-cdist droplet contig indels%s N %%shm N aa mutations sizes %s %s %s %s %s') % (' '.join(xheads[0]), utils.wfmt('genes cons:', gstr_len), cstr('h', aa=True), cstr('l', aa=True), cstr('h'), cstr('l'))
print (' sum h l h l h l h l %s sum h l nuc cons. obs. both h l %s %s %s %s %s') % (' '.join(xheads[1]), utils.wfmt('naive:', gstr_len), nstr('h', aa=True), nstr('l', aa=True), nstr('h'), nstr('l'))
sorted_mfos = sorted(metric_pairs, key=lambda m: sumv(m, 'seq_mtps'), reverse=True) # sort by sum of h and l sequence multiplicities
last_cdist_str, last_mtpy_str, last_aa_shmstr = None, None, None
for imp, mpfo in enumerate(sorted(sorted_mfos, key=lambda x: sum(getcdist(x, c, frac=True) for c in 'hl'))): # would be nice to use sumv()
hid, lid = [gsval(mpfo, c, 'unique_ids') for c in 'hl']
dids, cids = zip(*[get_did(u, return_contigs=True) for u in (hid, lid)])
didstr, cids = get_didstr(dids, cids, mpfo)
indelstr = ' '.join(utils.color('red', 'y') if utils.per_seq_val(l, 'has_shm_indels', u) else ' ' for c, u, l in zip('hl', [hid, lid], [h_atn, l_atn]))
h_seq, l_seq = [utils.color_mutants(cons_mfo[c+'_cseq_aa'], utils.per_seq_val(l, 'input_seqs_aa', u), amino_acid=True, align_if_necessary=True) for c, u, l in zip('hl', (hid, lid), (h_atn, l_atn))]
h_nuc_seq, l_nuc_seq = '', ''
if print_nuc_seqs:
h_nuc_seq, l_nuc_seq = [utils.color_mutants(cons_mfo[c+'_cseq_nuc'], utils.per_seq_val(l, 'input_seqs', u), align_if_necessary=True) for c, u, l in zip('hl', (hid, lid), (h_atn, l_atn))]
h_cfrac, l_cfrac = [getcdist(mpfo, c, frac=True) for c in 'hl']
h_cdist, l_cdist = [getcdist(mpfo, c) for c in 'hl']
aa_cdstr = '%4.1f %4.1f %4.1f %4d%4d' % (100*sum([h_cfrac, l_cfrac]), 100*h_cfrac, 100*l_cfrac, h_cdist, l_cdist)
h_mtpy, l_mtpy = [mtpys[c][gsval(mpfo, c, 'input_seqs_aa')] for c in 'hl']
mtpstr = '%3d %3d %3d' % (sum((h_mtpy, l_mtpy)), h_mtpy, l_mtpy)
aa_shmstr = '%2d %2d %2d' % (sumv(mpfo, 'shm-aa'), gsval(mpfo, 'h', 'shm-aa'), gsval(mpfo, 'l', 'shm-aa'))
print ' %s %s %s %20s %s %s %s' % (lstr if imp==0 else ' '*utils.len_excluding_colors(lstr),
aa_cdstr if aa_cdstr!=last_cdist_str else ' '*utils.len_excluding_colors(aa_cdstr),
utils.color('green', 'x') if mpfo in iclust_mfos else ' ',
didstr, cids[0], cids[1], indelstr),
print ' %s %s %4.1f %s %s %s %s %s %s %s %s' % (' '.join(get_xstr(mpfo)),
mtpstr if mtpstr != last_mtpy_str else ' '*utils.len_excluding_colors(mtpstr),
sum_nuc_shm_pct(mpfo),
cshm_str if imp==0 else ' '*len(cshm_str),
aa_shmstr if aa_shmstr!=last_aa_shmstr else ' '*utils.len_excluding_colors(aa_shmstr),
sstr if imp==0 else ' '*utils.len_excluding_colors(sstr), gstrs[imp] if imp<len(gstrs) else ' '*gstr_len,
h_seq, l_seq, h_nuc_seq, l_nuc_seq)
last_cdist_str, last_mtpy_str, last_aa_shmstr = aa_cdstr, mtpstr, aa_shmstr
for gs in gstrs[imp+1:]: # if the cluster was smaller than gstrs, need to print the extra gstrs (this shouldn't really ever happen unless i make gstrs much longer)
print '%81s%s' % ('', gs) # this width will sometimes be wrong
print ''
# ----------------------------------------------------------------------------------------
def get_sum_metrics(metric_pairs, h_atn): # return a fake annotation <p_atn> with the sum/joint metrics in it
# ----------------------------------------------------------------------------------------
def trfn(uid, idup=None):
tid = get_did(uid)
if idup is not None:
tid = '%s-DUPL-%d' % (tid, idup)
return tid
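# e.g. (hypothetical uids): if two h-chain uids map to the same droplet id
# 'AAACCC', the while loop below turns the second one into 'AAACCC-DUPL-1' so
# the translated ids stay unique.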
# ----------------------------------------------------------------------------------------
p_atn = {k : copy.deepcopy(h_atn[k]) for k in ['unique_ids', 'affinities', 'tree', 'min_target_distances'] if k in h_atn}
trns, reverse_translations = {}, {}
for uid in h_atn['unique_ids']: # translate uid to the droplet id, which ends up being a god damn clusterfuck because droplet ids can be repeated but we don't want duplicate ids
idup = None
while trfn(uid, idup=idup) in reverse_translations: # add an integer plus some crap to try to make it obvious that we hit a duplicate (yeah this solution sucks, but i think it's the best available atm)
if idup is None: idup = 0
idup += 1
trns[uid] = trfn(uid, idup=idup)
reverse_translations[trfn(uid, idup=idup)] = uid
utils.translate_uids([p_atn], trns=trns)
p_atn['tree-info'] = {'lb' : {}}
for b_mtr in args.selection_metrics_to_calculate + ['n_mutations', 'shm-aa']:
sum_mtr = 'sum-%s' % b_mtr
p_atn['tree-info']['lb'][sum_mtr] = {}
for mfo in metric_pairs:
sum_mval = sumv(mfo, b_mtr)
if sum_mval is None:
continue
pid = p_atn['unique_ids'][mfo['h_iseq']]
p_atn['tree-info']['lb'][sum_mtr][pid] = sum_mval
return p_atn
# # ----------------------------------------------------------------------------------------
# def makeplots(sum_antns):
# # h vs l aa-cdist scatter plots:
# # iclust_plotvals = {c+'_aa-cfrac' : [gsval(m, c, 'aa-cfrac') for m in metric_pairs] for c in 'hl'}
# # if any(vl.count(0)==len(vl) for vl in iclust_plotvals.values()): # doesn't plot anything useful, and gives a pyplot warning to std err which is annoying
# # return
# # add_plotval_uids(iclust_plotvals, iclust_mfos, metric_pairs) # add uids for the chosen ones
# # mstr = legtexts['cons-frac-aa']
# # lbplotting.plot_2d_scatter('h-vs-l-cfrac-iclust-%d'%iclust, plotdir, iclust_plotvals, 'l_aa-cfrac', 'light %s'%mstr, mstr, xvar='h_aa-cfrac', xlabel='heavy %s'%mstr, colorvar='chosen', stats='correlation') # NOTE this iclust will in general *not* correspond to the one in partition plots
# # # for k in iclust_plotvals:
# # # if k not in all_plotvals: all_plotvals[k] = [] # just for 'uids'
# # # all_plotvals[k] += iclust_plotvals[k]
# ----------------------------------------------------------------------------------------
def get_mtpys(metric_pairs): # NOTE this is the sum of utils.get_multiplicity() over identical sequences
mtpys = {}
for c in 'hl':
seqlist = [gsval(m, c, 'input_seqs_aa') for m in metric_pairs for _ in range(gsval(m, c, 'multipy'))]
mtpys[c] = {s : seqlist.count(s) for s in set(seqlist)}
return mtpys
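# Rough example (hypothetical values): if two metric pairs share heavy-chain
# aa seq 'S' with multiplicities 3 and 2, then mtpys['h']['S'] is 5, since
# seqlist repeats each seq once per unit of multiplicity and count() then
# sums over the identical copies.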
# ----------------------------------------------------------------------------------------
import paircluster # if you import it up top it fails, and i don't feel like fixing the issue
debug = args.debug or args.debug_paired_clustering # not is_simu or
if 'cons-dist-aa' not in args.selection_metrics_to_calculate:
print ' %s \'cons-dist-aa\' not in --selection-metrics-to-calculate, so things may not work' % utils.color('yellow', 'warning')
all_chosen_mfos = []
cfgfo = read_cfgfo()
antn_pairs = []
for lpair in [lpk for lpk in utils.locus_pairs[ig_or_tr] if tuple(lpk) in lp_infos]:
antn_pairs += paircluster.find_cluster_pairs(lp_infos, lpair, required_keys=['tree-info'], min_cluster_size=min_cluster_size)
antn_pairs = sorted(antn_pairs, key=lambda x: sum(len(l['unique_ids']) for l in x), reverse=True) # sort by the sum of h+l ids (if i could start over i might sort by the number of common ids)
# all_plotvals = {k : [] for k in ('h_aa-cfrac', 'l_aa-cfrac')}
plot_antns = []
if debug:
print ' %d h/l pairs: %s' % (len(antn_pairs), ', '.join(' '.join(str(len(l['unique_ids'])) for l in p) for p in antn_pairs))
print ' key: %s %s %s (empty/blank numbers are same as previous line)' % (utils.color('red', 'queries-to-include'), utils.color('blue_bkg', 'previously chosen'), utils.color('red', utils.color('blue_bkg', 'both')))
for iclust, (h_atn, l_atn) in enumerate(antn_pairs):
for ltmp in (h_atn, l_atn):
utils.add_seqs_aa(ltmp)
utils.add_naive_seq_aa(ltmp)
metric_pairs = []
for hid, pids in zip(h_atn['unique_ids'], h_atn['paired-uids']):
if pids is None or len(pids) == 0: # should only have the latter now (set with .get() call in rewrite_input_metafo())
continue
lid = pids[0]
if lid not in l_atn['unique_ids']:
print ' paired light id %s missing' % lid
continue
mpfo = {'iclust' : iclust, 'seqtype' : 'observed'}
for tch, uid, ltmp in zip(('h', 'l'), (hid, lid), (h_atn, l_atn)):
mpfo[tch] = ltmp
mpfo[tch+'_iseq'] = ltmp['unique_ids'].index(uid)
metric_pairs.append(mpfo)
p_atn = get_sum_metrics(metric_pairs, h_atn)
if plotdir is not None:
plot_antns.append(p_atn)
if len(metric_pairs) == 0:
continue
mtpys = get_mtpys(metric_pairs)
iclust_mfos = choose_abs(metric_pairs, iclust, tdbg=debug)
if len(iclust_mfos) > 0:
all_chosen_mfos += iclust_mfos
if debug:
print ' chose %d total' % len(iclust_mfos)
if debug:
print_dbg(metric_pairs, iclust_mfos)
if plotdir is not None:
mtc = ['sum-'+m for m in args.selection_metrics_to_calculate]
plot_tree_metrics(args, plotdir, mtc, plot_antns, is_simu=is_simu, ete_path=args.ete_path, workdir=args.workdir, paired=True)
if args.chosen_ab_fname is not None:
write_chosen_file(all_chosen_mfos)
# if plotdir is not None: # eh, maybe there isn't a big reason for an overall one
# lbplotting.plot_2d_scatter('h-vs-l-cfrac-iclust-all', plotdir, all_plotvals, 'l_aa-cfrac', 'light %s'%mstr, mstr, xvar='h_aa-cfrac', xlabel='heavy %s'%mstr, colorvar='chosen', stats='correlation')
| psathyrella/partis | python/treeutils.py | Python | gpl-3.0 | 219,586 | [
"Bioconductor"
] | 427a53fafe7a3ab7075a077fabea4900ecadb3022162d40f5d562a9fcbb30268 |
# -*- coding: utf-8 -*-
# This file is part of the Fluggo Media Library for high-quality
# video and audio processing.
#
# Copyright 2010 Brian J. Crowell <brian@fluggo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import fractions
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from fluggo.media import process, timecode
from fluggo import signal
SMALL_TICK_THRESHOLD = 2
class TimeRuler(QWidget):
def __init__(self, parent=None, timecode=timecode.Frames(), scale=fractions.Fraction(1), frame_rate=fractions.Fraction(30, 1)):
QWidget.__init__(self, parent)
self.frame_rate = fractions.Fraction(frame_rate)
self.set_timecode(timecode)
self.set_scale(scale)
self.left_frame = 0.0
self.current_frame = 0
self.current_frame_changed = signal.Signal()
def sizeHint(self):
return QSize(60, 30)
def set_left_frame(self, left_frame):
if left_frame != self.left_frame:
self.left_frame = left_frame
self.update()
def set_current_frame(self, frame):
frame = int(frame)
if self.current_frame != frame:
self.current_frame = frame
self.current_frame_changed(frame)
self.update()
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
frame = int(round(float(fractions.Fraction(event.x()) / self.scale) + self.left_frame))
self.set_current_frame(frame)
def mouseMoveEvent(self, event):
frame = int(round(float(fractions.Fraction(event.x()) / self.scale) + self.left_frame))
self.set_current_frame(frame)
def scale(self):
# NOTE this accessor is shadowed by the instance attribute of the same name assigned in set_scale(), so in practice callers read self.scale directly
return self.scale
def set_scale(self, scale):
'''
Set the scale, in pixels per frame.
'''
self.scale = fractions.Fraction(scale)
if len(self.ticks) < 3:
self.minor_tick = None
self.medium_tick = self.ticks[0]
self.major_tick = self.ticks[-1]
else:
for minor, medium, major in zip(self.ticks[0:], self.ticks[1:], self.ticks[2:]):
if fractions.Fraction(minor) * scale > SMALL_TICK_THRESHOLD:
self.minor_tick, self.medium_tick, self.major_tick = minor, medium, major
break
self.update()
def set_timecode(self, timecode):
self.timecode = timecode
major_ticks = self.timecode.get_major_ticks()
# Expand the major tick list with extra divisions
last_tick = 1
self.ticks = [1]
for major_tick in major_ticks:
for div in (10, 2):
(divend, rem) = divmod(major_tick, div)
if rem == 0 and divend > last_tick:
self.ticks.append(divend)
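# Worked example (assumed input, since get_major_ticks() is defined elsewhere):
# if major_ticks were [30, 300], the loop above would append 3 and 15 (from 30)
# and then 30 and 150 (from 300), giving self.ticks == [1, 3, 15, 30, 150].
# Note that only the divisions get appended, so the largest major tick itself
# (300 here) never appears in the list.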
self.update()
def frame_to_pixel(self, frame):
return float(int((float(int(frame)) - self.left_frame) * float(self.scale))) + 0.5
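# Mapping sketch (an assumption about intent, not from the original author):
# the frame is snapped to an integer, offset by the leftmost visible frame,
# scaled to pixels, and then nudged by +0.5 so that 1px-wide QPainter lines
# land on pixel centers instead of straddling two device pixels.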
def paintEvent(self, event):
paint = QPainter(self)
paint.setPen(QColor(0, 0, 0))
major_ticks = self.timecode.get_major_ticks()
start_frame = int(self.left_frame)
width_frames = int(float(fractions.Fraction(self.width()) / self.scale))
height = self.height()
if self.minor_tick:
for frame in range(start_frame - start_frame % self.minor_tick, start_frame + width_frames, self.minor_tick):
x = self.frame_to_pixel(frame)
paint.drawLine(x, height - 5, x, height)
for frame in range(start_frame - start_frame % self.medium_tick, start_frame + width_frames, self.medium_tick):
x = self.frame_to_pixel(frame)
paint.drawLine(x, height - 10, x, height)
for frame in range(start_frame - start_frame % self.major_tick, start_frame + width_frames, self.major_tick):
x = self.frame_to_pixel(frame)
paint.drawLine(x, height - 15, x, height)
prev_right = None
for frame in range(start_frame - start_frame % self.major_tick, start_frame + width_frames, self.major_tick):
x = self.frame_to_pixel(frame)
if prev_right is None or x > prev_right:
text = self.timecode.format(frame)
rect = paint.drawText(QRectF(), Qt.TextSingleLine, text)
prev_right = x + rect.width() + 5.0
paint.drawText(x + 2.5, 0.0, rect.width(), rect.height(), Qt.TextSingleLine, text)
# Draw the pointer
x = self.frame_to_pixel(self.current_frame)
paint.setPen(Qt.NoPen)
paint.setBrush(QColor.fromRgbF(1.0, 0.0, 0.0))
paint.drawConvexPolygon(QPoint(x, height), QPoint(x + 5, height - 15), QPoint(x - 5, height - 15))
| fluggo/Canvas | fluggo/editor/ui/ruler.py | Python | gpl-3.0 | 5,325 | [
"Brian"
] | ce996b32d2bf9ee895dd51b236d2977b0945297c368e533e5f929a32af7acc15 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, include(admin.site.urls)),
# User management
url(r'^users/', include("facet.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development: just visit
# these URLs in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request),
url(r'^403/$', default_views.permission_denied),
url(r'^404/$', default_views.page_not_found),
url(r'^500/$', default_views.server_error),
]
| HBCompass/temp | config/urls.py | Python | bsd-3-clause | 1,275 | [
"VisIt"
] | 5cf9bc2ac66f447b63c9fc50da47c7005fe50b3d5e7c96d834f89c8be70a44f4 |
# Copyright (C) 2010, Joao Rodrigues (anaryin@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code for dealing with Structural Biology Web Servers.
""" | SBRG/ssbio | ssbio/biopython/Bio/Struct/WWW/__init__.py | Python | mit | 286 | [
"Biopython"
] | 724cc228f53948fd09f081e8c29b94cce87b80214bcb485054013097f96f636e |
import urllib2
from madcow import __version__ as current_version
from madcow.conf import settings
from madcow.util import Task
class Main(Task):
url = 'http://dis.gruntle.org/app/madcow/latest/'
agent = 'Madcow Updater v' + current_version
msg_fmt = 'Madcow v%(new_version)s is available, you have v%(current_version)s. Visit http://madcow.googlecode.com/ to update.\x07'
def init(self):
self.frequency = settings.UPDATER_FREQ
self.output = settings.UPDATER_ANNOUNCE_CHANNELS
self.opener = urllib2.build_opener()
self.opener.addheaders = [('User-Agent', self.agent)]
def response(self, *args):
"""This is called by madcow, should return a string or None"""
self.log.info('checking for updates for madcow...')
new_version = self.opener.open(self.url).read().strip()
if numeric(new_version) > numeric(current_version):
msg = self.msg_fmt % {'current_version': current_version, 'new_version': new_version}
self.log.warn(msg)
return msg
else:
self.log.info('you are up to date')
def numeric(version):
"""Convert multi-part version string into a numeric value"""
return sum(int(part) * (100 ** (2 - i)) for i, part in enumerate(version.split('.')) if part.strip().isdigit())
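# Illustrative values (not part of the original module):
#   numeric('1.2.3') -> 1*100**2 + 2*100 + 3 = 10203
#   numeric('2.0') -> 20000
# so versions with up to three dot-separated parts compare correctly; a fourth
# part would only contribute a fractional weight (100**-1).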
| ToxicFrog/lancow | madcow/tasks/updater.py | Python | gpl-3.0 | 1,325 | [
"VisIt"
] | f96ef9a666533b1601c94c09f6f01e3318a4aaa524f8a6c18baf679564650f72 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAnalysispageserver(RPackage):
"""AnalysisPageServer is a modular system that enables sharing
of customizable R analyses via the web."""
homepage = "https://www.bioconductor.org/packages/AnalysisPageServer/"
git = "https://git.bioconductor.org/packages/AnalysisPageServer.git"
version('1.10.0', commit='876c87073be116fa15a1afdd407e21152eb80d50')
depends_on('r@3.4.0:3.4.9', when='@1.10.0')
depends_on('r-log4r', type=('build', 'run'))
depends_on('r-rjson', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-graph', type=('build', 'run'))
| mfherbst/spack | var/spack/repos/builtin/packages/r-analysispageserver/package.py | Python | lgpl-2.1 | 1,883 | [
"Bioconductor"
] | b09a3e4451ce126a7e7a7d16208da47adfb48976902ce2bfc99f11a9272a6132 |
import copy
import csv
import datetime
import json
import mock
import os
import re
import shutil
import tempfile
import urllib
import pyquery
from cStringIO import StringIO
from nose.tools import eq_, ok_, assert_raises
from nose.plugins.skip import SkipTest
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.models import (
User,
AnonymousUser,
Group,
Permission
)
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.contrib.contenttypes.models import ContentType
from crashstats.base.tests.testbase import DjangoTestCase
from crashstats.crashstats import models
from crashstats.crashstats.management import PERMISSIONS
from .test_models import Response
SAMPLE_STATUS = {
"breakpad_revision": "1035",
"hits": [
{
"date_oldest_job_queued": "2012-09-28T20:39:33+00:00",
"date_recently_completed": "2012-09-28T20:40:00+00:00",
"processors_count": 1,
"avg_wait_sec": 16.407,
"waiting_job_count": 56,
"date_created": "2012-09-28T20:40:02+00:00",
"id": 410655,
"avg_process_sec": 0.914149
},
{
"date_oldest_job_queued": "2012-09-28T20:34:33+00:00",
"date_recently_completed": "2012-09-28T20:35:00+00:00",
"processors_count": 1,
"avg_wait_sec": 13.8293,
"waiting_job_count": 48,
"date_created": "2012-09-28T20:35:01+00:00",
"id": 410654,
"avg_process_sec": 1.24177
},
{
"date_oldest_job_queued": "2012-09-28T20:29:32+00:00",
"date_recently_completed": "2012-09-28T20:30:01+00:00",
"processors_count": 1,
"avg_wait_sec": 14.8803,
"waiting_job_count": 1,
"date_created": "2012-09-28T20:30:01+00:00",
"id": 410653,
"avg_process_sec": 1.19637
}
],
"total": 12,
"socorro_revision": "017d7b3f7042ce76bc80949ae55b41d1e915ab62",
"schema_revision": "schema_12345"
}
SAMPLE_META = """ {
"InstallTime": "1339289895",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "%s",
"Vendor": "Mozilla",
"URL": "%s"
} """
SAMPLE_UNREDACTED = """ {
"client_crash_date": "2012-06-11T06:08:45",
"dump": "%s",
"signature": "FakeSignature1",
"user_comments": "%s",
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": null,
"distributor_version": null,
"truncated": true,
"process_type": null,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": true,
"json_dump": {
"status": "OK",
"sensitive": {
"exploitability": "high"
},
"threads": []
}
} """
BUG_STATUS = """ {
"hits": [{"id": "222222",
"signature": "FakeSignature1"},
{"id": "333333",
"signature": "FakeSignature1"},
{"id": "444444",
"signature": "Other FakeSignature"}
]
} """
SAMPLE_SIGNATURE_SUMMARY = {
"reports": {
"products": [
{
"version_string": "33.0a2",
"percentage": "57.542",
"report_count": 103,
"product_name": "Firefox"
},
],
"uptime": [
{
"category": "< 1 min",
"percentage": "29.126",
"report_count": 30
}
],
"architecture": [
{
"category": "x86",
"percentage": "100.000",
"report_count": 103
}
],
"flash_version": [
{
"category": "[blank]",
"percentage": "100.000",
"report_count": 103
}
],
"graphics": [
{
"report_count": 24,
"adapter_name": None,
"vendor_hex": "0x8086",
"percentage": "23.301",
"vendor_name": None,
"adapter_hex": "0x0166"
}
],
"distinct_install": [
{
"crashes": 103,
"version_string": "33.0a2",
"product_name": "Firefox",
"installations": 59
}
],
"devices": [
{
"cpu_abi": "XXX",
"manufacturer": "YYY",
"model": "ZZZ",
"version": "1.2.3",
"report_count": 52311,
"percentage": "48.440",
}
],
"os": [
{
"category": "Windows 8.1",
"percentage": "55.340",
"report_count": 57
}
],
"process_type": [
{
"category": "Browser",
"percentage": "100.000",
"report_count": 103
}
],
"exploitability": [
{
"low_count": 0,
"high_count": 0,
"null_count": 0,
"none_count": 4,
"report_date": "2014-08-12",
"medium_count": 0
}
]
}
}
class RobotsTestViews(DjangoTestCase):
@override_settings(ENGAGE_ROBOTS=True)
def test_robots_txt(self):
url = '/robots.txt'
response = self.client.get(url)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'text/plain')
ok_('Allow: /' in response.content)
@override_settings(ENGAGE_ROBOTS=False)
def test_robots_txt_disengage(self):
url = '/robots.txt'
response = self.client.get(url)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'text/plain')
ok_('Disallow: /' in response.content)
class FaviconTestViews(DjangoTestCase):
def test_favicon(self):
tmp_static_root = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmp_static_root)
favicon_dir = os.path.join(tmp_static_root, 'img')
os.makedirs(favicon_dir)
favicon_path = os.path.join(favicon_dir, 'favicon.ico')
with open(favicon_path, 'wb') as icon:
icon.write('totally fake')
with self.settings(STATIC_ROOT=tmp_static_root):
response = self.client.get('/favicon.ico')
eq_(response.status_code, 200)
ok_('image/x-icon' in response['Content-Type'])
class BaseTestViews(DjangoTestCase):
@mock.patch('requests.get')
def setUp(self, rget):
super(BaseTestViews, self).setUp()
# checking settings.CACHES isn't as safe as `cache.__class__`
if 'LocMemCache' not in cache.__class__.__name__:
raise ImproperlyConfigured(
                'The tests require that you use LocMemCache when running'
)
# we do this here so that the current/versions thing
# is cached since that's going to be called later
# in every view more or less
def mocked_get(url, params, **options):
now = datetime.datetime.utcnow()
yesterday = now - datetime.timedelta(days=1)
if '/platforms/' in url:
return Response({
"hits": [
{
'code': 'win',
'name': 'Windows',
},
{
'code': 'mac',
'name': 'Mac OS X',
},
{
'code': 'lin',
'name': 'Linux',
}
],
"total": 6
})
if 'products/' in url:
return Response("""
{"products": [
"WaterWolf",
"NightTrain",
"SeaMonkey",
"LandCrab"
],
"hits": {
"WaterWolf": [
{"product": "WaterWolf",
"throttle": "100.00",
"end_date": "%(end_date)s",
"start_date": "2012-03-08",
"featured": true,
"version": "19.0",
"release": "Beta",
"id": 922},
{"product": "WaterWolf",
"throttle": "100.00",
"end_date": "%(end_date)s",
"start_date": "2012-03-08",
"featured": true,
"version": "18.0",
"release": "Stable",
"id": 920},
{"product": "WaterWolf",
"throttle": "100.00",
"end_date": "2012-03-09",
"start_date": "2012-03-08",
"featured": true,
"version": "19.1",
"release": "Nightly",
"id": 928},
{"product": "WaterWolf",
"throttle": "100.00",
"end_date": "%(end_date)s",
"start_date": "2012-03-08",
"featured": true,
"version": "20.0",
"release": "Nightly",
"id": 923}
],
"NightTrain":[
{"product": "NightTrain",
"throttle": "100.00",
"end_date": "%(end_date)s",
"start_date": "2012-03-08",
"featured": true,
"version": "18.0",
"release": "Aurora",
"id": 924},
{"product": "NightTrain",
"throttle": "100.00",
"end_date": "%(end_date)s",
"start_date": "2012-03-08",
"featured": true,
"version": "19.0",
"release": "Nightly",
"id": 925}
],
"SeaMonkey": [
{"product": "SeaMonkey",
"throttle": "99.00",
"end_date": "%(yesterday)s",
"start_date": "2012-03-08",
"featured": true,
"version": "9.5",
"release": "Alpha",
"id": 921},
{"product": "SeaMonkey",
"throttle": "99.00",
"end_date": "%(end_date)s",
"start_date": "2012-03-08",
"featured": true,
"version": "10.5",
"release": "nightly",
"id": 926}
],
"LandCrab": [
{"product": "LandCrab",
"throttle": "99.00",
"end_date": "%(end_date)s",
"start_date": "2012-03-08",
"featured": false,
"version": "1.5",
"release": "Release",
"id": 927}
]
},
"total": 4
}
""" % {'end_date': now.strftime('%Y-%m-%d'),
'yesterday': yesterday.strftime('%Y-%m-%d')})
if '/supersearch/fields/' in url:
from crashstats.supersearch.tests.test_views import (
SUPERSEARCH_FIELDS_MOCKED_RESULTS
)
results = copy.copy(SUPERSEARCH_FIELDS_MOCKED_RESULTS)
                # to be realistic we want to introduce some dupes that have
                # a different key but whose `in_database_name` is already in
                # the hardcoded list (the baseline)
assert 'accessibility' not in results
results['accessibility'] = {
'name': 'accessibility',
'query_type': 'string',
'namespace': 'raw_crash',
'form_field_choices': None,
'permissions_needed': [],
'default_value': None,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'in_database_name': 'Accessibility',
}
return Response(results)
raise NotImplementedError(url)
rget.side_effect = mocked_get
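        # `side_effect` routes every requests.get() in these tests through
        # mocked_get; any URL we haven't explicitly handled raises
        # NotImplementedError so unexpected fetches fail loudly.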
        # call these here so their results get cached for each test,
        # because they are used so often
from crashstats.crashstats.models import CurrentVersions, Platforms
CurrentVersions().get()
Platforms().get()
from crashstats.supersearch.models import SuperSearchFields
SuperSearchFields().get()
def tearDown(self):
super(BaseTestViews, self).tearDown()
cache.clear()
def _login(self):
user = User.objects.create_user('test', 'test@mozilla.com', 'secret')
assert self.client.login(username='test', password='secret')
return user
def _logout(self):
self.client.logout()
def _add_permission(self, user, codename, group_name='Hackers'):
        group = self._create_group_with_permission(codename, group_name)
user.groups.add(group)
def _create_group_with_permission(self, codename, group_name='Group'):
appname = 'crashstats'
ct, __ = ContentType.objects.get_or_create(
model='',
app_label=appname,
defaults={'name': appname}
)
permission, __ = Permission.objects.get_or_create(
codename=codename,
name=PERMISSIONS[codename],
content_type=ct
)
group, __ = Group.objects.get_or_create(
name=group_name,
)
group.permissions.add(permission)
return group
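    # Typical usage of the two helpers above (as seen in the tests below):
    #
    #   user = self._login()
    #   self._add_permission(user, 'view_exploitability')
    #   assert user.has_perm('crashstats.view_exploitability')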
class TestGoogleAnalytics(BaseTestViews):
@override_settings(GOOGLE_ANALYTICS_ID='xyz123')
@override_settings(GOOGLE_ANALYTICS_DOMAIN='test.biz')
@mock.patch('requests.get')
def test_google_analytics(self, rget):
url = reverse('crashstats:home', args=('WaterWolf',))
def mocked_get(url, params, **options):
if 'products' in url:
return Response("""
{
"hits": [{
"is_featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"build_type": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}],
"total": 1
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('xyz123' in response.content)
ok_('test.biz' in response.content)
class TestViews(BaseTestViews):
def test_contribute_json(self):
response = self.client.get('/contribute.json')
eq_(response.status_code, 200)
# should be valid JSON
ok_(json.loads(response.content))
eq_(response['Content-Type'], 'application/json')
@mock.patch('requests.get')
def test_handler500(self, rget):
root_urlconf = __import__(
settings.ROOT_URLCONF,
globals(),
locals(),
['urls'],
-1
)
# ...so that we can access the 'handler500' defined in there
par, end = root_urlconf.handler500.rsplit('.', 1)
# ...which is an importable reference to the real handler500 function
views = __import__(par, globals(), locals(), [end], -1)
# ...and finally we have the handler500 function at hand
handler500 = getattr(views, end)
# to make a mock call to the django view functions you need a request
fake_request = RequestFactory().request(**{'wsgi.input': None})
# Need a fake user for the persona bits on crashstats_base
fake_request.user = AnonymousUser()
        # we first cause an exception to be raised because django only calls
        # the handler500 function when an exception has been raised, which
        # means sys.exc_info() returns something
try:
raise NameError('sloppy code')
except NameError:
# do this inside a frame that has a sys.exc_info()
response = handler500(fake_request)
eq_(response.status_code, 500)
ok_('Internal Server Error' in response.content)
ok_('id="products_select"' not in response.content)
def test_handler404(self):
url = reverse('crashstats:home', args=('Unknown',))
response = self.client.get(url)
eq_(response.status_code, 404)
ok_('Page not Found' in response.content)
ok_('id="products_select"' not in response.content)
def test_homepage_redirect(self):
response = self.client.get('/')
eq_(response.status_code, 302)
destination = reverse('crashstats:home',
args=[settings.DEFAULT_PRODUCT])
ok_(destination in response['Location'])
def test_homepage_products_redirect_without_versions(self):
url = reverse('crashstats:home', args=['WaterWolf'])
# some legacy URLs have this
url += '/versions/'
response = self.client.get(url)
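        # `A and X or Y` is the pre-`X if A else Y` conditional idiom:
        # 301 when legacy redirects are configured as permanent, else 302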
redirect_code = settings.PERMANENT_LEGACY_REDIRECTS and 301 or 302
eq_(response.status_code, redirect_code)
destination = reverse('crashstats:home', args=['WaterWolf'])
ok_(destination in response['Location'])
def test_legacy_query_redirect(self):
response = self.client.get('/query/query?foo=bar')
redirect_code = settings.PERMANENT_LEGACY_REDIRECTS and 301 or 302
eq_(response.status_code, redirect_code)
ok_(reverse('crashstats:query') + '?foo=bar' in response['Location'])
@mock.patch('requests.get')
def test_buginfo(self, rget):
url = reverse('crashstats:buginfo')
def mocked_get(url, params, **options):
if 'bug?id=' in url:
return Response('{"bugs": [{"product": "allizom.org"}]}')
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 400)
response = self.client.get(url, {'bug_ids': '123,456'})
eq_(response.status_code, 400)
response = self.client.get(url, {'include_fields': 'product'})
eq_(response.status_code, 400)
response = self.client.get(url, {'bug_ids': ' 123, 456 ',
'include_fields': ' product'})
eq_(response.status_code, 200)
struct = json.loads(response.content)
ok_(struct['bugs'])
eq_(struct['bugs'][0]['product'], 'allizom.org')
@mock.patch('requests.get')
def test_buginfo_with_caching(self, rget):
url = reverse('crashstats:buginfo')
def mocked_get(url, params, **options):
if 'bug?id=' in url:
return Response("""{"bugs": [
{"id": "987",
"product": "allizom.org",
"summary": "Summary 1"},
{"id": "654",
"product": "mozilla.org",
"summary": "Summary 2"}
]}""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {
'bug_ids': '987,654',
'include_fields': 'product,summary'
})
eq_(response.status_code, 200)
struct = json.loads(response.content)
eq_(struct['bugs'][0]['product'], 'allizom.org')
eq_(struct['bugs'][0]['summary'], 'Summary 1')
eq_(struct['bugs'][0]['id'], '987')
eq_(struct['bugs'][1]['product'], 'mozilla.org')
eq_(struct['bugs'][1]['summary'], 'Summary 2')
eq_(struct['bugs'][1]['id'], '654')
# expect to be able to find this in the cache now
cache_key = 'buginfo:987'
eq_(cache.get(cache_key), struct['bugs'][0])
@mock.patch('requests.get')
def test_home(self, rget):
url = reverse('crashstats:home', args=('WaterWolf',))
def mocked_get(url, params, **options):
if '/products' in url and 'versions' not in params:
return Response("""
{
"products": [
"WaterWolf"
],
"hits": {
"WaterWolf": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"release": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}]
},
"total": 1
}
""")
elif '/products' in url:
return Response("""
{
"hits": [{
"is_featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"build_type": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}],
"total": 1
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
# Testing with unknown product
url = reverse('crashstats:home', args=('InternetExplorer',))
response = self.client.get(url)
eq_(response.status_code, 404)
# Testing with unknown version for product
url = reverse('crashstats:home', args=('WaterWolf', '99'))
response = self.client.get(url)
eq_(response.status_code, 404)
# Testing with valid version for product
url = reverse('crashstats:home', args=('WaterWolf', '19.0'))
response = self.client.get(url)
eq_(response.status_code, 200)
@mock.patch('requests.get')
def test_frontpage_json(self, rget):
url = reverse('crashstats:frontpage_json')
def mocked_get(url, params, **options):
if '/crashes/daily' in url:
return Response("""
{
"hits": {
"WaterWolf:19.0": {
"2012-10-08": {
"product": "WaterWolf",
"adu": 30000,
"crash_hadu": 71.099999999999994,
"version": "19.0",
"report_count": 2133,
"date": "2012-10-08"
},
"2012-10-02": {
"product": "WaterWolf",
"adu": 30000,
"crash_hadu": 77.299999999999997,
"version": "19.0",
"report_count": 2319,
"date": "2012-10-02"
}
}
}
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {'product': 'WaterWolf'})
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
ok_(struct['product_versions'])
eq_(struct['count'], 1)
@mock.patch('requests.get')
def test_frontpage_json_bad_request(self, rget):
url = reverse('crashstats:frontpage_json')
def mocked_get(url, params, **options):
assert '/crashes/daily' in url, url
if 'product' in params and params['product'] == 'WaterWolf':
return Response("""
{
"hits": {
"WaterWolf:19.0": {
"2012-10-08": {
"product": "WaterWolf",
"adu": 30000,
"crash_hadu": 71.099999999999994,
"version": "19.0",
"report_count": 2133,
"date": "2012-10-08"
},
"2012-10-02": {
"product": "WaterWolf",
"adu": 30000,
"crash_hadu": 77.299999999999997,
"version": "19.0",
"report_count": 2319,
"date": "2012-10-02"
}
}
}
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {'product': 'Neverheardof'})
eq_(response.status_code, 400)
response = self.client.get(url, {'versions': '999.1'})
eq_(response.status_code, 400)
response = self.client.get(url, {
'product': 'WaterWolf',
'versions': '99.9' # mismatch
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'product': 'WaterWolf',
'versions': '19.0'
})
eq_(response.status_code, 200)
response = self.client.get(url, {
'product': 'WaterWolf',
'duration': 'xxx'
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'product': 'WaterWolf',
'duration': '-100'
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'product': 'WaterWolf',
'duration': '10'
})
eq_(response.status_code, 200)
response = self.client.get(url, {
'product': 'WaterWolf',
'date_range_type': 'junk'
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'product': 'WaterWolf',
'date_range_type': 'build'
})
eq_(response.status_code, 200)
response = self.client.get(url, {
'product': 'WaterWolf',
'date_range_type': 'report'
})
eq_(response.status_code, 200)
@mock.patch('requests.get')
def test_frontpage_json_no_data_for_version(self, rget):
url = reverse('crashstats:frontpage_json')
def mocked_get(url, params, **options):
assert '/crashes/daily' in url, url
if 'product' in params and params['product'] == 'WaterWolf':
return Response("""
{
"hits": {}
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {
'product': 'WaterWolf',
'versions': '20.0'
})
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
# Even though there was no data, the product_versions
# property should still exist and be populated.
eq_(struct['count'], 0)
ok_(struct['product_versions'])
selected_product = struct['product_versions'][0]
eq_(selected_product['product'], 'WaterWolf')
eq_(selected_product['version'], '20.0')
@mock.patch('requests.get')
def test_products_list(self, rget):
url = reverse('crashstats:products_list')
def mocked_get(url, params, **options):
if '/products' in url:
return Response("""
{
"products": [
"WaterWolf",
"Fennec"
],
"hits": [
{
"sort": "1",
"default_version": "15.0.1",
"release_name": "firefox",
"rapid_release_version": "5.0",
"product_name": "WaterWolf"
},
{
"sort": "3",
"default_version": "10.0.6esr",
"release_name": "mobile",
"rapid_release_version": "5.0",
"product_name": "Fennec"
}],
"total": "2"
}
""")
            raise NotImplementedError(url)
        rget.side_effect = mocked_get
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
@mock.patch('requests.get')
def test_gccrashes(self, rget):
url = reverse('crashstats:gccrashes', args=('WaterWolf',))
unknown_product_url = reverse('crashstats:gccrashes',
args=('NotKnown',))
invalid_version_url = reverse('crashstats:gccrashes',
args=('WaterWolf', '99'))
def mocked_get(**options):
if '/products' in options['url']:
return Response("""
{
"products": ["WaterWolf"],
"hits": [
{
"product": "WaterWolf",
"version": "20.0",
"release": "Nightly"
}
],
"total": "1"
}
""")
            raise NotImplementedError(options['url'])
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Total Volume of GC Crashes for WaterWolf 19.1'
in response.content)
response = self.client.get(invalid_version_url)
eq_(response.status_code, 200)
doc = pyquery.PyQuery(response.content)
eq_(doc('.django-form-error li b')[0].text, 'Version:')
response = self.client.get(unknown_product_url)
eq_(response.status_code, 404)
@mock.patch('requests.get')
def test_gccrashes_json(self, rget):
url = reverse('crashstats:gccrashes_json')
def mocked_get(url, params, **options):
if '/gccrashes' in url:
return Response("""
{
"hits": [
[
"20140203000001",
366
]
],
"total": 1
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {
'product': 'WaterWolf',
'version': '20.0',
'start_date': '2014-01-27',
'end_date': '2014-02-04'
})
        eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
@mock.patch('requests.get')
def test_gccrashes_json_bad_request(self, rget):
url = reverse('crashstats:gccrashes_json')
def mocked_get(url, **options):
if 'gccrashes/' in url:
return Response("""
{
"hits": [
[
"20140203000001",
366
]
],
"total": 1
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {
'product': 'WaterWolf',
'version': '20.0',
'start_date': 'XXXXXX', # not even close
'end_date': '2014-02-04'
})
        eq_(response.status_code, 400)
response = self.client.get(url, {
'product': 'WaterWolf',
'version': '20.0',
'start_date': '2014-02-33', # crazy date
'end_date': '2014-02-04'
})
        eq_(response.status_code, 400)
# same but on the end_date
response = self.client.get(url, {
'product': 'WaterWolf',
'version': '20.0',
'start_date': '2014-02-13',
'end_date': '2014-02-44' # crazy date
})
        eq_(response.status_code, 400)
# start_date > end_date
response = self.client.get(url, {
'product': 'WaterWolf',
'version': '20.0',
'start_date': '2014-02-02',
'end_date': '2014-01-01' # crazy date
})
        eq_(response.status_code, 400)
def test_crash_trends(self):
url = reverse('crashstats:crash_trends', args=('WaterWolf',))
no_nightly_url = reverse('crashstats:crash_trends', args=('LandCrab',))
inconsistent_case_url = reverse('crashstats:crash_trends',
args=('SeaMonkey',))
        unknown_product_url = reverse('crashstats:crash_trends',
                                      args=('NotKnown',))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Nightly Crash Trends For WaterWolf' in response.content)
        response = self.client.get(unknown_product_url)
eq_(response.status_code, 404)
# This used to cause a 500 because there is no Nightly associated
# with this product, should 200 now.
response = self.client.get(no_nightly_url)
eq_(response.status_code, 200)
ok_('Nightly Crash Trends For LandCrab' in response.content)
# This used to cause a 500 because of inconsistent case for
# release names in the DB, causing some releases to be returned
# as 'nightly' instead of 'Nightly'. This should now return 200.
response = self.client.get(inconsistent_case_url)
eq_(response.status_code, 200)
ok_('Nightly Crash Trends For SeaMonkey' in response.content)
@mock.patch('requests.get')
def test_get_nightlies_for_product_json(self, rget):
url = reverse('crashstats:get_nightlies_for_product_json')
def mocked_get(**options):
if '/products' in options['url']:
return Response("""
{
"hits": [
{
"sort": "1",
"default_version": "5.0a1",
"release_name": "waterwolf",
"rapid_release_version": "5.0",
"product_name": "WaterWolf"
}],
"total": "1"
}
""")
            raise NotImplementedError(options['url'])
rget.side_effect = mocked_get
response = self.client.get(url, {'product': 'WaterWolf'})
ok_('application/json' in response['content-type'])
eq_(response.status_code, 200)
        eq_(json.loads(response.content), ['20.0'])
response = self.client.get(url, {'product': 'NightTrain'})
eq_(response.status_code, 200)
        eq_(json.loads(response.content), ['18.0', '19.0'])
response = self.client.get(url, {'product': 'Unknown'})
        eq_(json.loads(response.content), [])
@mock.patch('requests.get')
def test_crashtrends_json(self, rget):
url = reverse('crashstats:crashtrends_json')
def mocked_get(url, params, **options):
ok_('start_date' in params)
eq_('2012-10-01', params['start_date'])
ok_('end_date' in params)
eq_('2012-10-10', params['end_date'])
if '/crashtrends' in url:
return Response("""
{
"crashtrends": [{
"build_date": "2012-10-10",
"version_string": "5.0a1",
"product_version_id": 1,
"days_out": 6,
"report_count": 144,
"report_date": "2012-10-04",
"product_name": "WaterWolf"
},
{
"build_date": "2012-10-06",
"version_string": "5.0a1",
"product_version_id": 1,
"days_out": 2,
"report_count": 162,
"report_date": "2012-10-08",
"product_name": "WaterWolf"
},
{
"build_date": "2012-09-29",
"version_string": "5.0a1",
"product_version_id": 1,
"days_out": 5,
"report_count": 144,
"report_date": "2012-10-04",
"product_name": "WaterWolf"
}]
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {
'product': 'WaterWolf',
'version': '20.0',
'start_date': '2012-10-01',
'end_date': '2012-10-10'
})
        eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
eq_(struct['total'], 2)
# Test with product that does not have a nightly
response = self.client.get(url, {
'product': 'LandCrab',
'version': '9.5',
'start_date': '2012-10-01',
'end_date': '2012-10-10'
})
        eq_(response.status_code, 400)
ok_('text/html' in response['content-type'])
ok_(
'LandCrab is not one of the available choices'
in response.content
)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_topcrasher_ranks_bybug(self, rget, rpost):
url = reverse('crashstats:topcrasher_ranks_bybug')
def mocked_post(**options):
assert '/bugs' in options['url'], options['url']
return Response("""
{"hits": [{"id": "123456789", "signature": "FakeSignature 1"},
{"id": "123456789", "signature": "FakeSignature 3"}]}
""")
def mocked_get(url, params, **options):
signature_summary_data = copy.deepcopy(SAMPLE_SIGNATURE_SUMMARY)
if '/signaturesummary' in url:
signature_summary_data['reports']['products'] = [
{
"version_string": "18.0",
"percentage": "48.440",
"report_count": 52311,
"product_name": "WaterWolf",
},
{
"version_string": "18.0",
"percentage": "48.440",
"report_count": 52311,
"product_name": "NightTrain",
},
{
"version_string": "13.0b4",
"percentage": "9.244",
"report_count": 9983,
"product_name": "WaterWolf",
}
]
return Response(signature_summary_data)
if '/crashes/signatures' in url:
return Response(u"""
{"crashes": [
{
"count": 188,
"mac_count": 66,
"content_count": 0,
"first_report": "2012-06-21",
"startup_percent": 0.0,
"currentRank": 0,
"previousRank": 1,
"first_report_exact": "2012-06-21T21:28:08",
"versions":
"2.0, 2.1, 3.0a2, 3.0b2, 3.1b1, 4.0a1, 4.0a2, 5.0a1",
"percentOfTotal": 0.24258064516128999,
"win_count": 56,
"changeInPercentOfTotal": 0.011139597126354983,
"linux_count": 66,
"hang_count": 0,
"signature": "FakeSignature 1",
"versions_count": 8,
"changeInRank": 1,
"plugin_count": 0,
"previousPercentOfTotal": 0.23144104803493501,
"is_gc_count": 10
},
{
"count": 188,
"mac_count": 66,
"content_count": 0,
"first_report": "2012-06-21",
"startup_percent": 0.0,
"currentRank": 0,
"previousRank": 1,
"first_report_exact": "2012-06-21T21:28:08",
"versions":
"2.0, 2.1, 3.0a2, 3.0b2, 3.1b1, 4.0a1, 4.0a2, 5.0a1",
"percentOfTotal": 0.24258064516128999,
"win_count": 56,
"changeInPercentOfTotal": 0.011139597126354983,
"linux_count": 66,
"hang_count": 0,
"signature": "FakeSignature 2",
"versions_count": 8,
"changeInRank": 1,
"plugin_count": 0,
"previousPercentOfTotal": 0.23144104803493501,
"is_gc_count": 10
}
],
"totalPercentage": 0,
"start_date": "2012-05-10",
"end_date": "2012-05-24",
"totalNumberOfCrashes": 2}
""")
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
response = self.client.get(url, {'bug_number': '123456789'})
ok_('FakeSignature 1' in response.content)
ok_('FakeSignature 2' not in response.content)
ok_('FakeSignature 3' in response.content)
report_list_url = reverse('crashstats:report_list')
report_list_url1 = (
'%s?signature=%s' % (
report_list_url,
urllib.quote_plus('FakeSignature 1')
)
)
ok_(report_list_url1 in response.content)
report_list_url3 = (
'%s?signature=%s' % (
report_list_url,
urllib.quote_plus('FakeSignature 3')
)
)
ok_(report_list_url3 in response.content)
# ensure that multiple products appear
doc = pyquery.PyQuery(response.content)
eq_(doc('td[class=product]')[0].text, 'WaterWolf')
eq_(doc('td[class=product]')[1].text, 'NightTrain')
eq_(response.status_code, 200)
# we also have a signature with no active product+version
ok_('Not found in active topcrash lists' in response.content)
response = self.client.get(url, {'bug_number': '123bad'})
eq_(response.status_code, 400)
response = self.client.get(url, {'bug_number': '1234564654564646'})
eq_(response.status_code, 400)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_topcrasher(self, rget, rpost):
# first without a version
no_version_url = reverse('crashstats:topcrasher',
args=('WaterWolf',))
url = reverse('crashstats:topcrasher',
args=('WaterWolf', '19.0'))
has_builds_url = reverse('crashstats:topcrasher',
args=('WaterWolf', '19.0', 'build'))
reports_count_default = reverse('crashstats:topcrasher',
args=('WaterWolf', '19.0'))
reports_count_100 = reverse('crashstats:topcrasher',
args=('WaterWolf', '19.0', None, None,
None, '100'))
response = self.client.get(no_version_url)
ok_(url in response['Location'])
def mocked_post(**options):
assert '/bugs/' in options['url'], options['url']
return Response("""{
"hits": [
{"id": 123456789,
"signature": "Something"},
{"id": 22222,
"signature": "FakeSignature1 \u7684 Japanese"},
{"id": 33333,
"signature": "FakeSignature1 \u7684 Japanese"}
]
}
""")
def mocked_get(url, params, **options):
if '/crashes/signatures' in url:
return Response(u"""
{"crashes": [
{
"count": 188,
"mac_count": 66,
"content_count": 0,
"first_report": "2012-06-21",
"startup_percent": 0.0,
"currentRank": 0,
"previousRank": 1,
"first_report_exact": "2012-06-21T21:28:08",
"versions":
"2.0, 2.1, 3.0a2, 3.0b2, 3.1b1, 4.0a1, 4.0a2, 5.0a1",
"percentOfTotal": 0.24258064516128999,
"win_count": 56,
"changeInPercentOfTotal": 0.011139597126354983,
"linux_count": 66,
"hang_count": 0,
"signature": "FakeSignature1 \u7684 Japanese",
"versions_count": 8,
"changeInRank": 1,
"plugin_count": 0,
"previousPercentOfTotal": 0.23144104803493501,
"is_gc_count": 10
}
],
"totalPercentage": 0,
"start_date": "2012-05-10",
"end_date": "2012-05-24",
"totalNumberOfCrashes": 0}
""")
if '/products' in url:
return Response("""
{
"hits": [
{
"is_featured": true,
"throttle": 1.0,
"end_date": "string",
"start_date": "integer",
"build_type": "string",
"product": "WaterWolf",
"version": "19.0",
"has_builds": true
}],
"total": "1"
}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('By Crash Date' in response.content)
response = self.client.get(has_builds_url)
eq_(response.status_code, 200)
ok_('By Build Date' in response.content)
response = self.client.get(reports_count_default)
eq_(response.status_code, 200)
doc = pyquery.PyQuery(response.content)
selected_count = doc('.tc-result-count a[class="selected"]')
eq_(selected_count.text(), '50')
# there's actually only one such TD
bug_ids = [x.text for x in doc('td.bug_ids_more > a')]
# higher bug number first
eq_(bug_ids, ['33333', '22222'])
response = self.client.get(reports_count_100)
eq_(response.status_code, 200)
doc = pyquery.PyQuery(response.content)
selected_count = doc('.tc-result-count a[class="selected"]')
eq_(selected_count.text(), '100')
# also, render the CSV
response = self.client.get(url, {'format': 'csv'})
eq_(response.status_code, 200)
ok_('text/csv' in response['Content-Type'])
# know your fixtures :)
ok_('WaterWolf' in response['Content-Disposition'])
ok_('19.0' in response['Content-Disposition'])
        # we should be able to unpack it
reader = csv.reader(StringIO(response.content))
line1, line2 = reader
eq_(line1[0], 'Rank')
try:
eq_(int(line2[0]), 1)
except Exception:
raise SkipTest
# bytestring when exported as CSV with UTF-8 encoding
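        # ('\xe7\x9a\x84' is the UTF-8 encoding of U+7684, the character
        # used in the mocked signature)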
eq_(line2[4], 'FakeSignature1 \xe7\x9a\x84 Japanese')
def test_topcrasher_with_invalid_version(self):
# 0.1 is not a valid release version
url = reverse('crashstats:topcrasher',
args=('WaterWolf', '0.1'))
response = self.client.get(url)
eq_(response.status_code, 404)
def test_topcrasher_with_product_sans_release(self):
# SnowLion is not a product at all
url = reverse('crashstats:topcrasher',
args=('SnowLion', '0.1'))
response = self.client.get(url)
eq_(response.status_code, 404)
# SeaMonkey is a product but has no active releases
url = reverse('crashstats:topcrasher',
args=('SeaMonkey', '9.5'))
response = self.client.get(url)
eq_(response.status_code, 404)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_topcrasher_without_any_signatures(self, rget, rpost):
# first without a version
no_version_url = reverse('crashstats:topcrasher',
args=('WaterWolf',))
url = reverse('crashstats:topcrasher',
args=('WaterWolf', '19.0'))
has_builds_url = reverse('crashstats:topcrasher',
args=('WaterWolf', '19.0', 'build'))
response = self.client.get(no_version_url)
ok_(url in response['Location'])
def mocked_post(**options):
assert '/bugs' in options['url'], options['url']
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
def mocked_get(url, params, **options):
if '/crashes/signatures' in url:
return Response(u"""
{"crashes": [],
"totalPercentage": 0,
"start_date": "2012-05-10",
"end_date": "2012-05-24",
"totalNumberOfCrashes": 0}
""")
if '/products' in url:
return Response("""
{
"hits": [
{
"is_featured": true,
"throttle": 1.0,
"end_date": "string",
"start_date": "integer",
"build_type": "string",
"product": "WaterWolf",
"version": "19.0",
"has_builds": true
}],
"total": "1"
}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('By Crash Date' in response.content)
response = self.client.get(has_builds_url)
eq_(response.status_code, 200)
ok_('By Build Date' in response.content)
# also, render the CSV
response = self.client.get(url, {'format': 'csv'})
eq_(response.status_code, 200)
ok_('text/csv' in response['Content-Type'])
# know your fixtures :)
ok_('WaterWolf' in response['Content-Disposition'])
ok_('19.0' in response['Content-Disposition'])
        # no signatures, the CSV is empty apart from the header
eq_(len(response.content.splitlines()), 1)
reader = csv.reader(StringIO(response.content))
line1, = reader
eq_(line1[0], 'Rank')
def test_topcrasher_without_versions_redirect(self):
response = self.client.get('/topcrasher/products/WaterWolf/versions/')
redirect_code = settings.PERMANENT_LEGACY_REDIRECTS and 301 or 302
eq_(response.status_code, redirect_code)
actual_url = reverse('crashstats:topcrasher',
kwargs={'product': 'WaterWolf'})
ok_(response['location'].endswith(actual_url))
@mock.patch('requests.get')
def test_exploitable_crashes_without_product(self, rget):
url = reverse('crashstats:exploitable_crashes_legacy')
user = self._login()
group = self._create_group_with_permission('view_exploitability')
user.groups.add(group)
assert user.has_perm('crashstats.view_exploitability')
response = self.client.get(url)
eq_(response.status_code, 301)
correct_url = reverse(
'crashstats:exploitable_crashes',
args=(settings.DEFAULT_PRODUCT,)
)
ok_(response['location'].endswith(correct_url))
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_exploitable_crashes(self, rget, rpost):
url = reverse(
'crashstats:exploitable_crashes',
args=(settings.DEFAULT_PRODUCT,)
)
def mocked_post(url, **options):
assert '/bugs' in url, url
return Response({
"hits": [
{"id": "111111111", "signature": "FakeSignature 1"},
{"id": "222222222", "signature": "FakeSignature 3"},
{"id": "101010101", "signature": "FakeSignature"}
]
})
rpost.side_effect = mocked_post
def mocked_get(url, params, **options):
assert '/crashes/exploitability' in url
ok_('product' in params)
eq_('WaterWolf', params['product'])
return Response("""
{
"hits": [
{
"signature": "FakeSignature",
"report_date": "2013-06-06",
"high_count": 4,
"medium_count": 3,
"low_count": 2,
"none_count": 1,
"product_name": "%s",
"version_string": "2.0"
}
],
"total": 1
}
""" % (settings.DEFAULT_PRODUCT,))
rget.side_effect = mocked_get
response = self.client.get(url)
ok_(settings.LOGIN_URL in response['Location'] + '?next=%s' % url)
        eq_(response.status_code, 302)
user = self._login()
response = self.client.get(url)
eq_(response.status_code, 302)
ok_(settings.LOGIN_URL in response['Location'] + '?next=%s' % url)
group = self._create_group_with_permission('view_exploitability')
user.groups.add(group)
assert user.has_perm('crashstats.view_exploitability')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('FakeSignature' in response.content)
# only this bug ID should be shown
ok_('101010101' in response.content)
# not these bug IDs
ok_('222222222' not in response.content)
ok_('111111111' not in response.content)
# if you try to mess with the paginator it should just load page 1
response = self.client.get(url, {'page': 'meow'})
        eq_(response.status_code, 200)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_exploitable_crashes_by_product_and_version(self, rget, rpost):
url = reverse(
'crashstats:exploitable_crashes',
args=(settings.DEFAULT_PRODUCT, '19.0')
)
def mocked_post(url, **options):
assert '/bugs' in url, url
return Response({
"hits": [
{"id": "111111111", "signature": "FakeSignature 1"},
{"id": "222222222", "signature": "FakeSignature 3"},
{"id": "101010101", "signature": "FakeSignature"}
]
})
rpost.side_effect = mocked_post
def mocked_get(url, params, **options):
assert '/crashes/exploitability' in url
ok_('product' in params)
eq_('WaterWolf', params['product'])
ok_('version' in params)
eq_('19.0', params['version'])
return Response("""
{
"hits": [
{
"signature": "FakeSignature",
"report_date": "2013-06-06",
"high_count": 4,
"medium_count": 3,
"low_count": 2,
"none_count": 1,
"product_name": "%s",
"version_string": "123.0"
}
],
"total": 1
}
""" % (settings.DEFAULT_PRODUCT,))
rget.side_effect = mocked_get
user = self._login()
group = self._create_group_with_permission('view_exploitability')
user.groups.add(group)
assert user.has_perm('crashstats.view_exploitability')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('FakeSignature' in response.content)
@mock.patch('requests.get')
def test_exploitable_crashes_by_unknown_version(self, rget):
url = reverse(
'crashstats:exploitable_crashes',
args=(settings.DEFAULT_PRODUCT, '999.0')
)
user = self._login()
group = self._create_group_with_permission('view_exploitability')
user.groups.add(group)
assert user.has_perm('crashstats.view_exploitability')
response = self.client.get(url)
eq_(response.status_code, 404)
@mock.patch('requests.get')
def test_daily(self, rget):
url = reverse('crashstats:daily')
def mocked_get(url, params, **options):
if '/products' in url:
return Response("""
{
"products": [
"WaterWolf",
"NightTrain"
],
"hits": {
"WaterWolf": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"release": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}],
"NightTrain": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "NightTrain",
"release": "Nightly",
"version": "18.0",
"has_builds": true,
"start_date": "2012-09-25"
}]
},
"total": 2
}
""")
if '/crashes' in url:
# This list needs to match the versions as done in the common
# fixtures set up in setUp() above.
return Response("""
{
"hits": {
"WaterWolf:20.0": {
"2012-09-23": {
"adu": 80388,
"crash_hadu": 12.279,
"date": "2012-08-23",
"product": "WaterWolf",
"report_count": 9871,
"throttle": 0.1,
"version": "20.0"
}
},
"WaterWolf:19.0": {
"2012-08-23": {
"adu": 80388,
"crash_hadu": 12.279,
"date": "2012-08-23",
"product": "WaterWolf",
"report_count": 9871,
"throttle": 0.1,
"version": "19.0"
}
},
"WaterWolf:18.0": {
"2012-08-13": {
"adu": 80388,
"crash_hadu": 12.279,
"date": "2012-08-23",
"product": "WaterWolf",
"report_count": 9871,
"throttle": 0.1,
"version": "18.0"
}
}
}
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {
'p': 'WaterWolf',
'v': ['20.0', '19.0']
})
eq_(response.status_code, 200)
        # XXX any basic tests we can do on response.content?
ok_('18.0' in response.content.split('id="version3"')[1].
split("</select>")[0])
ok_('18.0' in response.content.split('id="version2"')[1].
split("</select>")[0])
ok_('18.0' in response.content.split('id="version1"')[1].
split("</select>")[0])
ok_('18.0' in response.content.split('id="version0"')[1].
split("</select>")[0])
# check that the CSV version is working too
response = self.client.get(url, {
'p': 'WaterWolf',
'v': ['20.0', '19.0'],
'format': 'csv'
})
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'text/csv')
# also, I should be able to read it
reader = csv.reader(response)
        # because response is an iterator that returns a blank line first,
        # we skip past that first row
rows = list(reader)[1:]
ok_(rows)
head_row = rows[0]
eq_(head_row[0], 'Date')
eq_(
head_row[1:],
[
'WaterWolf 20.0 Crashes',
'WaterWolf 20.0 ADI',
'WaterWolf 20.0 Throttle',
'WaterWolf 20.0 Ratio',
'WaterWolf 19.0 Crashes',
'WaterWolf 19.0 ADI',
'WaterWolf 19.0 Throttle',
'WaterWolf 19.0 Ratio'
]
)
first_row = rows[1]
eq_(first_row[0], '2012-09-23')
# Test dates don't cause problems
response = self.client.get(url, {
'p': 'WaterWolf',
'v': ['20.0', '19.0'],
'date_start': '2010-01-01'
})
eq_(response.status_code, 200)
@mock.patch('crashstats.crashstats.models.Platforms')
@mock.patch('requests.get')
def test_daily_by_os(self, rget, platforms_get):
url = reverse('crashstats:daily')
def mocked_get(url, params, **options):
if '/products' in url:
return Response("""
{
"products": [
"WaterWolf",
"NightTrain"
],
"hits": {
"WaterWolf": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"release": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}],
"NightTrain": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "NightTrain",
"release": "Nightly",
"version": "18.0",
"has_builds": true,
"start_date": "2012-09-25"
}]
},
"total": 2
}
""")
if '/crashes' in url:
ok_('separated_by' in params)
eq_('os', params['separated_by'])
ok_('os' in params)
eq_(['Windows', 'Amiga'], params['os'])
# This list needs to match the versions as done in the common
# fixtures set up in setUp() above.
return Response("""
{
"hits": {
"WaterWolf:20.0:win": {
"2012-09-23": {
"os": "Windows",
"adu": 80388,
"crash_hadu": 12.279,
"date": "2012-08-23",
"product": "WaterWolf",
"report_count": 9871,
"throttle": 0.1,
"version": "20.0"
}
},
"WaterWolf:20.0:ami": {
"2012-09-23": {
"os": "Amiga",
"adu": 7377,
"crash_hadu": 12.279,
"date": "2012-08-23",
"product": "WaterWolf",
"report_count": 871,
"throttle": 0.1,
"version": "20.0"
}
}
}
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_platforms_get():
return [
{'code': 'win', 'name': 'Windows', 'display': True},
{'code': 'ami', 'name': 'Amiga', 'display': True},
{'code': 'win', 'name': 'Windows95'}, # not displayed
]
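        # only platforms with 'display': True should show up as by-OS
        # columns in the output below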
platforms_get().get.side_effect = mocked_platforms_get
response = self.client.get(url, {
'p': 'WaterWolf',
'v': '20.0',
'form_selection': 'by_os'
})
eq_(response.status_code, 200)
        # XXX any basic tests we can do on response.content?
# check that the CSV version is working too
response = self.client.get(url, {
'p': 'WaterWolf',
'v': '20.0',
'format': 'csv',
'form_selection': 'by_os'
})
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'text/csv')
# also, we should be able to read it
reader = csv.reader(response)
        # because response is an iterator that returns a blank line first,
        # we skip past that first row
rows = list(reader)[1:]
head_row = rows[0]
first_row = rows[1]
eq_(head_row[0], 'Date')
eq_(
head_row[1:],
[
'WaterWolf 20.0 on Windows Crashes',
'WaterWolf 20.0 on Windows ADI',
'WaterWolf 20.0 on Windows Throttle',
'WaterWolf 20.0 on Windows Ratio',
'WaterWolf 20.0 on Amiga Crashes',
'WaterWolf 20.0 on Amiga ADI',
'WaterWolf 20.0 on Amiga Throttle',
'WaterWolf 20.0 on Amiga Ratio'
]
)
eq_(first_row[0], '2012-09-23')
def test_daily_legacy_redirect(self):
url = reverse('crashstats:daily')
response = self.client.get(url + '?p=WaterWolf&v[]=Something')
eq_(response.status_code, 301)
ok_('p=WaterWolf' in response['Location'].split('?')[1])
ok_('v=Something' in response['Location'].split('?')[1])
response = self.client.get(
url + '?p=WaterWolf&os[]=Something&os[]=Else'
)
eq_(response.status_code, 301)
ok_('p=WaterWolf' in response['Location'].split('?')[1])
ok_('os=Something' in response['Location'].split('?')[1])
ok_('os=Else' in response['Location'].split('?')[1])
@mock.patch('requests.get')
def test_daily_with_bad_input(self, rget):
url = reverse('crashstats:daily')
def mocked_get(url, params, **options):
if '/products' in url:
return Response("""
{
"products": [
"WaterWolf",
"NightTrain"
],
"hits": {
"WaterWolf": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"release": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}],
"NightTrain": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "NightTrain",
"release": "Nightly",
"version": "18.0",
"has_builds": true,
"start_date": "2012-09-25"
}]
},
"total": 2
}
""")
if '/crashes' in url:
# This list needs to match the versions as done in the common
# fixtures set up in setUp() above.
return Response("""
{
"hits": {}
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url, {
'p': 'WaterWolf',
'date_start': u' \x00'
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'p': 'WaterWolf',
'date_range_type': 'any old crap'
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'p': 'WaterWolf',
'hang_type': 'any old crap'
})
eq_(response.status_code, 400)
response = self.client.get(url, {
'p': 'WaterWolf',
'format': 'csv',
})
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'text/csv')
# last sanity check
response = self.client.get(url, {
'p': 'WaterWolf',
})
eq_(response.status_code, 200)
def test_quick_search(self):
url = reverse('crashstats:quick_search')
# Test with no parameter.
response = self.client.get(url)
eq_(response.status_code, 302)
target = reverse('supersearch.search')
ok_(response['location'].endswith(target))
# Test with a signature.
response = self.client.get(
url,
{'query': 'moz'}
)
eq_(response.status_code, 302)
target = reverse('supersearch.search') + '?signature=%7Emoz'
ok_(response['location'].endswith(target))
# Test with a crash_id.
crash_id = '1234abcd-ef56-7890-ab12-abcdef130802'
response = self.client.get(
url,
{'query': crash_id}
)
eq_(response.status_code, 302)
target = reverse(
'crashstats:report_index',
kwargs=dict(crash_id=crash_id)
)
ok_(response['location'].endswith(target))
# Test a simple search containing a crash id and spaces
crash_id = ' 1234abcd-ef56-7890-ab12-abcdef130802 '
response = self.client.get(
url,
{'query': crash_id}
)
eq_(response.status_code, 302)
ok_(response['location'].endswith(target))
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_query(self, rget, rpost):
def mocked_post(**options):
assert '/bugs' in options['url'], options['url']
return Response("""
{"hits": [
{
"id": "123456",
"signature": "nsASDOMWindowEnumerator::GetNext()"
}
],
"total": 1
}
""")
def mocked_get(url, params, **options):
assert '/search/signatures' in url
if 'products' in params and 'WaterWolf' in params['products']:
return Response("""{
"hits": [
{
"count": 586,
"signature": "nsASDOMWindowEnumerator::GetNext()",
"numcontent": 0,
"is_windows": 586,
"is_linux": 0,
"numplugin": 56,
"is_mac": 0,
"numhang": 0
},
{
"count": 13,
"signature": "mySignatureIsCool",
"numcontent": 0,
"is_windows": 10,
"is_linux": 2,
"numplugin": 0,
"is_mac": 1,
"numhang": 0
},
{
"count": 2,
"signature": "mineIsCoolerThanYours",
"numcontent": 0,
"is_windows": 0,
"is_linux": 0,
"numplugin": 0,
"is_mac": 2,
"numhang": 2
},
{
"count": 2,
"signature": null,
"numcontent": 0,
"is_windows": 0,
"is_linux": 0,
"numplugin": 0,
"is_mac": 2,
"numhang": 2
}
],
"total": 4
} """)
elif 'products' in params and 'NightTrain' in params['products']:
return Response('{"hits": [], "total": 0}')
elif 'products' in params and 'SeaMonkey' in params['products']:
ok_('plugin_search_mode' in params)
eq_(params['plugin_search_mode'], 'is_exactly')
return Response("""
{"hits": [
{
"count": 586,
"signature": "nsASDOMWindowEnumerator::GetNext()",
"numcontent": 0,
"is_windows": 586,
"is_linux": 0,
"numplugin": 533,
"is_mac": 0,
"numhang": 0,
"pluginname": "superAddOn",
"pluginfilename": "addon.dll",
"pluginversion": "1.2.3"
}],
"total": 1
}
""")
else:
return Response("""
{"hits": [
{
"count": 586,
"signature": "nsASDOMWindowEnumerator::GetNext()",
"numcontent": 0,
"is_windows": 586,
"is_linux": 0,
"numplugin": 0,
"is_mac": 0,
"numhang": 0
}],
"total": 1
}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:query')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' not in response.content)
ok_('table id="signatureList"' not in response.content)
# Verify that the passed product is selected in search form
response = self.client.get(url, {'product': 'NightTrain'})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' not in response.content)
ok_('table id="signatureList"' not in response.content)
ok_('value="NightTrain" selected' in response.content)
# Verify that the passed version is selected in nav
response = self.client.get(url, {
'product': 'NightTrain',
'version': 'NightTrain:18.0'
})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' not in response.content)
ok_('table id="signatureList"' not in response.content)
# Because versions in the search form only gets set on DOM ready,
# we here ensure that the version was passed and set by checking
# that the correct version is selected in the versions drop-down.
ok_('option value="18.0" selected' in response.content)
response = self.client.get(url, {
'product': 'WaterWolf',
'date': '2012-01-01'
})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' in response.content)
ok_('table id="signatureList"' in response.content)
ok_('nsASDOMWindowEnumerator::GetNext()' in response.content)
ok_('mySignatureIsCool' in response.content)
ok_('mineIsCoolerThanYours' in response.content)
ok_('(null signature)' in response.content)
# Test that the default value for query_type is 'contains'
ok_('<option value="contains" selected' in response.content)
# Test with empty results
response = self.client.get(url, {
'product': 'NightTrain',
'date': '2012-01-01'
})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' in response.content)
ok_('The maximum query date' not in response.content)
ok_('table id="signatureList"' not in response.content)
ok_('Results within' in response.content)
ok_('No results were found' in response.content)
response = self.client.get(url, {'query': 'nsASDOMWindowEnumerator'})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' in response.content)
ok_('table id="signatureList"' in response.content)
ok_('nsASDOMWindowEnumerator::GetNext()' in response.content)
ok_('123456' in response.content)
# Test that the signature parameter is used as default value
response = self.client.get(url, {'signature': 'myFunctionIsCool'})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' not in response.content)
ok_('table id="signatures-list"' not in response.content)
ok_('value="myFunctionIsCool"' in response.content)
# Test that null bytes break the page cleanly
response = self.client.get(url, {'date': u' \x00'})
eq_(response.status_code, 400)
ok_('<h2>Query Results</h2>' not in response.content)
ok_('Enter a valid date/time' in response.content)
# Test that do_query forces the query
response = self.client.get(url, {
'do_query': 1,
'product': 'WaterWolf'
})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' in response.content)
ok_('table id="signatureList"' in response.content)
ok_('nsASDOMWindowEnumerator::GetNext()' in response.content)
# Test that old query types are changed
# Test that plugin data is displayed
response = self.client.get(url, {
'do_query': 1,
'product': 'SeaMonkey',
'plugin_query_type': 'exact',
'process_type': 'plugin',
})
eq_(response.status_code, 200)
ok_('<h2>Query Results</h2>' in response.content)
ok_('table id="signatureList"' in response.content)
ok_('nsASDOMWindowEnumerator::GetNext()' in response.content)
ok_('Plugin Filename' in response.content)
ok_('Plugin Name/Ver' in response.content)
ok_('addon.dll' in response.content)
ok_('superAddOn 1.2.3' in response.content)
# Test 'all' is an accepted value for report_type and hang_type
response = self.client.get(url, {
'do_query': 1,
'product': 'WaterWolf',
'hang_type': 'all',
'process_type': 'all',
})
eq_(response.status_code, 200)
ok_('table id="signatureList"' in response.content)
ok_('value="any" checked' in response.content)
        # Test default date
expected = datetime.datetime.utcnow()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_(expected.strftime('%m/%d/%Y %H:00:00') in response.content)
# Test passed date
response = self.client.get(url, {
'date': '11/27/2031 10:10:10'
})
eq_(response.status_code, 200)
ok_('11/27/2031 10:10:10' in response.content)
# Test value of build ids
response = self.client.get(url, {
'build_id': '12345'
})
eq_(response.status_code, 200)
ok_('value="12345"' in response.content)
response = self.client.get(url, {
'build_id': '12345,54321'
})
eq_(response.status_code, 200)
ok_('value="12345, 54321"' in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_query_range(self, rget, rpost):
def mocked_post(**options):
return Response('{"hits": [], "total": 0}')
def mocked_get(url, params, **options):
assert '/search/signatures' in url
response = ','.join('''
{
"count": %(x)s,
"signature": "sig%(x)s",
"numcontent": 0,
"is_windows": %(x)s,
"is_linux": 0,
"numplugin": 0,
"is_mac": 0,
"numhang": 0
}
''' % {'x': x} for x in range(150))
return Response('{"hits": [%s], "total": 150}' % response)
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:query')
# Test an out-of-range date range
response = self.client.get(url, {
'query': 'js::',
'range_unit': 'weeks',
'range_value': 9
})
eq_(response.status_code, 200)
ok_('The maximum query date' in response.content)
ok_('Admins may log in' in response.content)
ok_('name="range_value" value="%s"' % settings.QUERY_RANGE_DEFAULT_DAYS
in response.content)
ok_('value="days" selected' in response.content)
# Test an out-of-range date range for a logged in user
user = self._login()
group = self._create_group_with_permission('run_long_queries')
user.groups.add(group)
response = self.client.get(url, {
'query': 'js::',
'range_unit': 'weeks',
'range_value': 9
})
eq_(response.status_code, 200)
# we're logged in, that works now
ok_('The maximum query date' not in response.content)
# ... but this doesn't
response = self.client.get(url, {
'query': 'js::',
'range_unit': 'weeks',
'range_value': 30
})
eq_(response.status_code, 200)
ok_('The maximum query date' in response.content)
# an admin won't see that message
ok_('Admins may log in' not in response.content)
ok_('name="range_value" value="%s"' % settings.QUERY_RANGE_DEFAULT_DAYS
in response.content)
ok_('value="days" selected' in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_query_pagination(self, rget, rpost):
def mocked_post(**options):
return Response('{"hits": [], "total": 0}')
def mocked_get(url, params, **options):
assert '/search/signatures' in url
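            # build 150 fake signature rows so the view has more than one
            # page of results to paginate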
response = ','.join('''
{
"count": %(x)s,
"signature": "sig%(x)s",
"numcontent": 0,
"is_windows": %(x)s,
"is_linux": 0,
"numplugin": 0,
"is_mac": 0,
"numhang": 0
}
''' % {'x': x} for x in range(150))
return Response('{"hits": [%s], "total": 150}' % response)
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:query')
response = self.client.get(url, {'do_query': 1})
eq_(response.status_code, 200)
next_page_url = '%s?do_query=1&page=2' % url
ok_(next_page_url in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_query_summary(self, rget, rpost):
def mocked_post(**options):
return Response('{"hits": [], "total": 0}')
def mocked_get(url, params, **options):
return Response('{"hits": [], "total": 0}')
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:query')
response = self.client.get(url, {
'query': 'test',
'query_type': 'contains'
})
eq_(response.status_code, 200)
ok_('Results within' in response.content)
ok_("crash signature contains 'test'" in response.content)
ok_('the crashing process was of any type' in response.content)
response = self.client.get(url, {
'query': 'test',
'query_type': 'is_exactly',
'build_id': '1234567890',
'product': ['WaterWolf', 'NightTrain'],
'version': ['WaterWolf:18.0'],
'platform': ['mac'],
'process_type': 'plugin',
'plugin_query_type': 'starts_with',
'plugin_query_field': 'filename',
'plugin_query': 'lib'
})
eq_(response.status_code, 200)
ok_('Results within' in response.content)
ok_("crash signature is exactly 'test'" in response.content)
ok_('product is one of WaterWolf, NightTrain' in response.content)
ok_('version is one of WaterWolf:18.0' in response.content)
ok_('platform is one of Mac OS X' in response.content)
ok_('for build 1234567890' in response.content)
ok_('the crashing process was a plugin' in response.content)
ok_('and its filename starts with lib' in response.content)
@override_settings(SEARCH_MIDDLEWARE_IMPL='elasticsearch')
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_query_force_impl_settings(self, rget, rpost):
def mocked_post(**options):
return Response('{"hits": [], "total": 0}')
def mocked_get(url, params, **options):
ok_('_force_api_impl' in params)
eq_('elasticsearch', params['_force_api_impl'])
return Response('{"hits": [], "total": 0}')
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:query')
response = self.client.get(url, {
'do_query': 1,
})
eq_(response.status_code, 200)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_query_force_impl_url(self, rget, rpost):
def mocked_post(**options):
return Response('{"hits": [], "total": 0}')
def mocked_get(url, params, **options):
ok_('_force_api_impl' in params)
eq_('postgres', params['_force_api_impl'])
return Response('{"hits": [], "total": 0}')
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:query')
response = self.client.get(url, {
'do_query': 1,
'_force_api_impl': 'postgres'
})
eq_(response.status_code, 200)
@override_settings(SEARCH_MIDDLEWARE_IMPL='mongodb')
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_query_force_impl_url_over_settings(self, rget, rpost):
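# a _force_api_impl passed in the URL should win over the
# SEARCH_MIDDLEWARE_IMPL settings value ('mongodb' above)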
def mocked_post(**options):
return Response('{"hits": [], "total": 0}')
def mocked_get(url, params, **options):
ok_('_force_api_impl' in params)
eq_('mysql', params['_force_api_impl'])
return Response('{"hits": [], "total": 0}')
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:query')
response = self.client.get(url, {
'do_query': 1,
'_force_api_impl': 'mysql'
})
eq_(response.status_code, 200)
@mock.patch('requests.get')
def test_plot_signature(self, rget):
def mocked_get(url, params, **options):
if '/crashes/signature_history' in url:
return Response("""
{
"hits": [],
"total": 0
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
# missing signature
url = reverse('crashstats:plot_signature',
args=('WaterWolf', '19.0',
'2011-12-01', '2011-12-02', ''))
response = self.client.get(url)
eq_(response.status_code, 400)
# invalid start date
url = reverse('crashstats:plot_signature',
args=('WaterWolf', '19.0',
'2012-02-33', '2012-12-01',
'Read::Bytes'))
response = self.client.get(url)
eq_(response.status_code, 400)
# invalid end date
url = reverse('crashstats:plot_signature',
args=('WaterWolf', '19.0',
'2012-02-28', '2012-13-01',
'Read::Bytes'))
response = self.client.get(url)
eq_(response.status_code, 400)
# valid dates
url = reverse('crashstats:plot_signature',
args=('WaterWolf', '19.0',
'2011-12-01', '2011-12-02',
'Read::Bytes'))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
ok_(struct['signature'])
@mock.patch('requests.get')
def test_explosive_view_without_explosives(self, rget):
url = reverse('crashstats:explosive')
def mocked_get(url, params, **options):
if '/suspicious' in url:
return Response("""
{"hits": [], "total": 0}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
resp = self.client.get(url)
eq_(resp.status_code, 200)
assert 'No explosive crashes found' in resp.content
@mock.patch('requests.get')
def test_explosive_view_with_explosives(self, rget):
url = reverse('crashstats:explosive')
def mocked_get(url, params, **options):
if '/suspicious' in url:
return Response("""
{"hits": [
{"date": "2013-09-01",
"signatures": ["signature1", "signature2"]
}
], "total": 1}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
resp = self.client.get(url)
eq_(resp.status_code, 200)
assert 'is explosive' in resp.content
@mock.patch('requests.get')
def test_explosive_data(self, rget):
url = reverse('crashstats:explosive_data',
args=('signature', '2013-03-05'))
def mocked_get(url, params, **options):
if '/crashes/count_by_day' in url:
return Response("""{
"hits": {
"2013-02-26": 100,
"2013-02-27": 100,
"2013-02-28": 100,
"2013-03-01": 100,
"2013-03-02": 100,
"2013-03-03": 100,
"2013-03-04": 100,
"2013-03-05": 100,
"2013-03-06": 100,
"2013-03-07": 100,
"2013-03-08": 100
}
}""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
resp = json.loads(response.content)
ok_('counts' in resp)
# returns 11 days of data since the requested date is in the past:
# the first day is 7 days before it, the last is 3 days after.
eq_(len(resp['counts']), 11)
eq_(resp['counts'][0][0], '2013-02-26')
eq_(resp['counts'][0][1], 100)
eq_(resp['counts'][-1][0], '2013-03-08')
eq_(resp['counts'][-1][1], 100)
@mock.patch('requests.get')
def test_explosive_data_today(self, rget):
now = datetime.datetime.utcnow()
start = now - datetime.timedelta(10)
now = now.strftime('%Y-%m-%d')
start = start.strftime('%Y-%m-%d')
url = reverse('crashstats:explosive_data', args=('signature', now))
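# when the explosive date is today, the window is presumably clamped so it
# cannot extend into the future: 10 days before through today, still 11 points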
def mocked_get(url, params, **options):
if '/crashes/count_by_day' in url:
dates = []
current = datetime.datetime.strptime(start, "%Y-%m-%d")
end = datetime.datetime.strptime(now, "%Y-%m-%d")
while current <= end:
dates.append(current.strftime("%Y-%m-%d"))
current += datetime.timedelta(1)
return Response("""{
"hits": {
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100,
"%s": 100
}
}""" % tuple(dates))
rget.side_effect = mocked_get
response = self.client.get(url)
eq_(response.status_code, 200)
resp = json.loads(response.content)
eq_(resp['counts'][0][0], start)
eq_(resp['counts'][0][1], 100)
eq_(resp['counts'][-1][0], now)
eq_(resp['counts'][-1][1], 100)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_topchangers(self, rget, rpost):
url = reverse('crashstats:topchangers',
args=('WaterWolf', '19.0'))
bad_url = reverse('crashstats:topchangers',
args=('SeaMonkey', '19.0'))
bad_url2 = reverse('crashstats:topchangers',
args=('WaterWolf', '19.999'))
url_wo_version = reverse('crashstats:topchangers',
args=('WaterWolf',))
def mocked_post(**options):
assert 'by=signatures' in options['url'], options['url']
return Response("""
{"bug_associations": [{"bug_id": "123456789",
"signature": "Something"}]}
""")
def mocked_get(url, params, **options):
if '/crashes/signatures' in url:
return Response("""
{"crashes": [
{
"count": 188,
"mac_count": 66,
"content_count": 0,
"first_report": "2012-06-21",
"startup_percent": 0.0,
"currentRank": 0,
"previousRank": 1,
"first_report_exact": "2012-06-21T21:28:08",
"versions":
"2.0, 2.1, 3.0a2, 3.0b2, 3.1b1, 4.0a1, 4.0a2, 5.0a1",
"percentOfTotal": 0.24258064516128999,
"win_count": 56,
"changeInPercentOfTotal": 0.011139597126354983,
"linux_count": 66,
"hang_count": 0,
"signature": "FakeSignature1",
"versions_count": 8,
"changeInRank": 0,
"plugin_count": 0,
"previousPercentOfTotal": 0.23144104803493501,
"is_gc_count": 10
}
],
"totalPercentage": 0,
"start_date": "2012-05-10",
"end_date": "2012-05-24",
"totalNumberOfCrashes": 0}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
response = self.client.get(url_wo_version)
eq_(response.status_code, 200)
# invalid version for the product name
response = self.client.get(bad_url)
eq_(response.status_code, 404)
# invalid version for the product name
response = self.client.get(bad_url2)
eq_(response.status_code, 404)
response = self.client.get(url)
eq_(response.status_code, 200)
def test_topchangers_without_versions_redirect(self):
response = self.client.get('/topchangers/products/WaterWolf/versions/')
redirect_code = 301 if settings.PERMANENT_LEGACY_REDIRECTS else 302
eq_(response.status_code, redirect_code)
actual_url = reverse('crashstats:topchangers',
kwargs={'product': 'WaterWolf'})
ok_(response['location'].endswith(actual_url))
@mock.patch('requests.get')
def test_signature_summary(self, rget):
def mocked_get(url, params, **options):
if '/signaturesummary' in url:
assert params['report_types']
return Response({
"reports": {
"products": [
{
"version_string": "33.0a2",
"percentage": "57.542",
"report_count": 103,
"product_name": "Firefox"
},
],
"uptime": [
{
"category": "< 1 min",
"percentage": "29.126",
"report_count": 30
}
],
"architecture": [
{
"category": "x86",
"percentage": "100.000",
"report_count": 103
}
],
"flash_version": [
{
"category": "[blank]",
"percentage": "100.000",
"report_count": 103
}
],
"graphics": [
{
"report_count": 24,
"adapter_name": None,
"vendor_hex": "0x8086",
"percentage": "23.301",
"vendor_name": None,
"adapter_hex": "0x0166"
}
],
"distinct_install": [
{
"crashes": 103,
"version_string": "33.0a2",
"product_name": "Firefox",
"installations": 59
}
],
"devices": [
{
"cpu_abi": "XXX",
"manufacturer": "YYY",
"model": "ZZZ",
"version": "1.2.3",
"report_count": 52311,
"percentage": "48.440",
}
],
"os": [
{
"category": "Windows 8.1",
"percentage": "55.340",
"report_count": 57
}
],
"process_type": [
{
"category": "Browser",
"percentage": "100.000",
"report_count": 103
}
],
"exploitability": [
{
"low_count": 0,
"high_count": 0,
"null_count": 0,
"none_count": 4,
"report_date": "2014-08-12",
"medium_count": 0
}
]
}
})
raise NotImplementedError(url)
url = reverse('crashstats:signature_summary')
rget.side_effect = mocked_get
# first try without the necessary parameters
response = self.client.get(url)
eq_(response.status_code, 400)
response = self.client.get(url, {
'range_value': '1',
'signature': 'sig',
'version': 'WaterWolf:19.0'
})
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
ok_(struct['architectures'])
ok_(struct['flashVersions'])
ok_(struct['percentageByOs'])
ok_(struct['processTypes'])
ok_(struct['productVersions'])
ok_(struct['uptimeRange'])
ok_(struct['distinctInstall'])
ok_(struct['devices'])
ok_(struct['graphics'])
ok_(not struct['canViewExploitability'])
ok_('exploitabilityScore' not in struct)
# percentages are rounded and turned into strings because they're fed
# straight into a mustache template.
# for example, the mocked "29.126" comes out as:
eq_(struct['uptimeRange'][0]['percentage'], '29.13')
user = self._login()
group = self._create_group_with_permission('view_exploitability')
user.groups.add(group)
response = self.client.get(url, {'range_value': '1',
'signature': 'sig',
'version': 'WaterWolf:19.0'})
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
ok_(struct['canViewExploitability'])
ok_(struct['exploitabilityScore'])
@mock.patch('requests.get')
def test_signature_summary_flash_exploitability(self, rget):
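# with only the view_flash_exploitability permission, exploitability data
# is shown for sig1 (all real Flash versions) but hidden for sig2, whose
# flash_version buckets include a "[blank]" entry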
def mocked_get(url, params, **options):
signature_summary_data = copy.deepcopy(SAMPLE_SIGNATURE_SUMMARY)
if '/signaturesummary' in url:
if 'sig1' in params['signature']:
signature_summary_data['reports']['flash_version'] = [
{
"category": "11.9.900.117",
"percentage": "50.794",
"report_count": 320
},
{
"category": "11.9.900.152",
"percentage": "45.397",
"report_count": 286
},
{
"category": "11.7.700.224",
"percentage": "1.429",
"report_count": 9
}
]
elif 'sig2' in params['signature']:
signature_summary_data['reports']['flash_version'] = [
{
"category": "11.9.900.117",
"percentage": "50.794",
"report_count": 320
},
{
"category": "[blank]",
"percentage": "45.397",
"report_count": 286
},
{
"category": "11.7.700.224",
"percentage": "1.429",
"report_count": 9
}
]
return Response(signature_summary_data)
raise NotImplementedError(url)
url = reverse('crashstats:signature_summary')
rget.side_effect = mocked_get
user = self._login()
group = self._create_group_with_permission('view_flash_exploitability')
user.groups.add(group)
response = self.client.get(url, {
'range_value': '1',
'signature': 'sig1',
'version': 'WaterWolf:19.0'
})
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
ok_(struct['canViewExploitability'])
ok_(struct['exploitabilityScore'])
response = self.client.get(url, {'range_value': '1',
'signature': 'sig2', # different
'version': 'WaterWolf:19.0'})
eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
ok_(not struct['canViewExploitability'])
ok_('exploitabilityScore' not in struct)
@mock.patch('requests.get')
def test_status(self, rget):
def mocked_get(url, **options):
assert '/server_status' in url, url
return Response(SAMPLE_STATUS)
rget.side_effect = mocked_get
url = reverse('crashstats:status')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('schema_12345' in response.content)
ok_('017d7b3f7042ce76bc80949ae55b41d1e915ab62' in response.content)
ok_('1035' in response.content)
ok_('Sep 28 2012 20:30:01' in response.content)
@mock.patch('requests.get')
def test_status_revision(self, rget):
def mocked_get(url, **options):
assert '/server_status' in url, url
return Response(SAMPLE_STATUS)
rget.side_effect = mocked_get
url = reverse('crashstats:status_revision')
response = self.client.get(url)
eq_(response.status_code, 200)
eq_(response.content, '017d7b3f7042ce76bc80949ae55b41d1e915ab62')
ok_('text/plain' in response['content-type'])
def test_login_required(self):
url = reverse(
'crashstats:exploitable_crashes',
args=(settings.DEFAULT_PRODUCT,)
)
response = self.client.get(url)
eq_(response.status_code, 302)
# the redirect must point at the login page and carry a ?next= back-link;
# the original assertion concatenated before testing, making it vacuous
ok_(settings.LOGIN_URL in response['Location'])
ok_('?next=%s' % url in response['Location'])
@mock.patch('requests.get')
def test_status_json(self, rget):
def mocked_get(**options):
assert '/server_status' in options['url'], options['url']
return Response(SAMPLE_STATUS)
rget.side_effect = mocked_get
url = reverse('crashstats:status_json')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_(response.content.strip().startswith('{'))
ok_('017d7b3f7042ce76bc80949ae55b41d1e915ab62' in response.content)
ok_('1035' in response.content)
ok_('2012-09-28T20:30:01+00:00' in response.content)
ok_('application/json' in response['Content-Type'])
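# the JSON status endpoint is CORS-enabled for any origin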
eq_('*', response['Access-Control-Allow-Origin'])
def test_crontabber_state(self):
url = reverse('crashstats:crontabber_state')
response = self.client.get(url)
eq_(response.status_code, 200)
@mock.patch('requests.get')
def test_your_crashes(self, rget):
url = reverse('crashstats:your_crashes')
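# the view searches Super Search for crashes filed with the signed-in
# user's email address, a PII-protected field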
def mocked_get(url, params, **options):
assert '/supersearch/' in url
if '/supersearch/fields/' in url:
return Response({
'email': {
'name': 'email',
'query_type': 'string',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': ['crashstats.view_pii'],
'default_value': None,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
}
})
assert 'email' in params
assert params['email'] == ['test@mozilla.com']
return Response({
'hits': [
{
'uuid': '1234abcd-ef56-7890-ab12-abcdef130801',
'date': '2000-01-01T00:00:00'
},
{
'uuid': '1234abcd-ef56-7890-ab12-abcdef130802',
'date': '2000-01-02T00:00:00'
}
],
'total': 2
})
rget.side_effect = mocked_get
# A user needs to be signed in to see this page.
response = self.client.get(url)
eq_(response.status_code, 302)
self.assertRedirects(
response,
reverse('crashstats:login') + '?next=%s' % url
)
self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('1234abcd-ef56-7890-ab12-abcdef130801' in response.content)
ok_('1234abcd-ef56-7890-ab12-abcdef130802' in response.content)
ok_('test@mozilla.com' in response.content)
@mock.patch('requests.get')
def test_your_crashes_no_data(self, rget):
url = reverse('crashstats:your_crashes')
def mocked_get(url, params, **options):
assert '/supersearch/' in url
if '/supersearch/fields/' in url:
return Response({
'email': {
'name': 'email',
'query_type': 'string',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': ['crashstats.view_pii'],
'default_value': None,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
}
})
assert 'email' in params
assert params['email'] == ['test@mozilla.com']
return Response({
'hits': [],
'total': 0
})
rget.side_effect = mocked_get
self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('test@mozilla.com' in response.content)
ok_('no crash report' in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index(self, rget, rpost):
# using \\n because it goes into the JSON string
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod|1"
comment0 = "This is a comment\\nOn multiple lines"
comment0 += "\\npeterbe@mozilla.com"
comment0 += "\\nwww.p0rn.com"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
def mocked_get(url, params, **options):
if '/crash_data' in url:
assert 'datatype' in params
if params['datatype'] == 'meta':
return Response(SAMPLE_META % (email0, url0))
if params['datatype'] == 'unredacted':
return Response(SAMPLE_UNREDACTED % (
dump,
comment0
))
if 'correlations/signatures' in url:
return Response("""
{
"hits": [
"FakeSignature1",
"FakeSignature2"
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response(BUG_STATUS)
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
response = self.client.get(url)
eq_(response.status_code, 200)
# which bug IDs appear is important and the order matters too
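# 444444 must be absent entirely (find() == -1) while 333333 must
# appear before 222222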
ok_(
-1 ==
response.content.find('444444') <
response.content.find('333333') <
response.content.find('222222')
)
ok_('FakeSignature1' in response.content)
ok_('11cb72f5-eb28-41e1-a8e4-849982120611' in response.content)
comment_transformed = (
comment0
.replace('\\n', '<br>')
.replace('peterbe@mozilla.com', '(email removed)')
.replace('www.p0rn.com', '(URL removed)')
)
ok_(comment_transformed in response.content)
# but the email should have been scrubbed
ok_('peterbe@mozilla.com' not in response.content)
ok_(email0 not in response.content)
ok_(url0 not in response.content)
ok_(
'You need to be signed in to be able to download raw dumps.'
in response.content
)
# Should not be able to see sensitive key from stackwalker JSON
ok_('"sensitive"' not in response.content)
ok_('"exploitability"' not in response.content)
# the email address will appear if we log in
user = self._login()
group = self._create_group_with_permission('view_pii')
user.groups.add(group)
assert user.has_perm('crashstats.view_pii')
response = self.client.get(url)
ok_('peterbe@mozilla.com' in response.content)
ok_(email0 in response.content)
ok_(url0 in response.content)
ok_('"sensitive"' in response.content)
ok_('"exploitability"' in response.content)
eq_(response.status_code, 200)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_with_additional_raw_dump_links(self, rget, rpost):
# using \\n because it goes into the JSON string
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod|1"
def mocked_get(url, params, **options):
if '/crash_data' in url:
assert 'datatype' in params
if params['datatype'] == 'meta':
return Response({
"InstallTime": "1339289895",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "secret@email.com",
"Vendor": "Mozilla",
"URL": "farmville.com",
"additional_minidumps": "foo, bar,",
})
if params['datatype'] == 'unredacted':
return Response({
"client_crash_date": "2012-06-11T06:08:45",
"dump": dump,
"signature": "FakeSignature1",
"user_comments": None,
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": None,
"distributor_version": None,
"truncated": True,
"process_type": None,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": None,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": True,
"exploitability": "Unknown Exploitability"
})
if 'correlations/signatures' in url:
return Response("""
{
"hits": [
"FakeSignature1",
"FakeSignature2"
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response(BUG_STATUS)
raise NotImplementedError(url)
rpost.side_effect = mocked_post
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
url = reverse('crashstats:report_index', args=(crash_id,))
response = self.client.get(url)
eq_(response.status_code, 200)
# first of all, expect these basic URLs
raw_json_url = reverse('crashstats:raw_data', args=(crash_id, 'json'))
raw_dmp_url = reverse('crashstats:raw_data', args=(crash_id, 'dmp'))
# not quite yet
ok_(raw_json_url not in response.content)
ok_(raw_dmp_url not in response.content)
user = self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
# still they don't appear
ok_(raw_json_url not in response.content)
ok_(raw_dmp_url not in response.content)
group = self._create_group_with_permission('view_rawdump')
user.groups.add(group)
response = self.client.get(url)
eq_(response.status_code, 200)
# finally they appear
ok_(raw_json_url in response.content)
ok_(raw_dmp_url in response.content)
# also, check that the other links are there
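# (these come from the "additional_minidumps" field mocked above;
# its trailing comma should presumably be ignored)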
foo_dmp_url = reverse(
'crashstats:raw_data_named',
args=(crash_id, 'upload_file_minidump_foo', 'dmp')
)
ok_(foo_dmp_url in response.content)
bar_dmp_url = reverse(
'crashstats:raw_data_named',
args=(crash_id, 'upload_file_minidump_bar', 'dmp')
)
ok_(bar_dmp_url in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_fennecandroid_report(self, rget, rpost):
# using \\n because it goes into the JSON string
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod|1"
comment0 = "This is a comment\\nOn multiple lines"
comment0 += "\\npeterbe@mozilla.com"
comment0 += "\\nwww.p0rn.com"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
def mocked_get(url, params, **options):
if '/crash_data' in url:
assert 'datatype' in params
if params['datatype'] == 'meta':
return Response(SAMPLE_META % (email0, url0))
if params['datatype'] == 'unredacted':
raw_crash_json = SAMPLE_UNREDACTED % (
dump,
comment0
)
raw_crash_json = json.loads(raw_crash_json)
raw_crash_json['product'] = 'WinterSun'
return Response(raw_crash_json)
if 'correlations/signatures' in url:
return Response("""
{
"hits": [
"FakeSignature1",
"FakeSignature2"
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response(BUG_STATUS)
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
bug_product_map = {
'WinterSun': 'Winter Is Coming'
}
with self.settings(BUG_PRODUCT_MAP=bug_product_map):
response = self.client.get(url)
eq_(response.status_code, 200)
doc = pyquery.PyQuery(response.content)
link = doc('#bugzilla a[target="_blank"]').eq(0)
eq_(link.text(), 'Winter Is Coming')
ok_('product=Winter+Is+Coming' in link.attr('href'))
# also, the "More Reports" link should have WinterSun in it
link = doc('a.sig-overview').eq(0)
ok_('product=WinterSun' in link.attr('href'))
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_odd_product_and_version(self, rget, rpost):
"""If the processed JSON references an unfamiliar product and
version it should not use that to make links in the nav to
reports for that unfamiliar product and version."""
# using \\n because it goes into the JSON string
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod|1"
comment0 = "This is a comment\\nOn multiple lines"
comment0 += "\\npeterbe@mozilla.com"
comment0 += "\\nwww.p0rn.com"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
def mocked_get(url, params, **options):
if '/crash_data' in url:
assert 'datatype' in params
if params['datatype'] == 'meta':
return Response(SAMPLE_META % (email0, url0))
if params['datatype'] == 'unredacted':
processed_json = SAMPLE_UNREDACTED % (dump, comment0)
assert '"WaterWolf"' in processed_json
assert '"5.0a1"' in processed_json
processed_json = processed_json.replace(
'"WaterWolf"', '"SummerWolf"'
)
processed_json = processed_json.replace(
'"5.0a1"', '"99.9"'
)
return Response(processed_json)
if 'correlations/signatures' in url:
return Response("""
{
"hits": [
"FakeSignature1",
"FakeSignature2"
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response(BUG_STATUS)
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
response = self.client.get(url)
eq_(response.status_code, 200)
# the title should have the "SummerWolf 99.9" in it
doc = pyquery.PyQuery(response.content)
title = doc('title').text()
ok_('SummerWolf' in title)
ok_('99.9' in title)
# there shouldn't be any links to reports for the product
# mentioned in the processed JSON
bad_url = reverse('crashstats:home', args=('SummerWolf',))
ok_(bad_url not in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_correlations_failed(self, rget, rpost):
# using \\n because it goes into the JSON string
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod|1"
comment0 = "This is a comment"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
def mocked_get(url, params, **options):
if '/crash_data' in url:
assert 'datatype' in params
if params['datatype'] == 'meta':
return Response(SAMPLE_META % (email0, url0))
if params['datatype'] == 'unredacted':
return Response(SAMPLE_UNREDACTED % (
dump,
comment0
))
if 'correlations/signatures' in url:
raise models.BadStatusCodeError(500)
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response(BUG_STATUS)
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
response = self.client.get(url)
eq_(response.status_code, 200)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_no_dump(self, rget, rpost):
dump = ""
comment0 = "This is a comment"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
def mocked_get(url, params, **options):
if '/crash_data' in url:
assert 'datatype' in params
if params['datatype'] == 'meta':
return Response(SAMPLE_META % (email0, url0))
if params['datatype'] == 'unredacted':
data = json.loads(
SAMPLE_UNREDACTED % (dump, comment0)
)
del data['dump']
del data['json_dump']
return Response(data)
if 'correlations/signatures' in url:
raise models.BadStatusCodeError(500)
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response(BUG_STATUS)
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('No dump available' in response.content)
def test_report_index_invalid_crash_id(self):
# last 6 digits indicate 30th Feb 2012 which doesn't exist
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120230'])
response = self.client.get(url)
eq_(response.status_code, 400)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_pending_today(self, rget, rpost):
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
raise models.BadStatusCodeError(404)
raise NotImplementedError(url)
rget.side_effect = mocked_get
today = datetime.datetime.utcnow().strftime('%y%m%d')
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982%s' % today])
response = self.client.get(url)
ok_('pendingStatus' in response.content)
eq_(response.status_code, 200)
yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1)
yesterday = yesterday.strftime('%y%m%d')
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982%s' % yesterday])
response = self.client.get(url)
ok_('Crash Not Found' in response.content)
eq_(response.status_code, 200)
url = reverse('crashstats:report_index',
args=['blablabla'])
response = self.client.get(url)
eq_(response.status_code, 400)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_with_hangid_in_raw_data(self, rget, rpost):
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod|1"
comment0 = "This is a comment"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
email1 = "some@otheremailaddress.com"
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'meta'
):
return Response("""
{
"InstallTime": "1339289895",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "%s",
"Vendor": "Mozilla",
"URL": "%s",
"HangID": "123456789"
}
""" % (email0, url0))
if '/crashes/paireduuid' in url:
return Response("""
{
"hits": [{
"uuid": "e8820616-1462-49b6-9784-e99a32120201"
}],
"total": 1
}
""")
if 'crashes/comments' in url:
return Response("""
{
"hits": [
{
"user_comments": "%s",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "%s",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}
],
"total": 1
}
""" % (comment0, email1))
if 'correlations/signatures' in url:
return Response("""
{
"hits": [
"FakeSignature1",
"FakeSignature2"
],
"total": 2
}
""")
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
return Response("""
{
"client_crash_date": "2012-06-11T06:08:45",
"dump": "%s",
"signature": "FakeSignature1",
"user_comments": null,
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": null,
"distributor_version": null,
"truncated": true,
"process_type": null,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": true,
"exploitability": "Unknown Exploitability"
}
""" % dump)
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
response = self.client.get(url)
ok_('Hang Minidump' in response.content)
# the HangID in the fixture above
ok_('123456789' in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_with_invalid_InstallTime(self, rget, rpost):
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod|1"
comment0 = "This is a comment"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
email1 = "some@otheremailaddress.com"
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'meta'
):
return Response("""
{
"InstallTime": "Not a number",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "%s",
"Vendor": "Mozilla",
"URL": "%s",
"HangID": "123456789"
}
""" % (email0, url0))
if '/crashes/paireduuid' in url:
return Response("""
{
"hits": [{
"uuid": "e8820616-1462-49b6-9784-e99a32120201"
}],
"total": 1
}
""")
if 'crashes/comments' in url:
return Response("""
{
"hits": [
{
"user_comments": "%s",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "%s",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}
],
"total": 1
}
""" % (comment0, email1))
if 'correlations/signatures' in url:
return Response("""
{
"hits": [
"FakeSignature1",
"FakeSignature2"
],
"total": 2
}
""")
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
return Response("""
{
"client_crash_date": "2012-06-11T06:08:45",
"dump": "%s",
"signature": "FakeSignature1",
"user_comments": null,
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": null,
"distributor_version": null,
"truncated": true,
"process_type": null,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": true,
"exploitability": "Unknown Exploitability"
}
""" % dump)
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
response = self.client.get(url)
ok_('<th>Install Time</th>' not in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_with_invalid_parsed_dump(self, rget, rpost):
json_dump = {
u'crash_info': {
u'address': u'0x88',
u'type': u'EXCEPTION_ACCESS_VIOLATION_READ'
},
u'main_module': 0,
u'modules': [
{
u'base_addr': u'0x980000',
u'debug_file': u'FlashPlayerPlugin.pdb',
u'debug_id': u'5F3C0D3034CA49FE9B94FC97EBF590A81',
u'end_addr': u'0xb4d000',
u'filename': u'FlashPlayerPlugin_13_0_0_214.exe',
u'version': u'13.0.0.214'},
],
u'sensitive': {u'exploitability': u'none'},
u'status': u'OK',
u'system_info': {
u'cpu_arch': u'x86',
u'cpu_count': 8,
u'cpu_info': u'GenuineIntel family 6 model 26 stepping 4',
u'os': u'Windows NT',
u'os_ver': u'6.0.6002 Service Pack 2'
},
u'thread_count': 1,
u'threads': [{u'frame_count': 0, u'frames': []}]
}
comment0 = "This is a comment"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
email1 = "some@otheremailaddress.com"
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'meta'
):
return Response("""
{
"InstallTime": "Not a number",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "%s",
"Vendor": "Mozilla",
"URL": "%s",
"HangID": "123456789"
}
""" % (email0, url0))
if '/crashes/paireduuid' in url:
return Response("""
{
"hits": [{
"uuid": "e8820616-1462-49b6-9784-e99a32120201"
}],
"total": 1
}
""")
if 'crashes/comments' in url:
return Response("""
{
"hits": [
{
"user_comments": "%s",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "%s",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}
],
"total": 1
}
""" % (comment0, email1))
if 'correlations/signatures' in url:
return Response("""
{
"hits": [
"FakeSignature1",
"FakeSignature2"
],
"total": 2
}
""")
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
return Response("""
{
"client_crash_date": "2012-06-11T06:08:45",
"json_dump": %s,
"signature": "FakeSignature1",
"user_comments": null,
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": null,
"distributor_version": null,
"truncated": true,
"process_type": null,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": true,
"exploitability": "Unknown Exploitability"
}
""" % json.dumps(json_dump))
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
response = self.client.get(url)
ok_('<th>Install Time</th>' not in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_with_sparse_json_dump(self, rget, rpost):
json_dump = {u'status': u'ERROR_NO_MINIDUMP_HEADER', u'sensitive': {}}
comment0 = "This is a comment"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
email1 = "some@otheremailaddress.com"
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'meta'
):
return Response("""
{
"InstallTime": "Not a number",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "%s",
"Vendor": "Mozilla",
"URL": "%s",
"HangID": "123456789"
}
""" % (email0, url0))
if '/crashes/paireduuid' in url:
return Response("""
{
"hits": [{
"uuid": "e8820616-1462-49b6-9784-e99a32120201"
}],
"total": 1
}
""")
if 'crashes/comments' in url:
return Response("""
{
"hits": [
{
"user_comments": "%s",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "%s",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}
],
"total": 1
}
""" % (comment0, email1))
if 'correlations/signatures' in url:
return Response("""
{
"hits": [
"FakeSignature1",
"FakeSignature2"
],
"total": 2
}
""")
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
return Response("""
{
"client_crash_date": "2012-06-11T06:08:45",
"json_dump": %s,
"signature": "FakeSignature1",
"user_comments": null,
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": null,
"distributor_version": null,
"truncated": true,
"process_type": null,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": true,
"exploitability": "Unknown Exploitability"
}
""" % json.dumps(json_dump))
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=['11cb72f5-eb28-41e1-a8e4-849982120611'])
response = self.client.get(url)
eq_(response.status_code, 200)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_with_crash_exploitability(self, rget, rpost):
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod|1"
comment0 = "This is a comment"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
email1 = "some@otheremailaddress.com"
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'meta'
):
return Response("""
{
"InstallTime": "Not a number",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "%s",
"Vendor": "Mozilla",
"URL": "%s",
"HangID": "123456789"
}
""" % (email0, url0))
if '/crashes/paireduuid' in url:
return Response("""
{
"hits": [{
"uuid": "e8820616-1462-49b6-9784-e99a32120201"
}],
"total": 1
}
""")
if '/crashes/comments' in url:
return Response("""
{
"hits": [
{
"user_comments": "%s",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "%s",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}
],
"total": 1
}
""" % (comment0, email1))
if '/correlations/signatures' in url:
return Response("""
{
"hits": [
"FakeSignature1",
"FakeSignature2"
],
"total": 2
}
""")
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
return Response("""
{
"client_crash_date": "2012-06-11T06:08:45",
"dump": "%s",
"signature": "FakeSignature1",
"user_comments": null,
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": null,
"distributor_version": null,
"truncated": true,
"process_type": null,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": true,
"exploitability": "Unknown Exploitability"
}
""" % dump)
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index', args=[crash_id])
response = self.client.get(url)
ok_('Exploitability</th>' not in response.content)
# you must be signed in to see exploitability
user = self._login()
group = self._create_group_with_permission('view_exploitability')
user.groups.add(group)
response = self.client.get(url)
ok_('Exploitability</th>' in response.content)
ok_('Unknown Exploitability' in response.content)
@mock.patch('requests.get')
def test_report_index_processed_crash_not_found(self, rget):
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
raise models.BadStatusCodeError(404)
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_index',
args=[crash_id])
response = self.client.get(url)
eq_(response.status_code, 200)
ok_("Crash Not Found" in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_raw_crash_not_found(self, rget, rpost):
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod|1"
def mocked_get(url, params, **options):
assert '/crash_data/' in url
assert 'datatype' in params
if params['datatype'] == 'unredacted':
return Response("""
{
"client_crash_date": "2012-06-11T06:08:45",
"dump": "%s",
"signature": "FakeSignature1",
"user_comments": null,
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": null,
"distributor_version": null,
"truncated": true,
"process_type": null,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": true,
"exploitability": "Unknown Exploitability"
}
""" % dump)
elif params['datatype'] == 'meta': # raw crash json!
raise models.BadStatusCodeError(404)
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_index',
args=[crash_id])
response = self.client.get(url)
eq_(response.status_code, 200)
ok_("Crash Not Found" in response.content)
@mock.patch('requests.get')
def test_report_index_pending(self, rget):
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
raise models.BadStatusCodeError(408)
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_index',
args=[crash_id])
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Fetching this archived report' in response.content)
@mock.patch('requests.get')
def test_report_index_too_old(self, rget):
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
raise models.BadStatusCodeError(410)
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_index',
args=[crash_id])
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('This archived report has expired' in response.content)
@mock.patch('requests.get')
def test_report_index_other_error(self, rget):
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
return Response('Scary Error', status_code=500)
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_index',
args=[crash_id])
assert_raises(
models.BadStatusCodeError,
self.client.get,
url
)
# Let's also check that we get the response in the exception
# message.
try:
self.client.get(url)
assert False # shouldn't get here
except models.BadStatusCodeError as exception:
ok_('Scary Error' in str(exception))
# and it should include the URL it used
mware_url = models.UnredactedCrash.base_url + '/crash_data/'
ok_(mware_url in str(exception))
@mock.patch('requests.get')
def test_report_pending_json(self, rget):
crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
raise models.BadStatusCodeError(408)
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_pending',
args=[crash_id])
response = self.client.get(url)
expected = {
'status': 'error',
'status_message': ('The report for %s'
' is not available yet.' % crash_id),
'url_redirect': ''
}
eq_(response.status_code, 200)
eq_(expected, json.loads(response.content))
def test_report_index_and_pending_missing_crash_id(self):
url = reverse('crashstats:report_index', args=[''])
response = self.client.get(url)
eq_(response.status_code, 404)
url = reverse('crashstats:report_pending', args=[''])
response = self.client.get(url)
eq_(response.status_code, 404)
def test_report_list(self):
url = reverse('crashstats:report_list')
response = self.client.get(url)
eq_(response.status_code, 400)
response = self.client.get(url, {
'signature': 'sig',
'range_value': 'xxx'
})
eq_(response.status_code, 400)
response = self.client.get(url, {'signature': 'sig'})
eq_(response.status_code, 200)
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
ok_('Crash Reports for sig' in response.content)
def test_report_list_all_link(self):
url = reverse('crashstats:report_list')
sig = 'js::jit::EnterBaselineMethod(JSContext*, js::RunState&)'
response = self.client.get(url, {
'product': 'WaterWolf',
'signature': sig
})
eq_(response.status_code, 200)
doc = pyquery.PyQuery(response.content)
for link in doc('a'):
if link.text and 'View ALL' in link.text:
ok_(urllib.quote_plus(sig) in link.attrib['href'])
def test_report_list_columns_offered(self):
url = reverse('crashstats:report_list')
response = self.client.get(url, {'signature': 'sig'})
eq_(response.status_code, 200)
# The "user_comments" field is a choice
ok_('<option value="user_comments">' in response.content)
# The "URL" field is not a choice
ok_('<option value="URL">' not in response.content)
# also, all fields in models.RawCrash.API_WHITELIST should
# be there
for field in models.RawCrash.API_WHITELIST:
html = '<option value="%s">' % field
ok_(html in response.content)
# but it's different if you're logged in
user = self._login()
group = self._create_group_with_permission('view_pii')
user.groups.add(group)
response = self.client.get(url, {'signature': 'sig'})
eq_(response.status_code, 200)
ok_('<option value="user_comments">' in response.content)
ok_('<option value="URL">' in response.content)
# and a column from the Raw Crash
ok_('<option value="Accessibility">' in response.content)
# and it's only supposed to appear once
eq_(response.content.count('<option value="Accessibility">'), 1)
@mock.patch('requests.get')
def test_report_list_partial_correlations(self, rget):
def mocked_get(url, params, **options):
if 'report/list' in url:
return Response("""
{
"hits": [
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"release_channel": "Release",
"process_type": "browser",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
},
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"uuid": "e491c551-be0d-b0fb-c69e-107380120905",
"cpu_info": "...",
"url": "http://example.com/60053",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"release_channel": "Release",
"process_type": "content",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120822000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
}
],
"total": 2
}
""")
if 'correlations/signatures' in url:
return Response("""
{
"hits": [
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"release_channel": "Release",
"process_type": "browser",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
},
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"uuid": "e491c551-be0d-b0fb-c69e-107380120905",
"cpu_info": "...",
"url": "http://example.com/60053",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"release_channel": "Release",
"process_type": "content",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120822000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
}
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('correlations',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
# relevant data is put into 'data' attributes
ok_('data-correlation_version="5.0a1"' in response.content)
ok_('data-correlation_os="Mac OS X"' in response.content)
@mock.patch('requests.get')
def test_report_list_partial_correlations_no_data(self, rget):
def mocked_get(url, params, **options):
if 'report/list' in url:
return Response("""
{
"hits": [],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('correlations',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
# relevant data is put into 'data' attributes
ok_('data-correlation_version=""' in response.content)
ok_('data-correlation_os=""' in response.content)
@mock.patch('requests.get')
def test_report_list_partial_sigurls(self, rget):
really_long_url = (
'http://thisistheworldsfivehundredthirtyfifthslong'
'esturk.com/that/contains/a/path/and/?a=query&'
)
assert len(really_long_url) > 80
def mocked_get(url, params, **options):
# no specific product was specified, so it should default to all products
ok_('products' in params)
ok_(settings.DEFAULT_PRODUCT not in params['products'])
ok_('ALL' in params['products'])
if '/signatureurls' in url:
return Response("""{
"hits": [
{"url": "http://farm.ville", "crash_count":123},
{"url": "%s", "crash_count": 1}
],
"total": 2
}
""" % (really_long_url))
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('sigurls',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
ok_('Must be signed in to see signature URLs' in response.content)
ok_('http://farm.ville' not in response.content)
user = self._login()
group = self._create_group_with_permission('view_pii')
user.groups.add(group)
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
# <a href="HERE" title="HERE">HERE</a>
eq_(response.content.count('http://farm.ville'), 3)
# because the label is truncated
# <a href="HERE" title="HERE">HE...</a>
eq_(response.content.count(really_long_url), 2)
@mock.patch('requests.get')
def test_report_list_partial_sigurls_specific_product(self, rget):
really_long_url = (
'http://thisistheworldsfivehundredthirtyfifthslong'
'esturk.com/that/contains/a/path/and/?a=query&'
)
assert len(really_long_url) > 80
def mocked_get(url, params, **options):
# 'NightTrain' was specifically requested
ok_('products' in params)
ok_('NightTrain' in params['products'])
if '/signatureurls' in url:
return Response("""{
"hits": [
{"url": "http://farm.ville", "crash_count":123},
{"url": "%s", "crash_count": 1}
],
"total": 2
}
""" % (really_long_url))
raise NotImplementedError(url)
rget.side_effect = mocked_get
user = self._login()
group = self._create_group_with_permission('view_pii')
user.groups.add(group)
url = reverse('crashstats:report_list_partial', args=('sigurls',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3,
'product': 'NightTrain'
})
eq_(response.status_code, 200)
eq_(response.content.count('http://farm.ville'), 3)
@mock.patch('requests.get')
def test_report_list_partial_comments(self, rget):
def mocked_get(url, params, **options):
if '/crashes/comments' in url:
return Response("""
{
"hits": [
{
"user_comments": "I LOVE CHEESE cheese@email.com",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "bob@uncle.com",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}
],
"total": 1
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('comments',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
ok_('I LOVE CHEESE' in response.content)
ok_('email removed' in response.content)
ok_('bob@uncle.com' not in response.content)
ok_('cheese@email.com' not in response.content)
user = self._login()
group = self._create_group_with_permission('view_pii')
user.groups.add(group)
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
ok_('I LOVE CHEESE' in response.content)
ok_('email removed' not in response.content)
ok_('bob@uncle.com' in response.content)
ok_('cheese@email.com' in response.content)
@mock.patch('requests.get')
def test_report_list_partial_comments_paginated(self, rget):
called_with_params = []
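# collect every set of params sent to the middleware so we can assert
# that requesting page 2 triggers a second call with a result_offset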
def mocked_get(url, params, **options):
if '/crashes/comments' in url:
called_with_params.append(params)
if params.get('result_offset'):
return Response({
"hits": [{
"user_comments": "I LOVE HAM",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "bob@uncle.com",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}],
"total": 2
})
else:
return Response({
"hits": [{
"user_comments": "I LOVE CHEESE",
"date_processed": "2011-08-21T11:17:28-07:00",
"email": "bob@uncle.com",
"uuid": "469bde48-0e8f-3586-d486-b98810120829"
}],
"total": 2
})
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('comments',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
ok_('I LOVE CHEESE' in response.content)
ok_('I LOVE HAM' not in response.content)
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3,
'page': 2,
})
eq_(response.status_code, 200)
ok_('I LOVE HAM' in response.content)
ok_('I LOVE CHEESE' not in response.content)
eq_(len(called_with_params), 2)
@mock.patch('requests.get')
def test_report_list_partial_reports(self, rget):
def mocked_get(url, params, **options):
if 'report/list' in url:
return Response("""
{
"hits": [
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "browser",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
},
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"uuid": "e491c551-be0d-b0fb-c69e-107380120905",
"cpu_info": "...",
"url": "http://example.com/60053",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "content",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120822000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
}
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('reports',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
ok_('0xdeadbeef' in response.content)
@mock.patch('requests.get')
def test_report_list_partial_reports_with_sorting(self, rget):
mock_calls = []
def mocked_get(url, params, **options):
mock_calls.append(params)
if 'report/list' in url:
return Response("""
{
"hits": [
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "browser",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
},
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"uuid": "e491c551-be0d-b0fb-c69e-107380120905",
"cpu_info": "...",
"url": "http://example.com/60053",
"last_crash": 1234,
"date_processed": "2012-09-05T22:19:59+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "content",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120822000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
}
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('reports',))
data = {
'signature': 'FakeSignature2',
'range_value': 3
}
response = self.client.get(url, data)
eq_(response.status_code, 200)
assert len(mock_calls) == 1
eq_(mock_calls[-1]['sort'], 'date_processed')
ok_('reverse' not in mock_calls[-1])
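        # An explicit sort parameter should be passed through to the
        # backend unchanged.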
response = self.client.get(url, dict(
data,
sort='build'
))
eq_(response.status_code, 200)
assert len(mock_calls) == 2
eq_(mock_calls[-1]['sort'], 'build')
ok_('reverse' not in mock_calls[-1])
response = self.client.get(url, dict(
data,
sort='build',
reverse='True'
))
eq_(response.status_code, 200)
assert len(mock_calls) == 3
eq_(mock_calls[-1]['sort'], 'build')
eq_(mock_calls[-1]['reverse'], True)
@mock.patch('requests.get')
def test_report_list_partial_reports_columns_override(self, rget):
def mocked_get(url, params, **options):
if 'report/list' in url:
return Response("""
{
"hits": [
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "browser",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
},
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"uuid": "e491c551-be0d-b0fb-c69e-107380120905",
"cpu_info": "...",
"url": "http://example.com/60053",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "content",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120822000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
}
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('reports',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3,
'c': ['crap', 'date_processed', 'reason', 'os_and_version']
})
eq_(response.status_code, 200)
# 'reason' in _columns
ok_('reason7' in response.content)
# 'address' not in _columns
ok_('0xdeadbeef' not in response.content)
# 'cpu_name' not in _columns
ok_('x86' not in response.content)
        # 'os_and_version' in _columns, so the composed OS column is rendered
ok_('Mac OS X' in response.content)
@mock.patch('requests.get')
def test_report_list_partial_reports_with_rawcrash(self, rget):
def mocked_get(url, params, **options):
if 'report/list' in url:
return Response("""
{
"hits": [
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "browser",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null,
"raw_crash": {
"Winsock_LSP": "Peter",
"SecondsSinceLastCrash": "Bengtsson"
}
},
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"uuid": "e491c551-be0d-b0fb-c69e-107380120905",
"cpu_info": "...",
"url": "http://example.com/60053",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "content",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120822000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null,
"raw_crash": null
}
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('reports',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3,
'c': ['date_processed', 'Winsock_LSP', 'SecondsSinceLastCrash']
})
eq_(response.status_code, 200)
ok_('Peter' in response.content)
ok_('Bengtsson' in response.content)
# and also the table headers should be there
ok_('Winsock_LSP*' in response.content)
ok_('SecondsSinceLastCrash*' in response.content)
@mock.patch('requests.get')
def test_report_list_partial_reports_page_2(self, rget):
uuids = []
_date = datetime.datetime.now()
for i in range(300):
uuids.append(
'441017f4-e006-4eea-8451-dc20e' +
_date.strftime('%Y%m%d')
)
_date += datetime.timedelta(days=1)
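        # The 300 daily UUIDs above guarantee more hits than a single page
        # can show, so pagination links must appear.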
def mocked_get(url, params, **options):
if 'report/list' in url:
result_number = int(params['result_number'])
try:
result_offset = int(params['result_offset'])
except KeyError:
result_offset = 0
first = {
"user_comments": None,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "browser",
"hangid": None,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": None
}
hits = []
for i in range(result_offset, result_offset + result_number):
try:
item = dict(first, uuid=uuids[i])
hits.append(item)
except IndexError:
break
return Response(json.dumps({
"hits": hits,
"total": len(uuids)
}))
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('reports',))
response = self.client.get(url, {
'signature': 'sig',
})
eq_(response.status_code, 200)
ok_(uuids[0] in response.content)
ok_(uuids[-1] not in response.content)
# expect there to be a link with `page=2` in there
report_list_url = reverse('crashstats:report_list')
report_list_url += '?signature=sig'
ok_(report_list_url + '&page=2' in response.content)
# we'll need a copy of this for later
response_first = response
response = self.client.get(url, {
'signature': 'sig',
'page': 2
})
eq_(response.status_code, 200)
ok_(uuids[0] not in response.content)
ok_(uuids[-1] in response.content)
# try to be a smartass
response_zero = self.client.get(url, {
'signature': 'sig',
'page': 0
})
        eq_(response_zero.status_code, 200)
# because with page < 1 you get page=1
tbody_zero = response_zero.content.split('<tbody')[1]
tbody_first = response_first.content.split('<tbody')[1]
eq_(hash(tbody_zero), hash(tbody_first))
response = self.client.get(url, {
'signature': 'sig',
'page': 'xx'
})
eq_(response.status_code, 400)
@mock.patch('requests.get')
def test_report_list_partial_reports_non_defaults(self, rget):
def mocked_get(url, params, **options):
if 'report/list' in url:
return Response("""
{
"hits": [
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Linux",
"uuid": "441017f4-e006-4eea-8451-dc20e0120905",
"cpu_info": "...",
"url": "http://example.com/116",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "browser",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120901000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
},
{
"user_comments": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"uuid": "e491c551-be0d-b0fb-c69e-107380120905",
"cpu_info": "...",
"url": "http://example.com/60053",
"last_crash": 1234,
"date_processed": "2012-09-05T21:18:58+00:00",
"cpu_name": "x86",
"uptime": 1234,
"process_type": "content",
"hangid": null,
"reason": "reason7",
"version": "5.0a1",
"os_version": "1.2.3.4",
"build": "20120822000007",
"install_age": 1234,
"signature": "FakeSignature2",
"install_time": "2012-09-05T20:58:24+00:00",
"address": "0xdeadbeef",
"duplicate_of": null
}
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('reports',))
data = {
'signature': 'sig',
'range_unit': settings.RANGE_UNITS[-1],
'process_type': settings.PROCESS_TYPES[-1],
'range_value': 48,
'plugin_field': settings.PLUGIN_FIELDS[-1],
'hang_type': settings.HANG_TYPES[-1],
'plugin_query_type': settings.QUERY_TYPES[-1],
'product': 'NightTrain',
}
response = self.client.get(url, data)
eq_(response.status_code, 200)
def test_report_list_partial_reports_invalid_range_value(self):
url = reverse('crashstats:report_list_partial', args=('reports',))
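        # The view presumably caps range_value per unit; anything over the
        # allowed maximum for days/weeks/hours must return a 400.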
data = {
'signature': 'sig',
'range_unit': 'days',
'process_type': settings.PROCESS_TYPES[-1],
'range_value': 48,
'plugin_field': settings.PLUGIN_FIELDS[-1],
'hang_type': settings.HANG_TYPES[-1],
'plugin_query_type': settings.QUERY_TYPES[-1],
'product': 'NightTrain',
}
response = self.client.get(url, data)
eq_(response.status_code, 400)
response = self.client.get(url, dict(data, range_unit='weeks'))
eq_(response.status_code, 400)
response = self.client.get(url, dict(
data,
range_unit='hours',
range_value=24 * 48
))
eq_(response.status_code, 400)
@mock.patch('requests.post')
def test_report_list_partial_bugzilla(self, rpost):
def mocked_post(url, **options):
if '/bugs/' in url:
return Response({
"hits": [
{"id": 111111,
"signature": "Something"},
{"id": 123456789,
"signature": "Something"}
]
})
raise NotImplementedError(url)
rpost.side_effect = mocked_post
url = reverse('crashstats:report_list_partial', args=('bugzilla',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
# not the right signature so it's part of "Related Crash Signatures"
ok_(
response.content.find('Related Crash Signatures') <
response.content.find('123456789')
)
response = self.client.get(url, {
'signature': 'Something',
'range_value': 3
})
eq_(response.status_code, 200)
# now the right signature
ok_('123456789' in response.content)
ok_('111111' in response.content)
# because bug id 123456789 is > than 111111 we expect that order
# in the rendered output
ok_(
response.content.find('123456789') <
response.content.find('111111') <
response.content.find('Related Crash Signatures')
)
@mock.patch('requests.get')
def test_report_list_partial_table(self, rget):
def mocked_get(url, params, **options):
if '/crashes/frequency' in url:
                # These fixtures stress the possibility that the build_date
                # might be invalid or simply null.
return Response("""
{
"hits": [
{
"count": 1050,
"build_date": "20130806030203",
"count_mac": 0,
"frequency_windows": 1.0,
"count_windows": 1050,
"frequency": 1.0,
"count_linux": 0,
"total": 1050,
"frequency_linux": 0.0,
"frequency_mac": 0.0
},
{
"count": 1150,
"build_date": "notadate",
"count_mac": 0,
"frequency_windows": 1.0,
"count_windows": 1150,
"frequency": 1.0,
"count_linux": 0,
"total": 1150,
"frequency_linux": 0.0,
"frequency_mac": 0.0
},
{
"count": 1250,
"build_date": null,
"count_mac": 0,
"frequency_windows": 1.0,
"count_windows": 1250,
"frequency": 1.0,
"count_linux": 0,
"total": 1250,
"frequency_linux": 0.0,
"frequency_mac": 0.0
}
]
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('table',))
response = self.client.get(url, {
'signature': 'sig',
'range_value': 3
})
eq_(response.status_code, 200)
ok_('1050 - 100.0%' in response.content)
ok_('1150 - 100.0%' in response.content)
ok_('1250 - 100.0%' in response.content)
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_report_index_redirect_by_prefix(self, rget, rpost):
dump = "OS|Mac OS X|10.6.8 10K549\\nCPU|amd64|family 6 mod|1"
comment0 = "This is a comment"
email0 = "some@emailaddress.com"
url0 = "someaddress.com"
email1 = "some@otheremailaddress.com"
def mocked_get(url, params, **options):
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'meta'
):
return Response("""
{
"InstallTime": "1339289895",
"FramePoisonSize": "4096",
"Theme": "classic/1.0",
"Version": "5.0a1",
"Email": "%s",
"Vendor": "Mozilla",
"URL": "%s"
}
""" % (email0, url0))
if 'crashes/comments' in url:
return Response("""
{
"hits": [
{
"user_comments": "%s",
"date_processed": "2012-08-21T11:17:28-07:00",
"email": "%s",
"uuid": "469bde48-0e8f-3586-d486-b98810120830"
}
],
"total": 1
}
""" % (comment0, email1))
if (
'/crash_data' in url and
'datatype' in params and
params['datatype'] == 'unredacted'
):
return Response("""
{
"client_crash_date": "2012-06-11T06:08:45",
"dump": "%s",
"signature": "FakeSignature1",
"user_comments": null,
"uptime": 14693,
"release_channel": "nightly",
"uuid": "11cb72f5-eb28-41e1-a8e4-849982120611",
"flash_version": "[blank]",
"hangid": null,
"distributor_version": null,
"truncated": true,
"process_type": null,
"id": 383569625,
"os_version": "10.6.8 10K549",
"version": "5.0a1",
"build": "20120609030536",
"ReleaseChannel": "nightly",
"addons_checked": null,
"product": "WaterWolf",
"os_name": "Mac OS X",
"last_crash": 371342,
"date_processed": "2012-06-11T06:08:44",
"cpu_name": "amd64",
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"address": "0x8",
"completeddatetime": "2012-06-11T06:08:57",
"success": true,
"exploitability": "Unknown Exploitability"
}
""" % dump)
if 'correlations/signatures' in url:
return Response("""
{
"hits": [
"FakeSignature1",
"FakeSignature2"
],
"total": 2
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
def mocked_post(url, **options):
if '/bugs/' in url:
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
raise NotImplementedError(url)
rpost.side_effect = mocked_post
base_crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
crash_id = settings.CRASH_ID_PREFIX + base_crash_id
assert len(crash_id) > 36
url = reverse('crashstats:report_index', args=[crash_id])
response = self.client.get(url)
correct_url = reverse('crashstats:report_index', args=[base_crash_id])
self.assertRedirects(response, correct_url)
@mock.patch('requests.get')
def test_report_list_with_no_data(self, rget):
def mocked_get(url, params, **options):
if 'report/list' in url:
return Response("""
{
"hits": [],
"total": 0
}
""")
raise NotImplementedError(url)
rget.side_effect = mocked_get
url = reverse('crashstats:report_list_partial', args=('reports',))
response = self.client.get(url, {'signature': 'sig'})
eq_(response.status_code, 200)
        # It's brittle to depend on the rendered output like this, but it'll
        # do for now since it's quite a rare occurrence.
ok_('</html>' not in response.content) # it's a partial
ok_('no reports in the time period specified' in response.content)
@mock.patch('requests.get')
def test_raw_data(self, rget):
def mocked_get(url, params, **options):
assert '/crash_data' in url
if 'datatype' in params and params['datatype'] == 'raw':
return Response("""
bla bla bla
""".strip())
else:
                # the default datatype is 'meta'
return Response("""
{"foo": "bar",
"stuff": 123}
""")
rget.side_effect = mocked_get
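        # Raw data requires authentication; anonymous users get redirected
        # to the login page first.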
crash_id = '176bcd6c-c2ec-4b0c-9d5f-dadea2120531'
json_url = reverse('crashstats:raw_data', args=(crash_id, 'json'))
response = self.client.get(json_url)
self.assertRedirects(
response,
reverse('crashstats:login') + '?next=%s' % json_url
)
eq_(response.status_code, 302)
user = self._login()
group = self._create_group_with_permission('view_rawdump')
user.groups.add(group)
assert user.has_perm('crashstats.view_rawdump')
response = self.client.get(json_url)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'application/json')
eq_(json.loads(response.content),
{"foo": "bar", "stuff": 123})
dump_url = reverse('crashstats:raw_data', args=(crash_id, 'dmp'))
response = self.client.get(dump_url)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'application/octet-stream')
ok_('bla bla bla' in response.content, response.content)
        # Dump files are cached: swap in a different mock and expect the
        # same content as before.
def different_mocked_get(url, **options):
if '/crash_data' in url and 'datatype=raw' in url:
return Response("""
SOMETHING DIFFERENT
""".strip())
raise NotImplementedError(url)
rget.side_effect = different_mocked_get
response = self.client.get(dump_url)
eq_(response.status_code, 200)
ok_('bla bla bla' in response.content) # still. good.
@mock.patch('requests.post')
@mock.patch('requests.get')
def test_remembered_date_range_type(self, rget, rpost):
# if you visit the home page, the default date_range_type will be
# 'report' but if you switch to 'build' it'll remember that
def mocked_get(url, params, **options):
if '/products' in url and 'versions' not in params:
return Response("""
{
"products": [
"WaterWolf"
],
"hits": {
"WaterWolf": [{
"featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"release": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}]
},
"total": 1
}
""")
elif '/products' in url:
return Response("""
{
"hits": [{
"is_featured": true,
"throttle": 100.0,
"end_date": "2012-11-27",
"product": "WaterWolf",
"build_type": "Nightly",
"version": "19.0",
"has_builds": true,
"start_date": "2012-09-25"
}],
"total": 1
}
""")
if '/crashes/daily' in url:
return Response("""
{
"hits": {
"WaterWolf:19.0": {
"2012-10-08": {
"product": "WaterWolf",
"adu": 30000,
"crash_hadu": 71.099999999999994,
"version": "19.0",
"report_count": 2133,
"date": "2012-10-08"
},
"2012-10-02": {
"product": "WaterWolf",
"adu": 30000,
"crash_hadu": 77.299999999999997,
"version": "19.0",
"report_count": 2319,
"date": "2012-10-02"
}
}
}
}
""")
if '/crashes/signatures' in url:
return Response("""
{"crashes": [
{
"count": 188,
"mac_count": 66,
"content_count": 0,
"first_report": "2012-06-21",
"startup_percent": 0.0,
"currentRank": 0,
"previousRank": 1,
"first_report_exact": "2012-06-21T21:28:08",
"versions":
"2.0, 2.1, 3.0a2, 3.0b2, 3.1b1, 4.0a1, 4.0a2, 5.0a1",
"percentOfTotal": 0.24258064516128999,
"win_count": 56,
"changeInPercentOfTotal": 0.011139597126354983,
"linux_count": 66,
"hang_count": 0,
"signature": "FakeSignature1",
"versions_count": 8,
"changeInRank": 0,
"plugin_count": 0,
"previousPercentOfTotal": 0.23144104803493501,
"is_gc_count": 10
}
],
"totalPercentage": 0,
"start_date": "2012-05-10",
"end_date": "2012-05-24",
"totalNumberOfCrashes": 0}
""")
raise NotImplementedError(url)
def mocked_post(**options):
assert '/bugs/' in options['url'], options['url']
return Response("""
{"hits": [{"id": "123456789",
"signature": "Something"}]}
""")
rpost.side_effect = mocked_post
rget.side_effect = mocked_get
url = reverse('crashstats:home', args=('WaterWolf',))
response = self.client.get(url)
eq_(response.status_code, 200)
        regex = re.compile(r'(<a\s+href="\?date_range_type=(\w+)[^>]+)')
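        # Collect every date_range_type link and check which one is
        # marked as selected.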
for tag, value in regex.findall(response.content):
if value == 'report':
ok_('selected' in tag)
else:
ok_('selected' not in tag)
        # now, like the home page does, fire off an AJAX request to frontpage
# for 'build' instead
frontpage_json_url = reverse('crashstats:frontpage_json')
        frontpage_response = self.client.get(frontpage_json_url, {
'product': 'WaterWolf',
'date_range_type': 'build'
})
        eq_(frontpage_response.status_code, 200)
# load the home page again, and it should be on build date instead
response = self.client.get(url)
eq_(response.status_code, 200)
for tag, value in regex.findall(response.content):
if value == 'build':
ok_('selected' in tag)
else:
ok_('selected' not in tag)
# open topcrashers with 'report'
topcrasher_report_url = reverse(
'crashstats:topcrasher',
kwargs={
'product': 'WaterWolf',
'versions': '19.0',
'date_range_type': 'report'
}
)
response = self.client.get(topcrasher_report_url)
eq_(response.status_code, 200)
# now, go back to the home page, and 'report' should be the new default
response = self.client.get(url)
eq_(response.status_code, 200)
for tag, value in regex.findall(response.content):
if value == 'report':
ok_('selected' in tag)
else:
ok_('selected' not in tag)
# open topcrashers with 'build'
topcrasher_report_url = reverse(
'crashstats:topcrasher',
kwargs={
'product': 'WaterWolf',
'versions': '19.0',
'date_range_type': 'build'
}
)
response = self.client.get(topcrasher_report_url)
eq_(response.status_code, 200)
        # now, go back to the home page, and 'build' should be the new default
response = self.client.get(url)
eq_(response.status_code, 200)
for tag, value in regex.findall(response.content):
if value == 'build':
ok_('selected' in tag)
else:
ok_('selected' not in tag)
@mock.patch('requests.get')
def test_correlations_json(self, rget):
url = reverse('crashstats:correlations_json')
def mocked_get(url, params, **options):
if '/correlations/' in url:
ok_('report_type' in params)
eq_(params['report_type'], 'core-counts')
return Response({
"reason": "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS",
"count": 13,
"load": "36% (4/11) vs. 26% (47/180) amd64 with 2 cores"
})
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(
url,
{'correlation_report_type': 'core-counts',
'product': 'WaterWolf',
'version': '19.0',
'platform': 'Windows NT',
'signature': 'FakeSignature'}
)
        eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
eq_(struct['reason'], 'EXC_BAD_ACCESS / KERN_INVALID_ADDRESS')
@mock.patch('requests.get')
def test_correlations_signatures_json(self, rget):
url = reverse('crashstats:correlations_signatures_json')
def mocked_get(url, params, **options):
if '/correlations/' in url:
return Response({
"hits": ["FakeSignature1",
"FakeSignature2"],
"total": 2
})
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self.client.get(
url,
{'correlation_report_type': 'core-counts',
'product': 'WaterWolf',
'version': '19.0',
'platforms': 'Windows NT,Linux'}
)
        eq_(response.status_code, 200)
ok_('application/json' in response['content-type'])
struct = json.loads(response.content)
eq_(struct['total'], 2)
def test_unauthenticated_user_redirected_from_protected_page(self):
url = reverse(
'crashstats:exploitable_crashes',
args=(settings.DEFAULT_PRODUCT,)
)
response = self.client.get(url)
self.assertRedirects(
response,
'%s?%s=%s' % (
reverse('crashstats:login'),
REDIRECT_FIELD_NAME,
url,
)
)
def test_login_page_renders(self):
url = reverse('crashstats:login')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Login Required' in response.content)
ok_('Insufficient Privileges' not in response.content)
self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Login Required' not in response.content)
ok_('Insufficient Privileges' in response.content)
def test_your_permissions_page(self):
url = reverse('crashstats:permissions')
response = self.client.get(url)
eq_(response.status_code, 302)
self.assertRedirects(
response,
reverse('crashstats:login') + '?next=%s' % url
)
user = self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_(user.email in response.content)
# make some groups and attach permissions
self._create_group_with_permission(
'view_pii', 'Group A'
)
groupB = self._create_group_with_permission(
'view_exploitability', 'Group B'
)
user.groups.add(groupB)
assert not user.has_perm('crashstats.view_pii')
assert user.has_perm('crashstats.view_exploitability')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_(PERMISSIONS['view_pii'] in response.content)
ok_(PERMISSIONS['view_exploitability'] in response.content)
doc = pyquery.PyQuery(response.content)
for row in doc('table.permissions tbody tr'):
cells = []
for td in doc('td', row):
cells.append(td.text.strip())
if cells[0] == PERMISSIONS['view_pii']:
eq_(cells[1], 'No')
elif cells[0] == PERMISSIONS['view_exploitability']:
eq_(cells[1], 'Yes!')
| rhelmer/socorro-webapp | crashstats/crashstats/tests/test_views.py | Python | mpl-2.0 | 222,462 | [
"VisIt"
] | 4dbe2af9f2948618d7453f758224aeef385ad7969468cd3fd492e988e2d18144 |
import sys
tests = [
("testExecs/test.exe", "", {}),
("python", "test_list.py", {'dir': 'Wrap'}),
]
if sys.platform != 'win32':
pass
longTests = []
if __name__ == '__main__':
import sys
from rdkit import TestRunner
failed, tests = TestRunner.RunScript('test_list.py', 0, 1)
sys.exit(len(failed))
| jandom/rdkit | Code/GraphMol/SLNParse/test_list.py | Python | bsd-3-clause | 314 | [
"RDKit"
] | 8345e63da62acd529fa708e0c4748fd479bcad5bfd420e1ca4d5d62a1e1416fe |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------
# Copyright (c) 2009 Jendrik Seipp
#
# RedNotebook is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# RedNotebook is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with RedNotebook; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------
import signal
import os
import re
import httplib
from urllib2 import urlopen
import webbrowser
import logging
from distutils.version import StrictVersion
import gtk
from rednotebook import info
import filesystem
def sort_asc(string):
return str(string).lower()
def set_environment_variables(config):
variables = {}
for variable, value in variables.iteritems():
if variable not in os.environ:
# Only add environment variable if it does not exist yet
os.environ[variable] = config.read(variable, default=value)
logging.info('%s set to %s' % (variable, value))
for variable in variables.keys():
if variable in os.environ:
logging.info('The environment variable %s has value %s' % (variable, os.environ.get(variable)))
else:
logging.info('There is no environment variable called %s' % variable)
def setup_signal_handlers(journal):
"""
Catch abnormal exits of the program and save content to disk
Look in signal man page for signal names
SIGKILL cannot be caught
SIGINT is caught again by KeyboardInterrupt
"""
signals = []
signal_names = [
'SIGHUP', # Terminal closed, Parent process dead
'SIGINT', # Interrupt from keyboard (CTRL-C)
'SIGQUIT', # Quit from keyboard
'SIGABRT', # Abort signal from abort(3)
'SIGTERM', # Termination signal
'SIGTSTP', # Stop typed at tty
]
def signal_handler(signum, frame):
logging.info('Program was abnormally aborted with signal %s' % signum)
journal.exit()
for signal_name in signal_names:
signal_number = getattr(signal, signal_name, None)
if signal_number is not None:
try:
signal.signal(signal_number, signal_handler)
signals.append(signal_number)
except RuntimeError:
logging.info('Could not connect signal number %d' % signal_number)
logging.info('Connected Signals: %s' % signals)
def get_new_version_number():
"""
Reads version number from website and returns None if it cannot be read
"""
version_pattern = re.compile(r'<span id="download-version">(.+)</span>')
try:
project_xml = urlopen('http://rednotebook.sourceforge.net/index.html').read()
match = version_pattern.search(project_xml)
if not match:
return None
new_version = match.group(1)
new_version = StrictVersion(new_version)
logging.info('%s is the latest version' % new_version)
return new_version
except (IOError, httplib.HTTPException):
return None
def check_new_version(journal, current_version, startup=False):
current_version = StrictVersion(current_version)
new_version = get_new_version_number()
if new_version is not None:
newer_version_available = new_version > current_version
else:
logging.error('New version info could not be read')
new_version = _('unknown')
newer_version_available = None
logging.info('Current version: %s, latest version: %s, newer: %s' %
(current_version, new_version, newer_version_available))
if newer_version_available or not startup:
dialog = gtk.MessageDialog(parent=None, flags=gtk.DIALOG_MODAL,
type=gtk.MESSAGE_INFO,
buttons=gtk.BUTTONS_YES_NO,
message_format=None)
dialog.set_transient_for(journal.frame.main_frame)
primary_text = (_('You have version <b>%s</b>.') % current_version + ' ' +
_('The latest version is <b>%s</b>.') % new_version)
secondary_text = _('Do you want to visit the RedNotebook homepage?')
dialog.set_markup(primary_text)
dialog.format_secondary_text(secondary_text)
# Let user disable checks
if startup:
# Add button on the left side
dialog.add_button(_('Do not ask again'), 30)
settings = gtk.settings_get_default()
settings.set_property('gtk-alternative-button-order', True)
dialog.set_alternative_button_order([30, gtk.RESPONSE_NO,
gtk.RESPONSE_YES])
response = dialog.run()
dialog.hide()
if response == gtk.RESPONSE_YES:
webbrowser.open(info.url)
elif response == 30:
logging.info('Checks for new versions disabled')
journal.config['checkForNewVersion'] = 0
def show_html_in_browser(html, filename):
filesystem.write_file(filename, html)
html_file = os.path.abspath(filename)
html_file = 'file://' + html_file
webbrowser.open(html_file)
class StreamDuplicator(object):
def __init__(self, streams):
self.streams = streams
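        # Mirror every write to all wrapped streams, e.g. to echo stdout
        # into a log file as well.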
def write(self, buf):
for stream in self.streams:
stream.write(buf)
# If we don't flush here, stderr messages are printed late.
stream.flush()
def flush(self):
for stream in self.streams:
stream.flush()
def close(self):
        for stream in self.streams:
            stream.close()
| dustincys/rednotebook | rednotebook/util/utils.py | Python | gpl-2.0 | 6,182 | [
"VisIt"
] | af192bc015b47fef170d5088f198d1e796f01c7626c779bc5c4e9c07c2766b81 |
#!/usr/bin/env python
"""
Visualise Changes in Edge Weights
=================================
Here, we demonstrate how to visualise changes in edge weights over time.
We change both, the colour and the width of the edges depending on the weight.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from netgraph import Graph
# Simulate a dynamic network with
# - 5 frames / network states,
# - with 10 nodes at each time point,
# - an expected edge density of 25%, and
# - edge weights drawn from a Gaussian distribution.
total_frames = 5
total_nodes = 10
adjacency_matrix = np.random.rand(total_nodes, total_nodes) < 0.25
weight_matrix = np.random.randn(total_frames, total_nodes, total_nodes)
# Normalise the weights, such that they are on the interval [0, 1].
# They can then be passed directly to matplotlib colormaps (which expect floats on that interval).
vmin, vmax = -2, 2
weight_matrix[weight_matrix<vmin] = vmin
weight_matrix[weight_matrix>vmax] = vmax
weight_matrix -= vmin
weight_matrix /= vmax - vmin
cmap = plt.cm.RdGy
fig, ax = plt.subplots()
g = Graph(adjacency_matrix, edge_cmap=cmap, arrows=True, ax=ax)
def update(ii):
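    # Called once per frame by FuncAnimation; returning the modified artists
    # lets matplotlib redraw only those (blit=True).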
artists = []
for jj, kk in zip(*np.where(adjacency_matrix)):
w = weight_matrix[ii, jj, kk]
artist = g.edge_artists[(jj, kk)]
artist.set_facecolor(cmap(w))
artist.update_width(0.03 * np.abs(w-0.5)) # np.abs(w-0.5) so that large negative edges are also wide
artists.append(artist)
return artists
animation = FuncAnimation(fig, update, frames=total_frames, interval=200, blit=True)
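# NB: keep a reference to the animation object, or it may be garbage
# collected before it has a chance to run.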
| paulbrodersen/netgraph | docs/source/sphinx_gallery_animations/plot_02_animate_edges.py | Python | gpl-3.0 | 1,627 | [
"Gaussian"
] | bfd1b5cbd0f79402291c0c1e62b31d4b74d970072726de2822297c920b72d91f |